diff -Nru awscli-2.15.9/.changes/1.32.16.json awscli-2.15.22/.changes/1.32.16.json --- awscli-2.15.9/.changes/1.32.16.json 1970-01-01 00:00:00.000000000 +0000 +++ awscli-2.15.22/.changes/1.32.16.json 2024-02-21 17:34:54.000000000 +0000 @@ -0,0 +1,37 @@ +[ + { + "category": "``connectcampaigns``", + "description": "Minor pattern updates for Campaign and Dial Request API fields.", + "type": "api-change" + }, + { + "category": "``location``", + "description": "This release adds API support for custom layers for the maps service APIs: CreateMap, UpdateMap, DescribeMap.", + "type": "api-change" + }, + { + "category": "``logs``", + "description": "Add support for account level subscription filter policies to PutAccountPolicy, DescribeAccountPolicies, and DeleteAccountPolicy APIs. Additionally, PutAccountPolicy has been modified with new optional \"selectionCriteria\" parameter for resource selection.", + "type": "api-change" + }, + { + "category": "``qconnect``", + "description": "QueryAssistant and GetRecommendations will be discontinued starting June 1, 2024. To receive generative responses after March 1, 2024 you will need to create a new Assistant in the Connect console and integrate the Amazon Q in Connect JavaScript library (amazon-q-connectjs) into your applications.", + "type": "api-change" + }, + { + "category": "``redshift-serverless``", + "description": "Updates to ConfigParameter for RSS workgroup, removal of use_fips_ssl", + "type": "api-change" + }, + { + "category": "``route53``", + "description": "Route53 now supports geoproximity routing in AWS regions", + "type": "api-change" + }, + { + "category": "``wisdom``", + "description": "QueryAssistant and GetRecommendations will be discontinued starting June 1, 2024. 
To receive generative responses after March 1, 2024 you will need to create a new Assistant in the Connect console and integrate the Amazon Q in Connect JavaScript library (amazon-q-connectjs) into your applications.", + "type": "api-change" + } +] \ No newline at end of file diff -Nru awscli-2.15.9/.changes/1.32.17.json awscli-2.15.22/.changes/1.32.17.json --- awscli-2.15.9/.changes/1.32.17.json 1970-01-01 00:00:00.000000000 +0000 +++ awscli-2.15.22/.changes/1.32.17.json 2024-02-21 17:34:54.000000000 +0000 @@ -0,0 +1,37 @@ +[ + { + "category": "``ec2``", + "description": "This release adds support for adding an ElasticBlockStorage volume configurations in ECS RunTask/StartTask/CreateService/UpdateService APIs. The configuration allows for attaching EBS volumes to ECS Tasks.", + "type": "api-change" + }, + { + "category": "``ecs``", + "description": "This release adds support for adding an ElasticBlockStorage volume configurations in ECS RunTask/StartTask/CreateService/UpdateService APIs. 
The configuration allows for attaching EBS volumes to ECS Tasks.", + "type": "api-change" + }, + { + "category": "``events``", + "description": "Update events command to latest version", + "type": "api-change" + }, + { + "category": "``iot``", + "description": "Add ConflictException to Update APIs of AWS IoT Software Package Catalog", + "type": "api-change" + }, + { + "category": "``iotfleetwise``", + "description": "The following dataTypes have been removed: CUSTOMER_DECODED_INTERFACE in NetworkInterfaceType; CUSTOMER_DECODED_SIGNAL_INFO_IS_NULL in SignalDecoderFailureReason; CUSTOMER_DECODED_SIGNAL_NETWORK_INTERFACE_INFO_IS_NULL in NetworkInterfaceFailureReason; CUSTOMER_DECODED_SIGNAL in SignalDecoderType", + "type": "api-change" + }, + { + "category": "``secretsmanager``", + "description": "Doc only update for Secrets Manager", + "type": "api-change" + }, + { + "category": "``workspaces``", + "description": "Added AWS Workspaces RebootWorkspaces API - Extended Reboot documentation update", + "type": "api-change" + } +] \ No newline at end of file diff -Nru awscli-2.15.9/.changes/1.32.18.json awscli-2.15.22/.changes/1.32.18.json --- awscli-2.15.9/.changes/1.32.18.json 1970-01-01 00:00:00.000000000 +0000 +++ awscli-2.15.22/.changes/1.32.18.json 2024-02-21 17:34:54.000000000 +0000 @@ -0,0 +1,42 @@ +[ + { + "category": "``connect``", + "description": "Supervisor Barge for Chat is now supported through the MonitorContact API.", + "type": "api-change" + }, + { + "category": "``connectparticipant``", + "description": "Introduce new Supervisor participant role", + "type": "api-change" + }, + { + "category": "``location``", + "description": "Location SDK documentation update. Added missing fonts to the MapConfiguration data type. Updated note for the SubMunicipality property in the place data type.", + "type": "api-change" + }, + { + "category": "``mwaa``", + "description": "This Amazon MWAA feature release includes new fields in CreateWebLoginToken response model. 
The new fields IamIdentity and AirflowIdentity will let you match identifications, as the Airflow identity length is currently hashed to 64 characters.", + "type": "api-change" + }, + { + "category": "``s3control``", + "description": "S3 On Outposts team adds dualstack endpoints support for S3Control and S3Outposts API calls.", + "type": "api-change" + }, + { + "category": "``supplychain``", + "description": "This release includes APIs CreateBillOfMaterialsImportJob and GetBillOfMaterialsImportJob.", + "type": "api-change" + }, + { + "category": "``transfer``", + "description": "AWS Transfer Family now supports static IP addresses for SFTP & AS2 connectors and for async MDNs on AS2 servers.", + "type": "api-change" + }, + { + "category": "``endpoint-rules``", + "description": "Update endpoint-rules command to latest version", + "type": "api-change" + } +] \ No newline at end of file diff -Nru awscli-2.15.9/.changes/1.32.19.json awscli-2.15.22/.changes/1.32.19.json --- awscli-2.15.9/.changes/1.32.19.json 1970-01-01 00:00:00.000000000 +0000 +++ awscli-2.15.22/.changes/1.32.19.json 2024-02-21 17:34:54.000000000 +0000 @@ -0,0 +1,7 @@ +[ + { + "category": "``sagemaker``", + "description": "This release will have ValidationException thrown if certain invalid app types are provided. 
The release will also throw ValidationException if more than 10 account ids are provided in VpcOnlyTrustedAccounts.", + "type": "api-change" + } +] \ No newline at end of file diff -Nru awscli-2.15.9/.changes/1.32.20.json awscli-2.15.22/.changes/1.32.20.json --- awscli-2.15.9/.changes/1.32.20.json 1970-01-01 00:00:00.000000000 +0000 +++ awscli-2.15.22/.changes/1.32.20.json 2024-02-21 17:34:54.000000000 +0000 @@ -0,0 +1,42 @@ +[ + { + "category": "``iot``", + "description": "Revert release of LogTargetTypes", + "type": "api-change" + }, + { + "category": "``iotfleetwise``", + "description": "Updated APIs: SignalNodeType query parameter has been added to ListSignalCatalogNodesRequest and ListVehiclesResponse has been extended with attributes field.", + "type": "api-change" + }, + { + "category": "``macie2``", + "description": "This release adds support for analyzing Amazon S3 objects that are encrypted using dual-layer server-side encryption with AWS KMS keys (DSSE-KMS). It also adds support for reporting DSSE-KMS details in statistics and metadata about encryption settings for S3 buckets and objects.", + "type": "api-change" + }, + { + "category": "``payment-cryptography``", + "description": "Provide an additional option for key exchange using RSA wrap/unwrap in addition to tr-34/tr-31 in ImportKey and ExportKey operations. 
Added new key usage (type) TR31_M1_ISO_9797_1_MAC_KEY, for use with Generate/VerifyMac dataplane operations with ISO9797 Algorithm 1 MAC calculations.", + "type": "api-change" + }, + { + "category": "``personalize-runtime``", + "description": "Documentation updates for Amazon Personalize", + "type": "api-change" + }, + { + "category": "``personalize``", + "description": "Documentation updates for Amazon Personalize.", + "type": "api-change" + }, + { + "category": "``rekognition``", + "description": "This release adds ContentType and TaxonomyLevel attributes to DetectModerationLabels and GetMediaAnalysisJob API responses.", + "type": "api-change" + }, + { + "category": "``securityhub``", + "description": "Documentation updates for AWS Security Hub", + "type": "api-change" + } +] \ No newline at end of file diff -Nru awscli-2.15.9/.changes/1.32.21.json awscli-2.15.22/.changes/1.32.21.json --- awscli-2.15.9/.changes/1.32.21.json 1970-01-01 00:00:00.000000000 +0000 +++ awscli-2.15.22/.changes/1.32.21.json 2024-02-21 17:34:54.000000000 +0000 @@ -0,0 +1,12 @@ +[ + { + "category": "``dynamodb``", + "description": "Updating note for enabling streams for UpdateTable.", + "type": "api-change" + }, + { + "category": "``keyspaces``", + "description": "This release adds support for Multi-Region Replication with provisioned tables, and Keyspaces auto scaling APIs", + "type": "api-change" + } +] \ No newline at end of file diff -Nru awscli-2.15.9/.changes/1.32.22.json awscli-2.15.22/.changes/1.32.22.json --- awscli-2.15.9/.changes/1.32.22.json 1970-01-01 00:00:00.000000000 +0000 +++ awscli-2.15.22/.changes/1.32.22.json 2024-02-21 17:34:54.000000000 +0000 @@ -0,0 +1,32 @@ +[ + { + "category": "``b2bi``", + "description": "Increasing TestMapping inputFileContent file size limit to 5MB and adding file size limit 250KB for TestParsing input file. 
This release also includes exposing InternalServerException for Tag APIs.", + "type": "api-change" + }, + { + "category": "``cloudtrail``", + "description": "This release adds a new API ListInsightsMetricData to retrieve metric data from CloudTrail Insights.", + "type": "api-change" + }, + { + "category": "``connect``", + "description": "GetMetricDataV2 now supports 3 groupings", + "type": "api-change" + }, + { + "category": "``drs``", + "description": "Removed invalid and unnecessary default values.", + "type": "api-change" + }, + { + "category": "``firehose``", + "description": "Allow support for Snowflake as a Kinesis Data Firehose delivery destination.", + "type": "api-change" + }, + { + "category": "``sagemaker-featurestore-runtime``", + "description": "Increase BatchGetRecord limits from 10 items to 100 items", + "type": "api-change" + } +] \ No newline at end of file diff -Nru awscli-2.15.9/.changes/1.32.23.json awscli-2.15.22/.changes/1.32.23.json --- awscli-2.15.9/.changes/1.32.23.json 1970-01-01 00:00:00.000000000 +0000 +++ awscli-2.15.22/.changes/1.32.23.json 2024-02-21 17:34:54.000000000 +0000 @@ -0,0 +1,22 @@ +[ + { + "category": "``athena``", + "description": "Introducing new NotebookS3LocationUri parameter to Athena ImportNotebook API. Payload is no longer required and either Payload or NotebookS3LocationUri needs to be provided (not both) for a successful ImportNotebook API call. 
If both are provided, an InvalidRequestException will be thrown.", + "type": "api-change" + }, + { + "category": "``codebuild``", + "description": "Release CodeBuild Reserved Capacity feature", + "type": "api-change" + }, + { + "category": "``dynamodb``", + "description": "This release adds support for including ApproximateCreationDateTimePrecision configurations in EnableKinesisStreamingDestination API, adds the same as an optional field in the response of DescribeKinesisStreamingDestination, and adds support for a new UpdateKinesisStreamingDestination API.", + "type": "api-change" + }, + { + "category": "``qconnect``", + "description": "Increased Quick Response name max length to 100", + "type": "api-change" + } +] \ No newline at end of file diff -Nru awscli-2.15.9/.changes/1.32.24.json awscli-2.15.22/.changes/1.32.24.json --- awscli-2.15.9/.changes/1.32.24.json 1970-01-01 00:00:00.000000000 +0000 +++ awscli-2.15.22/.changes/1.32.24.json 2024-02-21 17:34:54.000000000 +0000 @@ -0,0 +1,52 @@ +[ + { + "category": "``appconfigdata``", + "description": "Fix FIPS Endpoints in aws-us-gov.", + "type": "api-change" + }, + { + "category": "``cloud9``", + "description": "Doc-only update around removing AL1 from list of available AMIs for Cloud9", + "type": "api-change" + }, + { + "category": "``cloudfront-keyvaluestore``", + "description": "This release improves upon the DescribeKeyValueStore API by returning two additional fields, Status of the KeyValueStore and the FailureReason in case of failures during creation of KeyValueStore.", + "type": "api-change" + }, + { + "category": "``connectcases``", + "description": "This release adds the ability to view audit history on a case and introduces a new parameter, performedBy, for CreateCase and UpdateCase API's.", + "type": "api-change" + }, + { + "category": "``ec2``", + "description": "Documentation updates for Amazon EC2.", + "type": "api-change" + }, + { + "category": "``ecs``", + "description": "This release adds support 
for Transport Layer Security (TLS) and Configurable Timeout to ECS Service Connect. TLS facilitates privacy and data security for inter-service communications, while Configurable Timeout allows customized per-request timeout and idle timeout for Service Connect services.", + "type": "api-change" + }, + { + "category": "``finspace``", + "description": "Allow customer to set zip default through command line arguments.", + "type": "api-change" + }, + { + "category": "``organizations``", + "description": "Doc only update for quota increase change", + "type": "api-change" + }, + { + "category": "``rds``", + "description": "Introduced support for the InsufficientDBInstanceCapacityFault error in the RDS CreateDBCluster API method. This provides enhanced error handling, ensuring a more robust experience when creating database clusters with insufficient instance capacity.", + "type": "api-change" + }, + { + "category": "``endpoint-rules``", + "description": "Update endpoint-rules command to latest version", + "type": "api-change" + } +] \ No newline at end of file diff -Nru awscli-2.15.9/.changes/1.32.25.json awscli-2.15.22/.changes/1.32.25.json --- awscli-2.15.9/.changes/1.32.25.json 1970-01-01 00:00:00.000000000 +0000 +++ awscli-2.15.22/.changes/1.32.25.json 2024-02-21 17:34:54.000000000 +0000 @@ -0,0 +1,7 @@ +[ + { + "category": "``s3 sync``", + "description": "Disable S3 Express support for s3 sync command", + "type": "bugfix" + } +] \ No newline at end of file diff -Nru awscli-2.15.9/.changes/1.32.26.json awscli-2.15.22/.changes/1.32.26.json --- awscli-2.15.9/.changes/1.32.26.json 1970-01-01 00:00:00.000000000 +0000 +++ awscli-2.15.22/.changes/1.32.26.json 2024-02-21 17:34:54.000000000 +0000 @@ -0,0 +1,7 @@ +[ + { + "category": "``inspector2``", + "description": "This release adds support for CIS scans on EC2 instances.", + "type": "api-change" + } +] \ No newline at end of file diff -Nru awscli-2.15.9/.changes/1.32.27.json awscli-2.15.22/.changes/1.32.27.json --- 
awscli-2.15.9/.changes/1.32.27.json 1970-01-01 00:00:00.000000000 +0000 +++ awscli-2.15.22/.changes/1.32.27.json 2024-02-21 17:34:54.000000000 +0000 @@ -0,0 +1,27 @@ +[ + { + "category": "``ec2``", + "description": "Introduced a new clientToken request parameter on CreateNetworkAcl and CreateRouteTable APIs. The clientToken parameter allows idempotent operations on the APIs.", + "type": "api-change" + }, + { + "category": "``ecs``", + "description": "Documentation updates for Amazon ECS.", + "type": "api-change" + }, + { + "category": "``outposts``", + "description": "DeviceSerialNumber parameter is now optional in StartConnection API", + "type": "api-change" + }, + { + "category": "``rds``", + "description": "This release adds support for Aurora Limitless Database.", + "type": "api-change" + }, + { + "category": "``storagegateway``", + "description": "Add DeprecationDate and SoftwareVersion to response of ListGateways.", + "type": "api-change" + } +] \ No newline at end of file diff -Nru awscli-2.15.9/.changes/1.32.28.json awscli-2.15.22/.changes/1.32.28.json --- awscli-2.15.9/.changes/1.32.28.json 1970-01-01 00:00:00.000000000 +0000 +++ awscli-2.15.22/.changes/1.32.28.json 2024-02-21 17:34:54.000000000 +0000 @@ -0,0 +1,12 @@ +[ + { + "category": "``acm-pca``", + "description": "AWS Private CA now supports an option to omit the CDP extension from issued certificates, when CRL revocation is enabled.", + "type": "api-change" + }, + { + "category": "``lightsail``", + "description": "This release adds support for IPv6-only instance plans.", + "type": "api-change" + } +] \ No newline at end of file diff -Nru awscli-2.15.9/.changes/1.32.29.json awscli-2.15.22/.changes/1.32.29.json --- awscli-2.15.9/.changes/1.32.29.json 1970-01-01 00:00:00.000000000 +0000 +++ awscli-2.15.22/.changes/1.32.29.json 2024-02-21 17:34:54.000000000 +0000 @@ -0,0 +1,17 @@ +[ + { + "category": "``connect``", + "description": "Update list and string length limits for predefined attributes.", + 
"type": "api-change" + }, + { + "category": "``inspector2``", + "description": "This release adds ECR container image scanning based on their lastRecordedPullTime.", + "type": "api-change" + }, + { + "category": "``sagemaker``", + "description": "Amazon SageMaker Automatic Model Tuning now provides an API to programmatically delete tuning jobs.", + "type": "api-change" + } +] \ No newline at end of file diff -Nru awscli-2.15.9/.changes/1.32.30.json awscli-2.15.22/.changes/1.32.30.json --- awscli-2.15.9/.changes/1.32.30.json 1970-01-01 00:00:00.000000000 +0000 +++ awscli-2.15.22/.changes/1.32.30.json 2024-02-21 17:34:54.000000000 +0000 @@ -0,0 +1,32 @@ +[ + { + "category": "``autoscaling``", + "description": "EC2 Auto Scaling customers who use attribute based instance-type selection can now intuitively define their Spot instances price protection limit as a percentage of the lowest priced On-Demand instance type.", + "type": "api-change" + }, + { + "category": "``comprehend``", + "description": "Comprehend PII analysis now supports Spanish input documents.", + "type": "api-change" + }, + { + "category": "``ec2``", + "description": "EC2 Fleet customers who use attribute based instance-type selection can now intuitively define their Spot instances price protection limit as a percentage of the lowest priced On-Demand instance type.", + "type": "api-change" + }, + { + "category": "``mwaa``", + "description": "This release adds MAINTENANCE environment status for Amazon MWAA environments.", + "type": "api-change" + }, + { + "category": "``rds``", + "description": "Introduced support for the InsufficientDBInstanceCapacityFault error in the RDS RestoreDBClusterFromSnapshot and RestoreDBClusterToPointInTime API methods. 
This provides enhanced error handling, ensuring a more robust experience.", + "type": "api-change" + }, + { + "category": "``snowball``", + "description": "Modified description of createaddress to include direction to add path when providing a JSON file.", + "type": "api-change" + } +] \ No newline at end of file diff -Nru awscli-2.15.9/.changes/1.32.31.json awscli-2.15.22/.changes/1.32.31.json --- awscli-2.15.9/.changes/1.32.31.json 1970-01-01 00:00:00.000000000 +0000 +++ awscli-2.15.22/.changes/1.32.31.json 2024-02-21 17:34:54.000000000 +0000 @@ -0,0 +1,12 @@ +[ + { + "category": "``datazone``", + "description": "Add new skipDeletionCheck to DeleteDomain. Add new skipDeletionCheck to DeleteProject which also automatically deletes dependent objects", + "type": "api-change" + }, + { + "category": "``route53``", + "description": "Update the SDKs for text changes in the APIs.", + "type": "api-change" + } +] \ No newline at end of file diff -Nru awscli-2.15.9/.changes/1.32.32.json awscli-2.15.22/.changes/1.32.32.json --- awscli-2.15.9/.changes/1.32.32.json 1970-01-01 00:00:00.000000000 +0000 +++ awscli-2.15.22/.changes/1.32.32.json 2024-02-21 17:34:54.000000000 +0000 @@ -0,0 +1,22 @@ +[ + { + "category": "``cloudformation``", + "description": "CloudFormation IaC generator allows you to scan existing resources in your account and select resources to generate a template for a new or existing CloudFormation stack.", + "type": "api-change" + }, + { + "category": "``elbv2``", + "description": "Update elbv2 command to latest version", + "type": "api-change" + }, + { + "category": "``glue``", + "description": "Update page size limits for GetJobRuns and GetTriggers APIs.", + "type": "api-change" + }, + { + "category": "``ssm``", + "description": "This release adds an optional Duration parameter to StateManager Associations. This allows customers to specify how long an apply-only-on-cron association execution should run. 
Once the specified Duration is out all the ongoing cancellable commands or automations are cancelled.", + "type": "api-change" + } +] \ No newline at end of file diff -Nru awscli-2.15.9/.changes/1.32.33.json awscli-2.15.22/.changes/1.32.33.json --- awscli-2.15.9/.changes/1.32.33.json 1970-01-01 00:00:00.000000000 +0000 +++ awscli-2.15.22/.changes/1.32.33.json 2024-02-21 17:34:54.000000000 +0000 @@ -0,0 +1,27 @@ +[ + { + "category": "``cognito-idp``", + "description": "Added CreateIdentityProvider and UpdateIdentityProvider details for new SAML IdP features", + "type": "api-change" + }, + { + "category": "``ivs``", + "description": "This release introduces a new resource Playback Restriction Policy which can be used to geo-restrict or domain-restrict channel stream playback when associated with a channel. New APIs to support this resource were introduced in the form of Create/Delete/Get/Update/List.", + "type": "api-change" + }, + { + "category": "``managedblockchain-query``", + "description": "This release adds support for transactions that have not reached finality. It also removes support for the status property from the response of the GetTransaction operation. You can use the confirmationStatus and executionStatus properties to determine the status of the transaction.", + "type": "api-change" + }, + { + "category": "``mediaconvert``", + "description": "This release includes support for broadcast-mixed audio description tracks.", + "type": "api-change" + }, + { + "category": "``neptune-graph``", + "description": "Adding new APIs in SDK for Amazon Neptune Analytics. 
These APIs include operations to execute, cancel, list queries and get the graph summary.", + "type": "api-change" + } +] \ No newline at end of file diff -Nru awscli-2.15.9/.changes/1.32.34.json awscli-2.15.22/.changes/1.32.34.json --- awscli-2.15.9/.changes/1.32.34.json 1970-01-01 00:00:00.000000000 +0000 +++ awscli-2.15.22/.changes/1.32.34.json 2024-02-21 17:34:54.000000000 +0000 @@ -0,0 +1,17 @@ +[ + { + "category": "``dynamodb``", + "description": "Any number of users can execute up to 50 concurrent restores (any type of restore) in a given account.", + "type": "api-change" + }, + { + "category": "``sagemaker``", + "description": "Amazon SageMaker Canvas adds GenerativeAiSettings support for CanvasAppSettings.", + "type": "api-change" + }, + { + "category": "``endpoint-rules``", + "description": "Update endpoint-rules command to latest version", + "type": "api-change" + } +] \ No newline at end of file diff -Nru awscli-2.15.9/.changes/1.32.35.json awscli-2.15.22/.changes/1.32.35.json --- awscli-2.15.9/.changes/1.32.35.json 1970-01-01 00:00:00.000000000 +0000 +++ awscli-2.15.22/.changes/1.32.35.json 2024-02-21 17:34:54.000000000 +0000 @@ -0,0 +1,12 @@ +[ + { + "category": "``glue``", + "description": "Introduce Catalog Encryption Role within Glue Data Catalog Settings. 
Introduce SASL/PLAIN as an authentication method for Glue Kafka connections", + "type": "api-change" + }, + { + "category": "``workspaces``", + "description": "Added definitions of various WorkSpace states", + "type": "api-change" + } +] \ No newline at end of file diff -Nru awscli-2.15.9/.changes/1.32.36.json awscli-2.15.22/.changes/1.32.36.json --- awscli-2.15.9/.changes/1.32.36.json 1970-01-01 00:00:00.000000000 +0000 +++ awscli-2.15.22/.changes/1.32.36.json 2024-02-21 17:34:54.000000000 +0000 @@ -0,0 +1,32 @@ +[ + { + "category": "``appsync``", + "description": "Support for environment variables in AppSync GraphQL APIs", + "type": "api-change" + }, + { + "category": "``ecs``", + "description": "This release is a documentation only update to address customer issues.", + "type": "api-change" + }, + { + "category": "``es``", + "description": "This release adds clear visibility to the customers on the changes that they make on the domain.", + "type": "api-change" + }, + { + "category": "``logs``", + "description": "This release adds a new field, logGroupArn, to the response of the logs:DescribeLogGroups action.", + "type": "api-change" + }, + { + "category": "``opensearch``", + "description": "This release adds clear visibility to the customers on the changes that they make on the domain.", + "type": "api-change" + }, + { + "category": "``wafv2``", + "description": "You can now delete an API key that you've created for use with your CAPTCHA JavaScript integration API.", + "type": "api-change" + } +] \ No newline at end of file diff -Nru awscli-2.15.9/.changes/1.32.37.json awscli-2.15.22/.changes/1.32.37.json --- awscli-2.15.9/.changes/1.32.37.json 1970-01-01 00:00:00.000000000 +0000 +++ awscli-2.15.22/.changes/1.32.37.json 2024-02-21 17:34:54.000000000 +0000 @@ -0,0 +1,17 @@ +[ + { + "category": "``datasync``", + "description": "AWS DataSync now supports manifests for specifying files or objects to transfer.", + "type": "api-change" + }, + { + "category": 
"``lexv2-models``", + "description": "Update lexv2-models command to latest version", + "type": "api-change" + }, + { + "category": "``redshift``", + "description": "LisRecommendations API to fetch Amazon Redshift Advisor recommendations.", + "type": "api-change" + } +] \ No newline at end of file diff -Nru awscli-2.15.9/.changes/1.32.38.json awscli-2.15.22/.changes/1.32.38.json --- awscli-2.15.9/.changes/1.32.38.json 1970-01-01 00:00:00.000000000 +0000 +++ awscli-2.15.22/.changes/1.32.38.json 2024-02-21 17:34:54.000000000 +0000 @@ -0,0 +1,17 @@ +[ + { + "category": "``codepipeline``", + "description": "Add ability to execute pipelines with new parallel & queued execution modes and add support for triggers with filtering on branches and file paths.", + "type": "api-change" + }, + { + "category": "``quicksight``", + "description": "General Interactions for Visuals; Waterfall Chart Color Configuration; Documentation Update", + "type": "api-change" + }, + { + "category": "``workspaces``", + "description": "This release introduces User-Decoupling feature. This feature allows Workspaces Core customers to provision workspaces without providing users. 
CreateWorkspaces and DescribeWorkspaces APIs will now take a new optional parameter \"WorkspaceName\".", + "type": "api-change" + } +] \ No newline at end of file diff -Nru awscli-2.15.9/.changes/1.32.39.json awscli-2.15.22/.changes/1.32.39.json --- awscli-2.15.9/.changes/1.32.39.json 1970-01-01 00:00:00.000000000 +0000 +++ awscli-2.15.22/.changes/1.32.39.json 2024-02-21 17:34:54.000000000 +0000 @@ -0,0 +1,37 @@ +[ + { + "category": "``amp``", + "description": "Overall documentation updates.", + "type": "api-change" + }, + { + "category": "``batch``", + "description": "This feature allows Batch to support configuration of repository credentials for jobs running on ECS", + "type": "api-change" + }, + { + "category": "``braket``", + "description": "Creating a job will result in DeviceOfflineException when using an offline device, and DeviceRetiredException when using a retired device.", + "type": "api-change" + }, + { + "category": "``cost-optimization-hub``", + "description": "Adding includeMemberAccounts field to the response of ListEnrollmentStatuses API.", + "type": "api-change" + }, + { + "category": "``ecs``", + "description": "Documentation only update for Amazon ECS.", + "type": "api-change" + }, + { + "category": "``iot``", + "description": "This release allows AWS IoT Core users to enable Online Certificate Status Protocol (OCSP) Stapling for TLS X.509 Server Certificates when creating and updating AWS IoT Domain Configurations with Custom Domain.", + "type": "api-change" + }, + { + "category": "``pricing``", + "description": "Add Throttling Exception to all APIs.", + "type": "api-change" + } +] \ No newline at end of file diff -Nru awscli-2.15.9/.changes/1.32.40.json awscli-2.15.22/.changes/1.32.40.json --- awscli-2.15.9/.changes/1.32.40.json 1970-01-01 00:00:00.000000000 +0000 +++ awscli-2.15.22/.changes/1.32.40.json 2024-02-21 17:34:54.000000000 +0000 @@ -0,0 +1,22 @@ +[ + { + "category": "``appsync``", + "description": "Adds support for new options on 
GraphqlAPIs, Resolvers and Data Sources for emitting Amazon CloudWatch metrics for enhanced monitoring of AppSync APIs.", + "type": "api-change" + }, + { + "category": "``cloudwatch``", + "description": "Update cloudwatch command to latest version", + "type": "api-change" + }, + { + "category": "``neptune-graph``", + "description": "Adding a new option \"parameters\" for data plane api ExecuteQuery to support running parameterized query via SDK.", + "type": "api-change" + }, + { + "category": "``route53domains``", + "description": "This release adds bill contact support for RegisterDomain, TransferDomain, UpdateDomainContact and GetDomainDetail API.", + "type": "api-change" + } +] \ No newline at end of file diff -Nru awscli-2.15.9/.changes/1.32.41.json awscli-2.15.22/.changes/1.32.41.json --- awscli-2.15.9/.changes/1.32.41.json 1970-01-01 00:00:00.000000000 +0000 +++ awscli-2.15.22/.changes/1.32.41.json 2024-02-21 17:34:54.000000000 +0000 @@ -0,0 +1,27 @@ +[ + { + "category": "``lightsail``", + "description": "This release adds support to upgrade the major version of a database.", + "type": "api-change" + }, + { + "category": "``marketplace-catalog``", + "description": "AWS Marketplace Catalog API now supports setting intent on requests", + "type": "api-change" + }, + { + "category": "``resource-explorer-2``", + "description": "Resource Explorer now uses newly supported IPv4 'amazonaws.com' endpoints by default.", + "type": "api-change" + }, + { + "category": "``securitylake``", + "description": "Documentation updates for Security Lake", + "type": "api-change" + }, + { + "category": "``endpoint-rules``", + "description": "Update endpoint-rules command to latest version", + "type": "api-change" + } +] \ No newline at end of file diff -Nru awscli-2.15.9/.changes/1.32.42.json awscli-2.15.22/.changes/1.32.42.json --- awscli-2.15.9/.changes/1.32.42.json 1970-01-01 00:00:00.000000000 +0000 +++ awscli-2.15.22/.changes/1.32.42.json 2024-02-21 17:34:54.000000000 +0000 @@ 
-0,0 +1,17 @@ +[ + { + "category": "``controltower``", + "description": "Adds support for new Baseline and EnabledBaseline APIs for automating multi-account governance.", + "type": "api-change" + }, + { + "category": "``lookoutequipment``", + "description": "This feature allows customers to see pointwise model diagnostics results for their models.", + "type": "api-change" + }, + { + "category": "``qbusiness``", + "description": "This release adds the metadata-boosting feature, which allows customers to easily fine-tune the underlying ranking of retrieved RAG passages in order to optimize Q&A answer relevance. It also adds new feedback reasons for the PutFeedback API.", + "type": "api-change" + } +] \ No newline at end of file diff -Nru awscli-2.15.9/.changes/1.32.43.json awscli-2.15.22/.changes/1.32.43.json --- awscli-2.15.9/.changes/1.32.43.json 1970-01-01 00:00:00.000000000 +0000 +++ awscli-2.15.22/.changes/1.32.43.json 2024-02-21 17:34:54.000000000 +0000 @@ -0,0 +1,52 @@ +[ + { + "category": "``artifact``", + "description": "This is the initial SDK release for AWS Artifact. AWS Artifact provides on-demand access to compliance and third-party compliance reports. This release includes access to List and Get reports, along with their metadata. This release also includes access to AWS Artifact notifications settings.", + "type": "api-change" + }, + { + "category": "``codepipeline``", + "description": "Add ability to override timeout on action level.", + "type": "api-change" + }, + { + "category": "``detective``", + "description": "Doc only updates for content enhancement", + "type": "api-change" + }, + { + "category": "``guardduty``", + "description": "Marked fields IpAddressV4, PrivateIpAddress, Email as Sensitive.", + "type": "api-change" + }, + { + "category": "``healthlake``", + "description": "This release adds a new response parameter, JobProgressReport, to the DescribeFHIRImportJob and ListFHIRImportJobs API operation. 
JobProgressReport provides details on the progress of the import job on the server.", + "type": "api-change" + }, + { + "category": "``opensearch``", + "description": "Adds additional supported instance types.", + "type": "api-change" + }, + { + "category": "``polly``", + "description": "Amazon Polly adds 1 new voice - Burcu (tr-TR)", + "type": "api-change" + }, + { + "category": "``sagemaker``", + "description": "This release adds a new API UpdateClusterSoftware for SageMaker HyperPod. This API allows users to patch HyperPod clusters with latest platform softwares.", + "type": "api-change" + }, + { + "category": "``secretsmanager``", + "description": "Doc only update for Secrets Manager", + "type": "api-change" + }, + { + "category": "``endpoint-rules``", + "description": "Update endpoint-rules command to latest version", + "type": "api-change" + } +] \ No newline at end of file diff -Nru awscli-2.15.9/.changes/1.32.44.json awscli-2.15.22/.changes/1.32.44.json --- awscli-2.15.9/.changes/1.32.44.json 1970-01-01 00:00:00.000000000 +0000 +++ awscli-2.15.22/.changes/1.32.44.json 2024-02-21 17:34:54.000000000 +0000 @@ -0,0 +1,32 @@ +[ + { + "category": "``connectparticipant``", + "description": "Doc only update to GetTranscript API reference guide to inform users about presence of events in the chat transcript.", + "type": "api-change" + }, + { + "category": "``emr``", + "description": "adds fine grained control over Unhealthy Node Replacement to Amazon ElasticMapReduce", + "type": "api-change" + }, + { + "category": "``firehose``", + "description": "This release adds support for Data Message Extraction for decompressed CloudWatch logs, and to use a custom file extension or time zone for S3 destinations.", + "type": "api-change" + }, + { + "category": "``lambda``", + "description": "Documentation-only updates for Lambda to clarify a number of existing actions and properties.", + "type": "api-change" + }, + { + "category": "``rds``", + "description": "Doc only update 
for a valid option in DB parameter group", + "type": "api-change" + }, + { + "category": "``sns``", + "description": "This release marks phone numbers as sensitive inputs.", + "type": "api-change" + } +] \ No newline at end of file diff -Nru awscli-2.15.9/.changes/1.32.45.json awscli-2.15.22/.changes/1.32.45.json --- awscli-2.15.9/.changes/1.32.45.json 1970-01-01 00:00:00.000000000 +0000 +++ awscli-2.15.22/.changes/1.32.45.json 2024-02-21 17:34:54.000000000 +0000 @@ -0,0 +1,32 @@ +[ + { + "category": "``amplify``", + "description": "This release contains API changes that enable users to configure their Amplify domains with their own custom SSL/TLS certificate.", + "type": "api-change" + }, + { + "category": "``chatbot``", + "description": "This release adds support for AWS Chatbot. You can now monitor, operate, and troubleshoot your AWS resources with interactive ChatOps using the AWS SDK.", + "type": "api-change" + }, + { + "category": "``config``", + "description": "Documentation updates for the AWS Config CLI", + "type": "api-change" + }, + { + "category": "``ivs``", + "description": "Changed description for latencyMode in Create/UpdateChannel and Channel/ChannelSummary.", + "type": "api-change" + }, + { + "category": "``keyspaces``", + "description": "Documentation updates for Amazon Keyspaces", + "type": "api-change" + }, + { + "category": "``mediatailor``", + "description": "MediaTailor: marking #AdBreak.OffsetMillis as required.", + "type": "api-change" + } +] \ No newline at end of file diff -Nru awscli-2.15.9/.changes/1.32.46.json awscli-2.15.22/.changes/1.32.46.json --- awscli-2.15.9/.changes/1.32.46.json 1970-01-01 00:00:00.000000000 +0000 +++ awscli-2.15.22/.changes/1.32.46.json 2024-02-21 17:34:54.000000000 +0000 @@ -0,0 +1,17 @@ +[ + { + "category": "``dynamodb``", + "description": "Publishing quick fix for doc only update.", + "type": "api-change" + }, + { + "category": "``firehose``", + "description": "This release updates a few Firehose related 
APIs.", + "type": "api-change" + }, + { + "category": "``lambda``", + "description": "Add .NET 8 (dotnet8) Runtime support to AWS Lambda.", + "type": "api-change" + } +] \ No newline at end of file diff -Nru awscli-2.15.9/.changes/2.15.10.json awscli-2.15.22/.changes/2.15.10.json --- awscli-2.15.9/.changes/2.15.10.json 1970-01-01 00:00:00.000000000 +0000 +++ awscli-2.15.22/.changes/2.15.10.json 2024-02-21 17:34:54.000000000 +0000 @@ -0,0 +1,72 @@ +[ + { + "category": "``iot``", + "description": "Add ConflictException to Update APIs of AWS IoT Software Package Catalog", + "type": "api-change" + }, + { + "category": "``ecs``", + "description": "This release adds support for adding an ElasticBlockStorage volume configurations in ECS RunTask/StartTask/CreateService/UpdateService APIs. The configuration allows for attaching EBS volumes to ECS Tasks.", + "type": "api-change" + }, + { + "category": "``workspaces``", + "description": "Added AWS Workspaces RebootWorkspaces API - Extended Reboot documentation update", + "type": "api-change" + }, + { + "category": "``iotfleetwise``", + "description": "The following dataTypes have been removed: CUSTOMER_DECODED_INTERFACE in NetworkInterfaceType; CUSTOMER_DECODED_SIGNAL_INFO_IS_NULL in SignalDecoderFailureReason; CUSTOMER_DECODED_SIGNAL_NETWORK_INTERFACE_INFO_IS_NULL in NetworkInterfaceFailureReason; CUSTOMER_DECODED_SIGNAL in SignalDecoderType", + "type": "api-change" + }, + { + "category": "``ec2``", + "description": "This release adds support for adding an ElasticBlockStorage volume configurations in ECS RunTask/StartTask/CreateService/UpdateService APIs. 
The configuration allows for attaching EBS volumes to ECS Tasks.", + "type": "api-change" + }, + { + "category": "``secretsmanager``", + "description": "Doc only update for Secrets Manager", + "type": "api-change" + }, + { + "category": "``events``", + "description": "Update events command to latest version", + "type": "api-change" + }, + { + "category": "``connectcampaigns``", + "description": "Minor pattern updates for Campaign and Dial Request API fields.", + "type": "api-change" + }, + { + "category": "``route53``", + "description": "Route53 now supports geoproximity routing in AWS regions", + "type": "api-change" + }, + { + "category": "``logs``", + "description": "Add support for account level subscription filter policies to PutAccountPolicy, DescribeAccountPolicies, and DeleteAccountPolicy APIs. Additionally, PutAccountPolicy has been modified with new optional \"selectionCriteria\" parameter for resource selection.", + "type": "api-change" + }, + { + "category": "``location``", + "description": "This release adds API support for custom layers for the maps service APIs: CreateMap, UpdateMap, DescribeMap.", + "type": "api-change" + }, + { + "category": "``wisdom``", + "description": "QueryAssistant and GetRecommendations will be discontinued starting June 1, 2024. To receive generative responses after March 1, 2024 you will need to create a new Assistant in the Connect console and integrate the Amazon Q in Connect JavaScript library (amazon-q-connectjs) into your applications.", + "type": "api-change" + }, + { + "category": "``redshift-serverless``", + "description": "Updates to ConfigParameter for RSS workgroup, removal of use_fips_ssl", + "type": "api-change" + }, + { + "category": "``qconnect``", + "description": "QueryAssistant and GetRecommendations will be discontinued starting June 1, 2024. 
To receive generative responses after March 1, 2024 you will need to create a new Assistant in the Connect console and integrate the Amazon Q in Connect JavaScript library (amazon-q-connectjs) into your applications.", + "type": "api-change" + } +] \ No newline at end of file diff -Nru awscli-2.15.9/.changes/2.15.11.json awscli-2.15.22/.changes/2.15.11.json --- awscli-2.15.9/.changes/2.15.11.json 1970-01-01 00:00:00.000000000 +0000 +++ awscli-2.15.22/.changes/2.15.11.json 2024-02-21 17:34:54.000000000 +0000 @@ -0,0 +1,87 @@ +[ + { + "category": "``iot``", + "description": "Revert release of LogTargetTypes", + "type": "api-change" + }, + { + "category": "``rekognition``", + "description": "This release adds ContentType and TaxonomyLevel attributes to DetectModerationLabels and GetMediaAnalysisJob API responses.", + "type": "api-change" + }, + { + "category": "``endpoint-rules``", + "description": "Update endpoint-rules command to latest version", + "type": "api-change" + }, + { + "category": "``location``", + "description": "Location SDK documentation update. Added missing fonts to the MapConfiguration data type. Updated note for the SubMunicipality property in the place data type.", + "type": "api-change" + }, + { + "category": "``supplychain``", + "description": "This release includes APIs CreateBillOfMaterialsImportJob and GetBillOfMaterialsImportJob.", + "type": "api-change" + }, + { + "category": "``s3control``", + "description": "S3 On Outposts team adds dualstack endpoints support for S3Control and S3Outposts API calls.", + "type": "api-change" + }, + { + "category": "``securityhub``", + "description": "Documentation updates for AWS Security Hub", + "type": "api-change" + }, + { + "category": "``sagemaker``", + "description": "This release will have ValidationException thrown if certain invalid app types are provided. 
The release will also throw ValidationException if more than 10 account ids are provided in VpcOnlyTrustedAccounts.", + "type": "api-change" + }, + { + "category": "``mwaa``", + "description": "This Amazon MWAA feature release includes new fields in CreateWebLoginToken response model. The new fields IamIdentity and AirflowIdentity will let you match identifications, as the Airflow identity length is currently hashed to 64 characters.", + "type": "api-change" + }, + { + "category": "``transfer``", + "description": "AWS Transfer Family now supports static IP addresses for SFTP & AS2 connectors and for async MDNs on AS2 servers.", + "type": "api-change" + }, + { + "category": "``personalize-runtime``", + "description": "Documentation updates for Amazon Personalize", + "type": "api-change" + }, + { + "category": "``personalize``", + "description": "Documentation updates for Amazon Personalize.", + "type": "api-change" + }, + { + "category": "``connect``", + "description": "Supervisor Barge for Chat is now supported through the MonitorContact API.", + "type": "api-change" + }, + { + "category": "``connectparticipant``", + "description": "Introduce new Supervisor participant role", + "type": "api-change" + }, + { + "category": "``iotfleetwise``", + "description": "Updated APIs: SignalNodeType query parameter has been added to ListSignalCatalogNodesRequest and ListVehiclesResponse has been extended with attributes field.", + "type": "api-change" + }, + { + "category": "``payment-cryptography``", + "description": "Provide an additional option for key exchange using RSA wrap/unwrap in addition to tr-34/tr-31 in ImportKey and ExportKey operations. 
Added new key usage (type) TR31_M1_ISO_9797_1_MAC_KEY, for use with Generate/VerifyMac dataplane operations with ISO9797 Algorithm 1 MAC calculations.", + "type": "api-change" + }, + { + "category": "``macie2``", + "description": "This release adds support for analyzing Amazon S3 objects that are encrypted using dual-layer server-side encryption with AWS KMS keys (DSSE-KMS). It also adds support for reporting DSSE-KMS details in statistics and metadata about encryption settings for S3 buckets and objects.", + "type": "api-change" + } +] \ No newline at end of file diff -Nru awscli-2.15.9/.changes/2.15.12.json awscli-2.15.22/.changes/2.15.12.json --- awscli-2.15.9/.changes/2.15.12.json 1970-01-01 00:00:00.000000000 +0000 +++ awscli-2.15.22/.changes/2.15.12.json 2024-02-21 17:34:54.000000000 +0000 @@ -0,0 +1,42 @@ +[ + { + "category": "``keyspaces``", + "description": "This release adds support for Multi-Region Replication with provisioned tables, and Keyspaces auto scaling APIs", + "type": "api-change" + }, + { + "category": "``b2bi``", + "description": "Increasing TestMapping inputFileContent file size limit to 5MB and adding file size limit 250KB for TestParsing input file. 
This release also includes exposing InternalServerException for Tag APIs.", + "type": "api-change" + }, + { + "category": "``connect``", + "description": "GetMetricDataV2 now supports 3 groupings", + "type": "api-change" + }, + { + "category": "``cloudtrail``", + "description": "This release adds a new API ListInsightsMetricData to retrieve metric data from CloudTrail Insights.", + "type": "api-change" + }, + { + "category": "``dynamodb``", + "description": "Updating note for enabling streams for UpdateTable.", + "type": "api-change" + }, + { + "category": "``firehose``", + "description": "Allow support for Snowflake as a Kinesis Data Firehose delivery destination.", + "type": "api-change" + }, + { + "category": "``sagemaker-featurestore-runtime``", + "description": "Increase BatchGetRecord limits from 10 items to 100 items", + "type": "api-change" + }, + { + "category": "``drs``", + "description": "Removed invalid and unnecessary default values.", + "type": "api-change" + } +] \ No newline at end of file diff -Nru awscli-2.15.9/.changes/2.15.13.json awscli-2.15.22/.changes/2.15.13.json --- awscli-2.15.9/.changes/2.15.13.json 1970-01-01 00:00:00.000000000 +0000 +++ awscli-2.15.22/.changes/2.15.13.json 2024-02-21 17:34:54.000000000 +0000 @@ -0,0 +1,7 @@ +[ + { + "category": "``s3 sync``", + "description": "Disable S3 Express support for s3 sync command", + "type": "bugfix" + } +] \ No newline at end of file diff -Nru awscli-2.15.9/.changes/2.15.14.json awscli-2.15.22/.changes/2.15.14.json --- awscli-2.15.9/.changes/2.15.14.json 1970-01-01 00:00:00.000000000 +0000 +++ awscli-2.15.22/.changes/2.15.14.json 2024-02-21 17:34:54.000000000 +0000 @@ -0,0 +1,77 @@ +[ + { + "category": "``cloudfront-keyvaluestore``", + "description": "This release improves upon the DescribeKeyValueStore API by returning two additional fields, Status of the KeyValueStore and the FailureReason in case of failures during creation of KeyValueStore.", + "type": "api-change" + }, + { + "category": 
"``codebuild``", + "description": "Release CodeBuild Reserved Capacity feature", + "type": "api-change" + }, + { + "category": "``endpoint-rules``", + "description": "Update endpoint-rules command to latest version", + "type": "api-change" + }, + { + "category": "``cloud9``", + "description": "Doc-only update around removing AL1 from list of available AMIs for Cloud9", + "type": "api-change" + }, + { + "category": "``qconnect``", + "description": "Increased Quick Response name max length to 100", + "type": "api-change" + }, + { + "category": "``ec2``", + "description": "Documentation updates for Amazon EC2.", + "type": "api-change" + }, + { + "category": "``appconfigdata``", + "description": "Fix FIPS Endpoints in aws-us-gov.", + "type": "api-change" + }, + { + "category": "``ecs``", + "description": "This release adds support for Transport Layer Security (TLS) and Configurable Timeout to ECS Service Connect. TLS facilitates privacy and data security for inter-service communications, while Configurable Timeout allows customized per-request timeout and idle timeout for Service Connect services.", + "type": "api-change" + }, + { + "category": "``organizations``", + "description": "Doc only update for quota increase change", + "type": "api-change" + }, + { + "category": "``athena``", + "description": "Introducing new NotebookS3LocationUri parameter to Athena ImportNotebook API. Payload is no longer required and either Payload or NotebookS3LocationUri needs to be provided (not both) for a successful ImportNotebook API call. 
If both are provided, an InvalidRequestException will be thrown.", + "type": "api-change" + }, + { + "category": "``dynamodb``", + "description": "This release adds support for including ApproximateCreationDateTimePrecision configurations in EnableKinesisStreamingDestination API, adds the same as an optional field in the response of DescribeKinesisStreamingDestination, and adds support for a new UpdateKinesisStreamingDestination API.", + "type": "api-change" + }, + { + "category": "``connectcases``", + "description": "This release adds the ability to view audit history on a case and introduces a new parameter, performedBy, for CreateCase and UpdateCase API's.", + "type": "api-change" + }, + { + "category": "``inspector2``", + "description": "This release adds support for CIS scans on EC2 instances.", + "type": "api-change" + }, + { + "category": "``rds``", + "description": "Introduced support for the InsufficientDBInstanceCapacityFault error in the RDS CreateDBCluster API method. This provides enhanced error handling, ensuring a more robust experience when creating database clusters with insufficient instance capacity.", + "type": "api-change" + }, + { + "category": "``finspace``", + "description": "Allow customer to set zip default through command line arguments.", + "type": "api-change" + } +] \ No newline at end of file diff -Nru awscli-2.15.9/.changes/2.15.15.json awscli-2.15.22/.changes/2.15.15.json --- awscli-2.15.9/.changes/2.15.15.json 1970-01-01 00:00:00.000000000 +0000 +++ awscli-2.15.22/.changes/2.15.15.json 2024-02-21 17:34:54.000000000 +0000 @@ -0,0 +1,37 @@ +[ + { + "category": "``rds``", + "description": "This release adds support for Aurora Limitless Database.", + "type": "api-change" + }, + { + "category": "``storagegateway``", + "description": "Add DeprecationDate and SoftwareVersion to response of ListGateways.", + "type": "api-change" + }, + { + "category": "``ec2``", + "description": "Introduced a new clientToken request parameter on 
CreateNetworkAcl and CreateRouteTable APIs. The clientToken parameter allows idempotent operations on the APIs.", + "type": "api-change" + }, + { + "category": "``acm-pca``", + "description": "AWS Private CA now supports an option to omit the CDP extension from issued certificates, when CRL revocation is enabled.", + "type": "api-change" + }, + { + "category": "``lightsail``", + "description": "This release adds support for IPv6-only instance plans.", + "type": "api-change" + }, + { + "category": "``outposts``", + "description": "DeviceSerialNumber parameter is now optional in StartConnection API", + "type": "api-change" + }, + { + "category": "``ecs``", + "description": "Documentation updates for Amazon ECS.", + "type": "api-change" + } +] \ No newline at end of file diff -Nru awscli-2.15.9/.changes/2.15.16.json awscli-2.15.22/.changes/2.15.16.json --- awscli-2.15.9/.changes/2.15.16.json 1970-01-01 00:00:00.000000000 +0000 +++ awscli-2.15.22/.changes/2.15.16.json 2024-02-21 17:34:54.000000000 +0000 @@ -0,0 +1,57 @@ +[ + { + "category": "``mwaa``", + "description": "This release adds MAINTENANCE environment status for Amazon MWAA environments.", + "type": "api-change" + }, + { + "category": "``rds``", + "description": "Introduced support for the InsufficientDBInstanceCapacityFault error in the RDS RestoreDBClusterFromSnapshot and RestoreDBClusterToPointInTime API methods. 
This provides enhanced error handling, ensuring a more robust experience.", + "type": "api-change" + }, + { + "category": "``inspector2``", + "description": "This release adds ECR container image scanning based on their lastRecordedPullTime.", + "type": "api-change" + }, + { + "category": "``connect``", + "description": "Update list and string length limits for predefined attributes.", + "type": "api-change" + }, + { + "category": "``sagemaker``", + "description": "Amazon SageMaker Automatic Model Tuning now provides an API to programmatically delete tuning jobs.", + "type": "api-change" + }, + { + "category": "``comprehend``", + "description": "Comprehend PII analysis now supports Spanish input documents.", + "type": "api-change" + }, + { + "category": "``route53``", + "description": "Update the SDKs for text changes in the APIs.", + "type": "api-change" + }, + { + "category": "``snowball``", + "description": "Modified description of createaddress to include direction to add path when providing a JSON file.", + "type": "api-change" + }, + { + "category": "``ec2``", + "description": "EC2 Fleet customers who use attribute based instance-type selection can now intuitively define their Spot instances price protection limit as a percentage of the lowest priced On-Demand instance type.", + "type": "api-change" + }, + { + "category": "``datazone``", + "description": "Add new skipDeletionCheck to DeleteDomain. 
Add new skipDeletionCheck to DeleteProject which also automatically deletes dependent objects", + "type": "api-change" + }, + { + "category": "``autoscaling``", + "description": "EC2 Auto Scaling customers who use attribute based instance-type selection can now intuitively define their Spot instances price protection limit as a percentage of the lowest priced On-Demand instance type.", + "type": "api-change" + } +] \ No newline at end of file diff -Nru awscli-2.15.9/.changes/2.15.17.json awscli-2.15.22/.changes/2.15.17.json --- awscli-2.15.9/.changes/2.15.17.json 1970-01-01 00:00:00.000000000 +0000 +++ awscli-2.15.22/.changes/2.15.17.json 2024-02-21 17:34:54.000000000 +0000 @@ -0,0 +1,47 @@ +[ + { + "category": "``ssm``", + "description": "This release adds an optional Duration parameter to StateManager Associations. This allows customers to specify how long an apply-only-on-cron association execution should run. Once the specified Duration is out all the ongoing cancellable commands or automations are cancelled.", + "type": "api-change" + }, + { + "category": "``neptune-graph``", + "description": "Adding new APIs in SDK for Amazon Neptune Analytics. These APIs include operations to execute, cancel, list queries and get the graph summary.", + "type": "api-change" + }, + { + "category": "``cognito-idp``", + "description": "Added CreateIdentityProvider and UpdateIdentityProvider details for new SAML IdP features", + "type": "api-change" + }, + { + "category": "``elbv2``", + "description": "Update elbv2 command to latest version", + "type": "api-change" + }, + { + "category": "``glue``", + "description": "Update page size limits for GetJobRuns and GetTriggers APIs.", + "type": "api-change" + }, + { + "category": "``managedblockchain-query``", + "description": "This release adds support for transactions that have not reached finality. It also removes support for the status property from the response of the GetTransaction operation. 
You can use the confirmationStatus and executionStatus properties to determine the status of the transaction.", + "type": "api-change" + }, + { + "category": "``cloudformation``", + "description": "CloudFormation IaC generator allows you to scan existing resources in your account and select resources to generate a template for a new or existing CloudFormation stack.", + "type": "api-change" + }, + { + "category": "``ivs``", + "description": "This release introduces a new resource Playback Restriction Policy which can be used to geo-restrict or domain-restrict channel stream playback when associated with a channel. New APIs to support this resource were introduced in the form of Create/Delete/Get/Update/List.", + "type": "api-change" + }, + { + "category": "``mediaconvert``", + "description": "This release includes support for broadcast-mixed audio description tracks.", + "type": "api-change" + } +] \ No newline at end of file diff -Nru awscli-2.15.9/.changes/2.15.18.json awscli-2.15.22/.changes/2.15.18.json --- awscli-2.15.9/.changes/2.15.18.json 1970-01-01 00:00:00.000000000 +0000 +++ awscli-2.15.22/.changes/2.15.18.json 2024-02-21 17:34:54.000000000 +0000 @@ -0,0 +1,62 @@ +[ + { + "category": "``ecs``", + "description": "This release is a documentation only update to address customer issues.", + "type": "api-change" + }, + { + "category": "``workspaces``", + "description": "Added definitions of various WorkSpace states", + "type": "api-change" + }, + { + "category": "``es``", + "description": "This release adds clear visibility to the customers on the changes that they make on the domain.", + "type": "api-change" + }, + { + "category": "``dynamodb``", + "description": "Any number of users can execute up to 50 concurrent restores (any type of restore) in a given account.", + "type": "api-change" + }, + { + "category": "``sagemaker``", + "description": "Amazon SageMaker Canvas adds GenerativeAiSettings support for CanvasAppSettings.", + "type": "api-change" + }, + 
{ + "category": "``appsync``", + "description": "Support for environment variables in AppSync GraphQL APIs", + "type": "api-change" + }, + { + "category": "``endpoint-rules``", + "description": "Update endpoint-rules command to latest version", + "type": "api-change" + }, + { + "category": "dependency", + "description": "Update ``flit_core`` version range ceiling to 3.9.0", + "type": "enhancement" + }, + { + "category": "``logs``", + "description": "This release adds a new field, logGroupArn, to the response of the logs:DescribeLogGroups action.", + "type": "api-change" + }, + { + "category": "``wafv2``", + "description": "You can now delete an API key that you've created for use with your CAPTCHA JavaScript integration API.", + "type": "api-change" + }, + { + "category": "``opensearch``", + "description": "This release adds clear visibility to the customers on the changes that they make on the domain.", + "type": "api-change" + }, + { + "category": "``glue``", + "description": "Introduce Catalog Encryption Role within Glue Data Catalog Settings. Introduce SASL/PLAIN as an authentication method for Glue Kafka connections", + "type": "api-change" + } +] \ No newline at end of file diff -Nru awscli-2.15.9/.changes/2.15.19.json awscli-2.15.22/.changes/2.15.19.json --- awscli-2.15.9/.changes/2.15.19.json 1970-01-01 00:00:00.000000000 +0000 +++ awscli-2.15.22/.changes/2.15.19.json 2024-02-21 17:34:54.000000000 +0000 @@ -0,0 +1,32 @@ +[ + { + "category": "``quicksight``", + "description": "General Interactions for Visuals; Waterfall Chart Color Configuration; Documentation Update", + "type": "api-change" + }, + { + "category": "``workspaces``", + "description": "This release introduces User-Decoupling feature. This feature allows Workspaces Core customers to provision workspaces without providing users. 
CreateWorkspaces and DescribeWorkspaces APIs will now take a new optional parameter \"WorkspaceName\".", + "type": "api-change" + }, + { + "category": "``datasync``", + "description": "AWS DataSync now supports manifests for specifying files or objects to transfer.", + "type": "api-change" + }, + { + "category": "``codepipeline``", + "description": "Add ability to execute pipelines with new parallel & queued execution modes and add support for triggers with filtering on branches and file paths.", + "type": "api-change" + }, + { + "category": "``redshift``", + "description": "LisRecommendations API to fetch Amazon Redshift Advisor recommendations.", + "type": "api-change" + }, + { + "category": "``lexv2-models``", + "description": "Update lexv2-models command to latest version", + "type": "api-change" + } +] \ No newline at end of file diff -Nru awscli-2.15.9/.changes/2.15.20.json awscli-2.15.22/.changes/2.15.20.json --- awscli-2.15.9/.changes/2.15.20.json 1970-01-01 00:00:00.000000000 +0000 +++ awscli-2.15.22/.changes/2.15.20.json 2024-02-21 17:34:54.000000000 +0000 @@ -0,0 +1,82 @@ +[ + { + "category": "``appsync``", + "description": "Adds support for new options on GraphqlAPIs, Resolvers and Data Sources for emitting Amazon CloudWatch metrics for enhanced monitoring of AppSync APIs.", + "type": "api-change" + }, + { + "category": "``neptune-graph``", + "description": "Adding a new option \"parameters\" for data plane api ExecuteQuery to support running parameterized query via SDK.", + "type": "api-change" + }, + { + "category": "``pricing``", + "description": "Add Throttling Exception to all APIs.", + "type": "api-change" + }, + { + "category": "``marketplace-catalog``", + "description": "AWS Marketplace Catalog API now supports setting intent on requests", + "type": "api-change" + }, + { + "category": "``endpoint-rules``", + "description": "Update endpoint-rules command to latest version", + "type": "api-change" + }, + { + "category": 
"``cost-optimization-hub``", + "description": "Adding includeMemberAccounts field to the response of ListEnrollmentStatuses API.", + "type": "api-change" + }, + { + "category": "``braket``", + "description": "Creating a job will result in DeviceOfflineException when using an offline device, and DeviceRetiredException when using a retired device.", + "type": "api-change" + }, + { + "category": "``securitylake``", + "description": "Documentation updates for Security Lake", + "type": "api-change" + }, + { + "category": "``resource-explorer-2``", + "description": "Resource Explorer now uses newly supported IPv4 'amazonaws.com' endpoints by default.", + "type": "api-change" + }, + { + "category": "``amp``", + "description": "Overall documentation updates.", + "type": "api-change" + }, + { + "category": "``cloudwatch``", + "description": "Update cloudwatch command to latest version", + "type": "api-change" + }, + { + "category": "``route53domains``", + "description": "This release adds bill contact support for RegisterDomain, TransferDomain, UpdateDomainContact and GetDomainDetail API.", + "type": "api-change" + }, + { + "category": "``ecs``", + "description": "Documentation only update for Amazon ECS.", + "type": "api-change" + }, + { + "category": "``lightsail``", + "description": "This release adds support to upgrade the major version of a database.", + "type": "api-change" + }, + { + "category": "``iot``", + "description": "This release allows AWS IoT Core users to enable Online Certificate Status Protocol (OCSP) Stapling for TLS X.509 Server Certificates when creating and updating AWS IoT Domain Configurations with Custom Domain.", + "type": "api-change" + }, + { + "category": "``batch``", + "description": "This feature allows Batch to support configuration of repository credentials for jobs running on ECS", + "type": "api-change" + } +] \ No newline at end of file diff -Nru awscli-2.15.9/.changes/2.15.21.json awscli-2.15.22/.changes/2.15.21.json --- 
awscli-2.15.9/.changes/2.15.21.json 1970-01-01 00:00:00.000000000 +0000 +++ awscli-2.15.22/.changes/2.15.21.json 2024-02-21 17:34:54.000000000 +0000 @@ -0,0 +1,67 @@ +[ + { + "category": "``secretsmanager``", + "description": "Doc only update for Secrets Manager", + "type": "api-change" + }, + { + "category": "``opensearch``", + "description": "Adds additional supported instance types.", + "type": "api-change" + }, + { + "category": "``codepipeline``", + "description": "Add ability to override timeout on action level.", + "type": "api-change" + }, + { + "category": "``endpoint-rules``", + "description": "Update endpoint-rules command to latest version", + "type": "api-change" + }, + { + "category": "``lookoutequipment``", + "description": "This feature allows customers to see pointwise model diagnostics results for their models.", + "type": "api-change" + }, + { + "category": "``artifact``", + "description": "This is the initial SDK release for AWS Artifact. AWS Artifact provides on-demand access to compliance and third-party compliance reports. This release includes access to List and Get reports, along with their metadata. This release also includes access to AWS Artifact notifications settings.", + "type": "api-change" + }, + { + "category": "``detective``", + "description": "Doc only updates for content enhancement", + "type": "api-change" + }, + { + "category": "``healthlake``", + "description": "This release adds a new response parameter, JobProgressReport, to the DescribeFHIRImportJob and ListFHIRImportJobs API operation. JobProgressReport provides details on the progress of the import job on the server.", + "type": "api-change" + }, + { + "category": "``polly``", + "description": "Amazon Polly adds 1 new voice - Burcu (tr-TR)", + "type": "api-change" + }, + { + "category": "``sagemaker``", + "description": "This release adds a new API UpdateClusterSoftware for SageMaker HyperPod. 
This API allows users to patch HyperPod clusters with latest platform softwares.", + "type": "api-change" + }, + { + "category": "``qbusiness``", + "description": "This release adds the metadata-boosting feature, which allows customers to easily fine-tune the underlying ranking of retrieved RAG passages in order to optimize Q&A answer relevance. It also adds new feedback reasons for the PutFeedback API.", + "type": "api-change" + }, + { + "category": "``guardduty``", + "description": "Marked fields IpAddressV4, PrivateIpAddress, Email as Sensitive.", + "type": "api-change" + }, + { + "category": "``controltower``", + "description": "Adds support for new Baseline and EnabledBaseline APIs for automating multi-account governance.", + "type": "api-change" + } +] \ No newline at end of file diff -Nru awscli-2.15.9/.changes/2.15.22.json awscli-2.15.22/.changes/2.15.22.json --- awscli-2.15.9/.changes/2.15.22.json 1970-01-01 00:00:00.000000000 +0000 +++ awscli-2.15.22/.changes/2.15.22.json 2024-02-21 17:34:54.000000000 +0000 @@ -0,0 +1,77 @@ +[ + { + "category": "``keyspaces``", + "description": "Documentation updates for Amazon Keyspaces", + "type": "api-change" + }, + { + "category": "``firehose``", + "description": "This release adds support for Data Message Extraction for decompressed CloudWatch logs, and to use a custom file extension or time zone for S3 destinations.", + "type": "api-change" + }, + { + "category": "``dynamodb``", + "description": "Publishing quick fix for doc only update.", + "type": "api-change" + }, + { + "category": "``ivs``", + "description": "Changed description for latencyMode in Create/UpdateChannel and Channel/ChannelSummary.", + "type": "api-change" + }, + { + "category": "``firehose``", + "description": "This release updates a few Firehose related APIs.", + "type": "api-change" + }, + { + "category": "``chatbot``", + "description": "This release adds support for AWS Chatbot. 
You can now monitor, operate, and troubleshoot your AWS resources with interactive ChatOps using the AWS SDK.", + "type": "api-change" + }, + { + "category": "``sns``", + "description": "This release marks phone numbers as sensitive inputs.", + "type": "api-change" + }, + { + "category": "``lambda``", + "description": "Add .NET 8 (dotnet8) Runtime support to AWS Lambda.", + "type": "api-change" + }, + { + "category": "``amplify``", + "description": "This release contains API changes that enable users to configure their Amplify domains with their own custom SSL/TLS certificate.", + "type": "api-change" + }, + { + "category": "``lambda``", + "description": "Documentation-only updates for Lambda to clarify a number of existing actions and properties.", + "type": "api-change" + }, + { + "category": "``emr``", + "description": "adds fine grained control over Unhealthy Node Replacement to Amazon ElasticMapReduce", + "type": "api-change" + }, + { + "category": "``config``", + "description": "Documentation updates for the AWS Config CLI", + "type": "api-change" + }, + { + "category": "``rds``", + "description": "Doc only update for a valid option in DB parameter group", + "type": "api-change" + }, + { + "category": "``connectparticipant``", + "description": "Doc only update to GetTranscript API reference guide to inform users about presence of events in the chat transcript.", + "type": "api-change" + }, + { + "category": "``mediatailor``", + "description": "MediaTailor: marking #AdBreak.OffsetMillis as required.", + "type": "api-change" + } +] \ No newline at end of file diff -Nru awscli-2.15.9/.github/workflows/closed-issue-message.yml awscli-2.15.22/.github/workflows/closed-issue-message.yml --- awscli-2.15.9/.github/workflows/closed-issue-message.yml 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/.github/workflows/closed-issue-message.yml 2024-02-21 17:34:54.000000000 +0000 @@ -11,6 +11,5 @@ # These inputs are both required repo-token: "${{ secrets.GITHUB_TOKEN 
}}" message: | - ### ⚠️COMMENT VISIBILITY WARNING⚠️ - Comments on closed issues are hard for our team to see. - If you need more assistance, please open a new issue that references this one. If you wish to keep having a conversation with other community members under this issue feel free to do so. + This issue is now closed. Comments on closed issues are hard for our team to see. + If you need more assistance, please open a new issue that references this one. diff -Nru awscli-2.15.9/.github/workflows/run-dep-tests.yml awscli-2.15.22/.github/workflows/run-dep-tests.yml --- awscli-2.15.9/.github/workflows/run-dep-tests.yml 1970-01-01 00:00:00.000000000 +0000 +++ awscli-2.15.22/.github/workflows/run-dep-tests.yml 2024-02-21 17:34:54.000000000 +0000 @@ -0,0 +1,27 @@ +name: Run dependency tests + +on: + push: + pull_request: + branches-ignore: [ master ] + +jobs: + build: + + runs-on: ${{ matrix.os }} + strategy: + fail-fast: false + matrix: + python-version: ["3.8", "3.9", "3.10", "3.11"] + os: [ubuntu-latest, macOS-latest, windows-latest] + + steps: + - uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608 + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c + with: + python-version: ${{ matrix.python-version }} + - name: Install dependencies + run: python scripts/ci/install + - name: Run tests + run: python scripts/ci/run-dep-tests diff -Nru awscli-2.15.9/.github/workflows/run-tests.yml awscli-2.15.22/.github/workflows/run-tests.yml --- awscli-2.15.9/.github/workflows/run-tests.yml 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/.github/workflows/run-tests.yml 2024-02-21 17:34:54.000000000 +0000 @@ -32,6 +32,6 @@ pip uninstall -y awscli python scripts/ci/run-backend-tests - name: codecov - uses: codecov/codecov-action@v2 + uses: codecov/codecov-action@v4 with: directory: tests diff -Nru awscli-2.15.9/.github/workflows/stale_issue.yml awscli-2.15.22/.github/workflows/stale_issue.yml 
--- awscli-2.15.9/.github/workflows/stale_issue.yml 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/.github/workflows/stale_issue.yml 2024-02-21 17:34:54.000000000 +0000 @@ -29,8 +29,8 @@ closed-for-staleness-label: closed-for-staleness # Issue timing - days-before-stale: 5 - days-before-close: 2 + days-before-stale: 10 + days-before-close: 4 # If you don't want to mark a issue as being ancient based on a # threshold of "upvotes", you can set this here. An "upvote" is diff -Nru awscli-2.15.9/CHANGELOG.rst awscli-2.15.22/CHANGELOG.rst --- awscli-2.15.9/CHANGELOG.rst 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/CHANGELOG.rst 2024-02-21 17:34:54.000000000 +0000 @@ -2,6 +2,215 @@ CHANGELOG ========= +2.15.22 +======= + +* api-change:``keyspaces``: Documentation updates for Amazon Keyspaces +* api-change:``firehose``: This release adds support for Data Message Extraction for decompressed CloudWatch logs, and to use a custom file extension or time zone for S3 destinations. +* api-change:``dynamodb``: Publishing quick fix for doc only update. +* api-change:``ivs``: Changed description for latencyMode in Create/UpdateChannel and Channel/ChannelSummary. +* api-change:``firehose``: This release updates a few Firehose related APIs. +* api-change:``chatbot``: This release adds support for AWS Chatbot. You can now monitor, operate, and troubleshoot your AWS resources with interactive ChatOps using the AWS SDK. +* api-change:``sns``: This release marks phone numbers as sensitive inputs. +* api-change:``lambda``: Add .NET 8 (dotnet8) Runtime support to AWS Lambda. +* api-change:``amplify``: This release contains API changes that enable users to configure their Amplify domains with their own custom SSL/TLS certificate. +* api-change:``lambda``: Documentation-only updates for Lambda to clarify a number of existing actions and properties. 
+* api-change:``emr``: adds fine grained control over Unhealthy Node Replacement to Amazon ElasticMapReduce +* api-change:``config``: Documentation updates for the AWS Config CLI +* api-change:``rds``: Doc only update for a valid option in DB parameter group +* api-change:``connectparticipant``: Doc only update to GetTranscript API reference guide to inform users about presence of events in the chat transcript. +* api-change:``mediatailor``: MediaTailor: marking #AdBreak.OffsetMillis as required. + + +2.15.21 +======= + +* api-change:``secretsmanager``: Doc only update for Secrets Manager +* api-change:``opensearch``: Adds additional supported instance types. +* api-change:``codepipeline``: Add ability to override timeout on action level. +* api-change:``endpoint-rules``: Update endpoint-rules command to latest version +* api-change:``lookoutequipment``: This feature allows customers to see pointwise model diagnostics results for their models. +* api-change:``artifact``: This is the initial SDK release for AWS Artifact. AWS Artifact provides on-demand access to compliance and third-party compliance reports. This release includes access to List and Get reports, along with their metadata. This release also includes access to AWS Artifact notifications settings. +* api-change:``detective``: Doc only updates for content enhancement +* api-change:``healthlake``: This release adds a new response parameter, JobProgressReport, to the DescribeFHIRImportJob and ListFHIRImportJobs API operation. JobProgressReport provides details on the progress of the import job on the server. +* api-change:``polly``: Amazon Polly adds 1 new voice - Burcu (tr-TR) +* api-change:``sagemaker``: This release adds a new API UpdateClusterSoftware for SageMaker HyperPod. This API allows users to patch HyperPod clusters with latest platform softwares. 
+* api-change:``qbusiness``: This release adds the metadata-boosting feature, which allows customers to easily fine-tune the underlying ranking of retrieved RAG passages in order to optimize Q&A answer relevance. It also adds new feedback reasons for the PutFeedback API. +* api-change:``guardduty``: Marked fields IpAddressV4, PrivateIpAddress, Email as Sensitive. +* api-change:``controltower``: Adds support for new Baseline and EnabledBaseline APIs for automating multi-account governance. + + +2.15.20 +======= + +* api-change:``appsync``: Adds support for new options on GraphqlAPIs, Resolvers and Data Sources for emitting Amazon CloudWatch metrics for enhanced monitoring of AppSync APIs. +* api-change:``neptune-graph``: Adding a new option "parameters" for data plane api ExecuteQuery to support running parameterized query via SDK. +* api-change:``pricing``: Add Throttling Exception to all APIs. +* api-change:``marketplace-catalog``: AWS Marketplace Catalog API now supports setting intent on requests +* api-change:``endpoint-rules``: Update endpoint-rules command to latest version +* api-change:``cost-optimization-hub``: Adding includeMemberAccounts field to the response of ListEnrollmentStatuses API. +* api-change:``braket``: Creating a job will result in DeviceOfflineException when using an offline device, and DeviceRetiredException when using a retired device. +* api-change:``securitylake``: Documentation updates for Security Lake +* api-change:``resource-explorer-2``: Resource Explorer now uses newly supported IPv4 'amazonaws.com' endpoints by default. +* api-change:``amp``: Overall documentation updates. +* api-change:``cloudwatch``: Update cloudwatch command to latest version +* api-change:``route53domains``: This release adds bill contact support for RegisterDomain, TransferDomain, UpdateDomainContact and GetDomainDetail API. +* api-change:``ecs``: Documentation only update for Amazon ECS. 
+* api-change:``lightsail``: This release adds support to upgrade the major version of a database. +* api-change:``iot``: This release allows AWS IoT Core users to enable Online Certificate Status Protocol (OCSP) Stapling for TLS X.509 Server Certificates when creating and updating AWS IoT Domain Configurations with Custom Domain. +* api-change:``batch``: This feature allows Batch to support configuration of repository credentials for jobs running on ECS + + +2.15.19 +======= + +* api-change:``quicksight``: General Interactions for Visuals; Waterfall Chart Color Configuration; Documentation Update +* api-change:``workspaces``: This release introduces User-Decoupling feature. This feature allows Workspaces Core customers to provision workspaces without providing users. CreateWorkspaces and DescribeWorkspaces APIs will now take a new optional parameter "WorkspaceName". +* api-change:``datasync``: AWS DataSync now supports manifests for specifying files or objects to transfer. +* api-change:``codepipeline``: Add ability to execute pipelines with new parallel & queued execution modes and add support for triggers with filtering on branches and file paths. +* api-change:``redshift``: LisRecommendations API to fetch Amazon Redshift Advisor recommendations. +* api-change:``lexv2-models``: Update lexv2-models command to latest version + + +2.15.18 +======= + +* api-change:``ecs``: This release is a documentation only update to address customer issues. +* api-change:``workspaces``: Added definitions of various WorkSpace states +* api-change:``es``: This release adds clear visibility to the customers on the changes that they make on the domain. +* api-change:``dynamodb``: Any number of users can execute up to 50 concurrent restores (any type of restore) in a given account. +* api-change:``sagemaker``: Amazon SageMaker Canvas adds GenerativeAiSettings support for CanvasAppSettings. 
+* api-change:``appsync``: Support for environment variables in AppSync GraphQL APIs +* api-change:``endpoint-rules``: Update endpoint-rules command to latest version +* enhancement:dependency: Update ``flit_core`` version range ceiling to 3.9.0 +* api-change:``logs``: This release adds a new field, logGroupArn, to the response of the logs:DescribeLogGroups action. +* api-change:``wafv2``: You can now delete an API key that you've created for use with your CAPTCHA JavaScript integration API. +* api-change:``opensearch``: This release adds clear visibility to the customers on the changes that they make on the domain. +* api-change:``glue``: Introduce Catalog Encryption Role within Glue Data Catalog Settings. Introduce SASL/PLAIN as an authentication method for Glue Kafka connections + + +2.15.17 +======= + +* api-change:``ssm``: This release adds an optional Duration parameter to StateManager Associations. This allows customers to specify how long an apply-only-on-cron association execution should run. Once the specified Duration is out all the ongoing cancellable commands or automations are cancelled. +* api-change:``neptune-graph``: Adding new APIs in SDK for Amazon Neptune Analytics. These APIs include operations to execute, cancel, list queries and get the graph summary. +* api-change:``cognito-idp``: Added CreateIdentityProvider and UpdateIdentityProvider details for new SAML IdP features +* api-change:``elbv2``: Update elbv2 command to latest version +* api-change:``glue``: Update page size limits for GetJobRuns and GetTriggers APIs. +* api-change:``managedblockchain-query``: This release adds support for transactions that have not reached finality. It also removes support for the status property from the response of the GetTransaction operation. You can use the confirmationStatus and executionStatus properties to determine the status of the transaction. 
+* api-change:``cloudformation``: CloudFormation IaC generator allows you to scan existing resources in your account and select resources to generate a template for a new or existing CloudFormation stack. +* api-change:``ivs``: This release introduces a new resource Playback Restriction Policy which can be used to geo-restrict or domain-restrict channel stream playback when associated with a channel. New APIs to support this resource were introduced in the form of Create/Delete/Get/Update/List. +* api-change:``mediaconvert``: This release includes support for broadcast-mixed audio description tracks. + + +2.15.16 +======= + +* api-change:``mwaa``: This release adds MAINTENANCE environment status for Amazon MWAA environments. +* api-change:``rds``: Introduced support for the InsufficientDBInstanceCapacityFault error in the RDS RestoreDBClusterFromSnapshot and RestoreDBClusterToPointInTime API methods. This provides enhanced error handling, ensuring a more robust experience. +* api-change:``inspector2``: This release adds ECR container image scanning based on their lastRecordedPullTime. +* api-change:``connect``: Update list and string length limits for predefined attributes. +* api-change:``sagemaker``: Amazon SageMaker Automatic Model Tuning now provides an API to programmatically delete tuning jobs. +* api-change:``comprehend``: Comprehend PII analysis now supports Spanish input documents. +* api-change:``route53``: Update the SDKs for text changes in the APIs. +* api-change:``snowball``: Modified description of createaddress to include direction to add path when providing a JSON file. +* api-change:``ec2``: EC2 Fleet customers who use attribute based instance-type selection can now intuitively define their Spot instances price protection limit as a percentage of the lowest priced On-Demand instance type. +* api-change:``datazone``: Add new skipDeletionCheck to DeleteDomain. 
Add new skipDeletionCheck to DeleteProject which also automatically deletes dependent objects +* api-change:``autoscaling``: EC2 Auto Scaling customers who use attribute based instance-type selection can now intuitively define their Spot instances price protection limit as a percentage of the lowest priced On-Demand instance type. + + +2.15.15 +======= + +* api-change:``rds``: This release adds support for Aurora Limitless Database. +* api-change:``storagegateway``: Add DeprecationDate and SoftwareVersion to response of ListGateways. +* api-change:``ec2``: Introduced a new clientToken request parameter on CreateNetworkAcl and CreateRouteTable APIs. The clientToken parameter allows idempotent operations on the APIs. +* api-change:``acm-pca``: AWS Private CA now supports an option to omit the CDP extension from issued certificates, when CRL revocation is enabled. +* api-change:``lightsail``: This release adds support for IPv6-only instance plans. +* api-change:``outposts``: DeviceSerialNumber parameter is now optional in StartConnection API +* api-change:``ecs``: Documentation updates for Amazon ECS. + + +2.15.14 +======= + +* api-change:``cloudfront-keyvaluestore``: This release improves upon the DescribeKeyValueStore API by returning two additional fields, Status of the KeyValueStore and the FailureReason in case of failures during creation of KeyValueStore. +* api-change:``codebuild``: Release CodeBuild Reserved Capacity feature +* api-change:``endpoint-rules``: Update endpoint-rules command to latest version +* api-change:``cloud9``: Doc-only update around removing AL1 from list of available AMIs for Cloud9 +* api-change:``qconnect``: Increased Quick Response name max length to 100 +* api-change:``ec2``: Documentation updates for Amazon EC2. +* api-change:``appconfigdata``: Fix FIPS Endpoints in aws-us-gov. +* api-change:``ecs``: This release adds support for Transport Layer Security (TLS) and Configurable Timeout to ECS Service Connect. 
TLS facilitates privacy and data security for inter-service communications, while Configurable Timeout allows customized per-request timeout and idle timeout for Service Connect services. +* api-change:``organizations``: Doc only update for quota increase change +* api-change:``athena``: Introducing new NotebookS3LocationUri parameter to Athena ImportNotebook API. Payload is no longer required and either Payload or NotebookS3LocationUri needs to be provided (not both) for a successful ImportNotebook API call. If both are provided, an InvalidRequestException will be thrown. +* api-change:``dynamodb``: This release adds support for including ApproximateCreationDateTimePrecision configurations in EnableKinesisStreamingDestination API, adds the same as an optional field in the response of DescribeKinesisStreamingDestination, and adds support for a new UpdateKinesisStreamingDestination API. +* api-change:``connectcases``: This release adds the ability to view audit history on a case and introduces a new parameter, performedBy, for CreateCase and UpdateCase API's. +* api-change:``inspector2``: This release adds support for CIS scans on EC2 instances. +* api-change:``rds``: Introduced support for the InsufficientDBInstanceCapacityFault error in the RDS CreateDBCluster API method. This provides enhanced error handling, ensuring a more robust experience when creating database clusters with insufficient instance capacity. +* api-change:``finspace``: Allow customer to set zip default through command line arguments. + + +2.15.13 +======= + +* bugfix:``s3 sync``: Disable S3 Express support for s3 sync command + + +2.15.12 +======= + +* api-change:``keyspaces``: This release adds support for Multi-Region Replication with provisioned tables, and Keyspaces auto scaling APIs +* api-change:``b2bi``: Increasing TestMapping inputFileContent file size limit to 5MB and adding file size limit 250KB for TestParsing input file. 
This release also includes exposing InternalServerException for Tag APIs. +* api-change:``connect``: GetMetricDataV2 now supports 3 groupings +* api-change:``cloudtrail``: This release adds a new API ListInsightsMetricData to retrieve metric data from CloudTrail Insights. +* api-change:``dynamodb``: Updating note for enabling streams for UpdateTable. +* api-change:``firehose``: Allow support for Snowflake as a Kinesis Data Firehose delivery destination. +* api-change:``sagemaker-featurestore-runtime``: Increase BatchGetRecord limits from 10 items to 100 items +* api-change:``drs``: Removed invalid and unnecessary default values. + + +2.15.11 +======= + +* api-change:``iot``: Revert release of LogTargetTypes +* api-change:``rekognition``: This release adds ContentType and TaxonomyLevel attributes to DetectModerationLabels and GetMediaAnalysisJob API responses. +* api-change:``endpoint-rules``: Update endpoint-rules command to latest version +* api-change:``location``: Location SDK documentation update. Added missing fonts to the MapConfiguration data type. Updated note for the SubMunicipality property in the place data type. +* api-change:``supplychain``: This release includes APIs CreateBillOfMaterialsImportJob and GetBillOfMaterialsImportJob. +* api-change:``s3control``: S3 On Outposts team adds dualstack endpoints support for S3Control and S3Outposts API calls. +* api-change:``securityhub``: Documentation updates for AWS Security Hub +* api-change:``sagemaker``: This release will have ValidationException thrown if certain invalid app types are provided. The release will also throw ValidationException if more than 10 account ids are provided in VpcOnlyTrustedAccounts. +* api-change:``mwaa``: This Amazon MWAA feature release includes new fields in CreateWebLoginToken response model. The new fields IamIdentity and AirflowIdentity will let you match identifications, as the Airflow identity length is currently hashed to 64 characters. 
+* api-change:``transfer``: AWS Transfer Family now supports static IP addresses for SFTP & AS2 connectors and for async MDNs on AS2 servers. +* api-change:``personalize-runtime``: Documentation updates for Amazon Personalize +* api-change:``personalize``: Documentation updates for Amazon Personalize. +* api-change:``connect``: Supervisor Barge for Chat is now supported through the MonitorContact API. +* api-change:``connectparticipant``: Introduce new Supervisor participant role +* api-change:``iotfleetwise``: Updated APIs: SignalNodeType query parameter has been added to ListSignalCatalogNodesRequest and ListVehiclesResponse has been extended with attributes field. +* api-change:``payment-cryptography``: Provide an additional option for key exchange using RSA wrap/unwrap in addition to tr-34/tr-31 in ImportKey and ExportKey operations. Added new key usage (type) TR31_M1_ISO_9797_1_MAC_KEY, for use with Generate/VerifyMac dataplane operations with ISO9797 Algorithm 1 MAC calculations. +* api-change:``macie2``: This release adds support for analyzing Amazon S3 objects that are encrypted using dual-layer server-side encryption with AWS KMS keys (DSSE-KMS). It also adds support for reporting DSSE-KMS details in statistics and metadata about encryption settings for S3 buckets and objects. + + +2.15.10 +======= + +* api-change:``iot``: Add ConflictException to Update APIs of AWS IoT Software Package Catalog +* api-change:``ecs``: This release adds support for adding an ElasticBlockStorage volume configurations in ECS RunTask/StartTask/CreateService/UpdateService APIs. The configuration allows for attaching EBS volumes to ECS Tasks. 
+* api-change:``workspaces``: Added AWS Workspaces RebootWorkspaces API - Extended Reboot documentation update +* api-change:``iotfleetwise``: The following dataTypes have been removed: CUSTOMER_DECODED_INTERFACE in NetworkInterfaceType; CUSTOMER_DECODED_SIGNAL_INFO_IS_NULL in SignalDecoderFailureReason; CUSTOMER_DECODED_SIGNAL_NETWORK_INTERFACE_INFO_IS_NULL in NetworkInterfaceFailureReason; CUSTOMER_DECODED_SIGNAL in SignalDecoderType +* api-change:``ec2``: This release adds support for adding an ElasticBlockStorage volume configurations in ECS RunTask/StartTask/CreateService/UpdateService APIs. The configuration allows for attaching EBS volumes to ECS Tasks. +* api-change:``secretsmanager``: Doc only update for Secrets Manager +* api-change:``events``: Update events command to latest version +* api-change:``connectcampaigns``: Minor pattern updates for Campaign and Dial Request API fields. +* api-change:``route53``: Route53 now supports geoproximity routing in AWS regions +* api-change:``logs``: Add support for account level subscription filter policies to PutAccountPolicy, DescribeAccountPolicies, and DeleteAccountPolicy APIs. Additionally, PutAccountPolicy has been modified with new optional "selectionCriteria" parameter for resource selection. +* api-change:``location``: This release adds API support for custom layers for the maps service APIs: CreateMap, UpdateMap, DescribeMap. +* api-change:``wisdom``: QueryAssistant and GetRecommendations will be discontinued starting June 1, 2024. To receive generative responses after March 1, 2024 you will need to create a new Assistant in the Connect console and integrate the Amazon Q in Connect JavaScript library (amazon-q-connectjs) into your applications. +* api-change:``redshift-serverless``: Updates to ConfigParameter for RSS workgroup, removal of use_fips_ssl +* api-change:``qconnect``: QueryAssistant and GetRecommendations will be discontinued starting June 1, 2024. 
To receive generative responses after March 1, 2024 you will need to create a new Assistant in the Connect console and integrate the Amazon Q in Connect JavaScript library (amazon-q-connectjs) into your applications. + + 2.15.9 ====== @@ -6776,6 +6985,304 @@ * feature:wizard: Added support for AWS CLI Wizards. See `#3752 `__. +1.32.46 +======= + +* api-change:``dynamodb``: Publishing quick fix for doc only update. +* api-change:``firehose``: This release updates a few Firehose related APIs. +* api-change:``lambda``: Add .NET 8 (dotnet8) Runtime support to AWS Lambda. + + +1.32.45 +======= + +* api-change:``amplify``: This release contains API changes that enable users to configure their Amplify domains with their own custom SSL/TLS certificate. +* api-change:``chatbot``: This release adds support for AWS Chatbot. You can now monitor, operate, and troubleshoot your AWS resources with interactive ChatOps using the AWS SDK. +* api-change:``config``: Documentation updates for the AWS Config CLI +* api-change:``ivs``: Changed description for latencyMode in Create/UpdateChannel and Channel/ChannelSummary. +* api-change:``keyspaces``: Documentation updates for Amazon Keyspaces +* api-change:``mediatailor``: MediaTailor: marking #AdBreak.OffsetMillis as required. + + +1.32.44 +======= + +* api-change:``connectparticipant``: Doc only update to GetTranscript API reference guide to inform users about presence of events in the chat transcript. +* api-change:``emr``: adds fine grained control over Unhealthy Node Replacement to Amazon ElasticMapReduce +* api-change:``firehose``: This release adds support for Data Message Extraction for decompressed CloudWatch logs, and to use a custom file extension or time zone for S3 destinations. +* api-change:``lambda``: Documentation-only updates for Lambda to clarify a number of existing actions and properties. 
+* api-change:``rds``: Doc only update for a valid option in DB parameter group +* api-change:``sns``: This release marks phone numbers as sensitive inputs. + + +1.32.43 +======= + +* api-change:``artifact``: This is the initial SDK release for AWS Artifact. AWS Artifact provides on-demand access to compliance and third-party compliance reports. This release includes access to List and Get reports, along with their metadata. This release also includes access to AWS Artifact notifications settings. +* api-change:``codepipeline``: Add ability to override timeout on action level. +* api-change:``detective``: Doc only updates for content enhancement +* api-change:``guardduty``: Marked fields IpAddressV4, PrivateIpAddress, Email as Sensitive. +* api-change:``healthlake``: This release adds a new response parameter, JobProgressReport, to the DescribeFHIRImportJob and ListFHIRImportJobs API operation. JobProgressReport provides details on the progress of the import job on the server. +* api-change:``opensearch``: Adds additional supported instance types. +* api-change:``polly``: Amazon Polly adds 1 new voice - Burcu (tr-TR) +* api-change:``sagemaker``: This release adds a new API UpdateClusterSoftware for SageMaker HyperPod. This API allows users to patch HyperPod clusters with latest platform softwares. +* api-change:``secretsmanager``: Doc only update for Secrets Manager +* api-change:``endpoint-rules``: Update endpoint-rules command to latest version + + +1.32.42 +======= + +* api-change:``controltower``: Adds support for new Baseline and EnabledBaseline APIs for automating multi-account governance. +* api-change:``lookoutequipment``: This feature allows customers to see pointwise model diagnostics results for their models. +* api-change:``qbusiness``: This release adds the metadata-boosting feature, which allows customers to easily fine-tune the underlying ranking of retrieved RAG passages in order to optimize Q&A answer relevance. 
It also adds new feedback reasons for the PutFeedback API. + + +1.32.41 +======= + +* api-change:``lightsail``: This release adds support to upgrade the major version of a database. +* api-change:``marketplace-catalog``: AWS Marketplace Catalog API now supports setting intent on requests +* api-change:``resource-explorer-2``: Resource Explorer now uses newly supported IPv4 'amazonaws.com' endpoints by default. +* api-change:``securitylake``: Documentation updates for Security Lake +* api-change:``endpoint-rules``: Update endpoint-rules command to latest version + + +1.32.40 +======= + +* api-change:``appsync``: Adds support for new options on GraphqlAPIs, Resolvers and Data Sources for emitting Amazon CloudWatch metrics for enhanced monitoring of AppSync APIs. +* api-change:``cloudwatch``: Update cloudwatch command to latest version +* api-change:``neptune-graph``: Adding a new option "parameters" for data plane api ExecuteQuery to support running parameterized query via SDK. +* api-change:``route53domains``: This release adds bill contact support for RegisterDomain, TransferDomain, UpdateDomainContact and GetDomainDetail API. + + +1.32.39 +======= + +* api-change:``amp``: Overall documentation updates. +* api-change:``batch``: This feature allows Batch to support configuration of repository credentials for jobs running on ECS +* api-change:``braket``: Creating a job will result in DeviceOfflineException when using an offline device, and DeviceRetiredException when using a retired device. +* api-change:``cost-optimization-hub``: Adding includeMemberAccounts field to the response of ListEnrollmentStatuses API. +* api-change:``ecs``: Documentation only update for Amazon ECS. +* api-change:``iot``: This release allows AWS IoT Core users to enable Online Certificate Status Protocol (OCSP) Stapling for TLS X.509 Server Certificates when creating and updating AWS IoT Domain Configurations with Custom Domain. 
+* api-change:``pricing``: Add Throttling Exception to all APIs. + + +1.32.38 +======= + +* api-change:``codepipeline``: Add ability to execute pipelines with new parallel & queued execution modes and add support for triggers with filtering on branches and file paths. +* api-change:``quicksight``: General Interactions for Visuals; Waterfall Chart Color Configuration; Documentation Update +* api-change:``workspaces``: This release introduces User-Decoupling feature. This feature allows Workspaces Core customers to provision workspaces without providing users. CreateWorkspaces and DescribeWorkspaces APIs will now take a new optional parameter "WorkspaceName". + + +1.32.37 +======= + +* api-change:``datasync``: AWS DataSync now supports manifests for specifying files or objects to transfer. +* api-change:``lexv2-models``: Update lexv2-models command to latest version +* api-change:``redshift``: LisRecommendations API to fetch Amazon Redshift Advisor recommendations. + + +1.32.36 +======= + +* api-change:``appsync``: Support for environment variables in AppSync GraphQL APIs +* api-change:``ecs``: This release is a documentation only update to address customer issues. +* api-change:``es``: This release adds clear visibility to the customers on the changes that they make on the domain. +* api-change:``logs``: This release adds a new field, logGroupArn, to the response of the logs:DescribeLogGroups action. +* api-change:``opensearch``: This release adds clear visibility to the customers on the changes that they make on the domain. +* api-change:``wafv2``: You can now delete an API key that you've created for use with your CAPTCHA JavaScript integration API. + + +1.32.35 +======= + +* api-change:``glue``: Introduce Catalog Encryption Role within Glue Data Catalog Settings. 
Introduce SASL/PLAIN as an authentication method for Glue Kafka connections +* api-change:``workspaces``: Added definitions of various WorkSpace states + + +1.32.34 +======= + +* api-change:``dynamodb``: Any number of users can execute up to 50 concurrent restores (any type of restore) in a given account. +* api-change:``sagemaker``: Amazon SageMaker Canvas adds GenerativeAiSettings support for CanvasAppSettings. +* api-change:``endpoint-rules``: Update endpoint-rules command to latest version + + +1.32.33 +======= + +* api-change:``cognito-idp``: Added CreateIdentityProvider and UpdateIdentityProvider details for new SAML IdP features +* api-change:``ivs``: This release introduces a new resource Playback Restriction Policy which can be used to geo-restrict or domain-restrict channel stream playback when associated with a channel. New APIs to support this resource were introduced in the form of Create/Delete/Get/Update/List. +* api-change:``managedblockchain-query``: This release adds support for transactions that have not reached finality. It also removes support for the status property from the response of the GetTransaction operation. You can use the confirmationStatus and executionStatus properties to determine the status of the transaction. +* api-change:``mediaconvert``: This release includes support for broadcast-mixed audio description tracks. +* api-change:``neptune-graph``: Adding new APIs in SDK for Amazon Neptune Analytics. These APIs include operations to execute, cancel, list queries and get the graph summary. + + +1.32.32 +======= + +* api-change:``cloudformation``: CloudFormation IaC generator allows you to scan existing resources in your account and select resources to generate a template for a new or existing CloudFormation stack. +* api-change:``elbv2``: Update elbv2 command to latest version +* api-change:``glue``: Update page size limits for GetJobRuns and GetTriggers APIs. 
+* api-change:``ssm``: This release adds an optional Duration parameter to StateManager Associations. This allows customers to specify how long an apply-only-on-cron association execution should run. Once the specified Duration is out all the ongoing cancellable commands or automations are cancelled. + + +1.32.31 +======= + +* api-change:``datazone``: Add new skipDeletionCheck to DeleteDomain. Add new skipDeletionCheck to DeleteProject which also automatically deletes dependent objects +* api-change:``route53``: Update the SDKs for text changes in the APIs. + + +1.32.30 +======= + +* api-change:``autoscaling``: EC2 Auto Scaling customers who use attribute based instance-type selection can now intuitively define their Spot instances price protection limit as a percentage of the lowest priced On-Demand instance type. +* api-change:``comprehend``: Comprehend PII analysis now supports Spanish input documents. +* api-change:``ec2``: EC2 Fleet customers who use attribute based instance-type selection can now intuitively define their Spot instances price protection limit as a percentage of the lowest priced On-Demand instance type. +* api-change:``mwaa``: This release adds MAINTENANCE environment status for Amazon MWAA environments. +* api-change:``rds``: Introduced support for the InsufficientDBInstanceCapacityFault error in the RDS RestoreDBClusterFromSnapshot and RestoreDBClusterToPointInTime API methods. This provides enhanced error handling, ensuring a more robust experience. +* api-change:``snowball``: Modified description of createaddress to include direction to add path when providing a JSON file. + + +1.32.29 +======= + +* api-change:``connect``: Update list and string length limits for predefined attributes. +* api-change:``inspector2``: This release adds ECR container image scanning based on their lastRecordedPullTime. +* api-change:``sagemaker``: Amazon SageMaker Automatic Model Tuning now provides an API to programmatically delete tuning jobs. 
+ + +1.32.28 +======= + +* api-change:``acm-pca``: AWS Private CA now supports an option to omit the CDP extension from issued certificates, when CRL revocation is enabled. +* api-change:``lightsail``: This release adds support for IPv6-only instance plans. + + +1.32.27 +======= + +* api-change:``ec2``: Introduced a new clientToken request parameter on CreateNetworkAcl and CreateRouteTable APIs. The clientToken parameter allows idempotent operations on the APIs. +* api-change:``ecs``: Documentation updates for Amazon ECS. +* api-change:``outposts``: DeviceSerialNumber parameter is now optional in StartConnection API +* api-change:``rds``: This release adds support for Aurora Limitless Database. +* api-change:``storagegateway``: Add DeprecationDate and SoftwareVersion to response of ListGateways. + + +1.32.26 +======= + +* api-change:``inspector2``: This release adds support for CIS scans on EC2 instances. + + +1.32.25 +======= + +* bugfix:``s3 sync``: Disable S3 Express support for s3 sync command + + +1.32.24 +======= + +* api-change:``appconfigdata``: Fix FIPS Endpoints in aws-us-gov. +* api-change:``cloud9``: Doc-only update around removing AL1 from list of available AMIs for Cloud9 +* api-change:``cloudfront-keyvaluestore``: This release improves upon the DescribeKeyValueStore API by returning two additional fields, Status of the KeyValueStore and the FailureReason in case of failures during creation of KeyValueStore. +* api-change:``connectcases``: This release adds the ability to view audit history on a case and introduces a new parameter, performedBy, for CreateCase and UpdateCase API's. +* api-change:``ec2``: Documentation updates for Amazon EC2. +* api-change:``ecs``: This release adds support for Transport Layer Security (TLS) and Configurable Timeout to ECS Service Connect. 
TLS facilitates privacy and data security for inter-service communications, while Configurable Timeout allows customized per-request timeout and idle timeout for Service Connect services. +* api-change:``finspace``: Allow customer to set zip default through command line arguments. +* api-change:``organizations``: Doc only update for quota increase change +* api-change:``rds``: Introduced support for the InsufficientDBInstanceCapacityFault error in the RDS CreateDBCluster API method. This provides enhanced error handling, ensuring a more robust experience when creating database clusters with insufficient instance capacity. +* api-change:``endpoint-rules``: Update endpoint-rules command to latest version + + +1.32.23 +======= + +* api-change:``athena``: Introducing new NotebookS3LocationUri parameter to Athena ImportNotebook API. Payload is no longer required and either Payload or NotebookS3LocationUri needs to be provided (not both) for a successful ImportNotebook API call. If both are provided, an InvalidRequestException will be thrown. +* api-change:``codebuild``: Release CodeBuild Reserved Capacity feature +* api-change:``dynamodb``: This release adds support for including ApproximateCreationDateTimePrecision configurations in EnableKinesisStreamingDestination API, adds the same as an optional field in the response of DescribeKinesisStreamingDestination, and adds support for a new UpdateKinesisStreamingDestination API. +* api-change:``qconnect``: Increased Quick Response name max length to 100 + + +1.32.22 +======= + +* api-change:``b2bi``: Increasing TestMapping inputFileContent file size limit to 5MB and adding file size limit 250KB for TestParsing input file. This release also includes exposing InternalServerException for Tag APIs. +* api-change:``cloudtrail``: This release adds a new API ListInsightsMetricData to retrieve metric data from CloudTrail Insights. 
+* api-change:``connect``: GetMetricDataV2 now supports 3 groupings +* api-change:``drs``: Removed invalid and unnecessary default values. +* api-change:``firehose``: Allow support for Snowflake as a Kinesis Data Firehose delivery destination. +* api-change:``sagemaker-featurestore-runtime``: Increase BatchGetRecord limits from 10 items to 100 items + + +1.32.21 +======= + +* api-change:``dynamodb``: Updating note for enabling streams for UpdateTable. +* api-change:``keyspaces``: This release adds support for Multi-Region Replication with provisioned tables, and Keyspaces auto scaling APIs + + +1.32.20 +======= + +* api-change:``iot``: Revert release of LogTargetTypes +* api-change:``iotfleetwise``: Updated APIs: SignalNodeType query parameter has been added to ListSignalCatalogNodesRequest and ListVehiclesResponse has been extended with attributes field. +* api-change:``macie2``: This release adds support for analyzing Amazon S3 objects that are encrypted using dual-layer server-side encryption with AWS KMS keys (DSSE-KMS). It also adds support for reporting DSSE-KMS details in statistics and metadata about encryption settings for S3 buckets and objects. +* api-change:``payment-cryptography``: Provide an additional option for key exchange using RSA wrap/unwrap in addition to tr-34/tr-31 in ImportKey and ExportKey operations. Added new key usage (type) TR31_M1_ISO_9797_1_MAC_KEY, for use with Generate/VerifyMac dataplane operations with ISO9797 Algorithm 1 MAC calculations. +* api-change:``personalize-runtime``: Documentation updates for Amazon Personalize +* api-change:``personalize``: Documentation updates for Amazon Personalize. +* api-change:``rekognition``: This release adds ContentType and TaxonomyLevel attributes to DetectModerationLabels and GetMediaAnalysisJob API responses. 
+* api-change:``securityhub``: Documentation updates for AWS Security Hub + + +1.32.19 +======= + +* api-change:``sagemaker``: This release will have ValidationException thrown if certain invalid app types are provided. The release will also throw ValidationException if more than 10 account ids are provided in VpcOnlyTrustedAccounts. + + +1.32.18 +======= + +* api-change:``connect``: Supervisor Barge for Chat is now supported through the MonitorContact API. +* api-change:``connectparticipant``: Introduce new Supervisor participant role +* api-change:``location``: Location SDK documentation update. Added missing fonts to the MapConfiguration data type. Updated note for the SubMunicipality property in the place data type. +* api-change:``mwaa``: This Amazon MWAA feature release includes new fields in CreateWebLoginToken response model. The new fields IamIdentity and AirflowIdentity will let you match identifications, as the Airflow identity length is currently hashed to 64 characters. +* api-change:``s3control``: S3 On Outposts team adds dualstack endpoints support for S3Control and S3Outposts API calls. +* api-change:``supplychain``: This release includes APIs CreateBillOfMaterialsImportJob and GetBillOfMaterialsImportJob. +* api-change:``transfer``: AWS Transfer Family now supports static IP addresses for SFTP & AS2 connectors and for async MDNs on AS2 servers. +* api-change:``endpoint-rules``: Update endpoint-rules command to latest version + + +1.32.17 +======= + +* api-change:``ec2``: This release adds support for adding an ElasticBlockStorage volume configurations in ECS RunTask/StartTask/CreateService/UpdateService APIs. The configuration allows for attaching EBS volumes to ECS Tasks. +* api-change:``ecs``: This release adds support for adding an ElasticBlockStorage volume configurations in ECS RunTask/StartTask/CreateService/UpdateService APIs. The configuration allows for attaching EBS volumes to ECS Tasks. 
+* api-change:``events``: Update events command to latest version +* api-change:``iot``: Add ConflictException to Update APIs of AWS IoT Software Package Catalog +* api-change:``iotfleetwise``: The following dataTypes have been removed: CUSTOMER_DECODED_INTERFACE in NetworkInterfaceType; CUSTOMER_DECODED_SIGNAL_INFO_IS_NULL in SignalDecoderFailureReason; CUSTOMER_DECODED_SIGNAL_NETWORK_INTERFACE_INFO_IS_NULL in NetworkInterfaceFailureReason; CUSTOMER_DECODED_SIGNAL in SignalDecoderType +* api-change:``secretsmanager``: Doc only update for Secrets Manager +* api-change:``workspaces``: Added AWS Workspaces RebootWorkspaces API - Extended Reboot documentation update + + +1.32.16 +======= + +* api-change:``connectcampaigns``: Minor pattern updates for Campaign and Dial Request API fields. +* api-change:``location``: This release adds API support for custom layers for the maps service APIs: CreateMap, UpdateMap, DescribeMap. +* api-change:``logs``: Add support for account level subscription filter policies to PutAccountPolicy, DescribeAccountPolicies, and DeleteAccountPolicy APIs. Additionally, PutAccountPolicy has been modified with new optional "selectionCriteria" parameter for resource selection. +* api-change:``qconnect``: QueryAssistant and GetRecommendations will be discontinued starting June 1, 2024. To receive generative responses after March 1, 2024 you will need to create a new Assistant in the Connect console and integrate the Amazon Q in Connect JavaScript library (amazon-q-connectjs) into your applications. +* api-change:``redshift-serverless``: Updates to ConfigParameter for RSS workgroup, removal of use_fips_ssl +* api-change:``route53``: Route53 now supports geoproximity routing in AWS regions +* api-change:``wisdom``: QueryAssistant and GetRecommendations will be discontinued starting June 1, 2024. 
To receive generative responses after March 1, 2024 you will need to create a new Assistant in the Connect console and integrate the Amazon Q in Connect JavaScript library (amazon-q-connectjs) into your applications. + + 1.32.15 ======= diff -Nru awscli-2.15.9/awscli/__init__.py awscli-2.15.22/awscli/__init__.py --- awscli-2.15.9/awscli/__init__.py 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/awscli/__init__.py 2024-02-21 17:34:54.000000000 +0000 @@ -19,7 +19,7 @@ import importlib.abc import sys -__version__ = '2.15.9' +__version__ = '2.15.22' # # Get our data path to be added to botocore's search path diff -Nru awscli-2.15.9/awscli/botocore/data/acm-pca/2017-08-22/endpoint-rule-set-1.json awscli-2.15.22/awscli/botocore/data/acm-pca/2017-08-22/endpoint-rule-set-1.json --- awscli-2.15.9/awscli/botocore/data/acm-pca/2017-08-22/endpoint-rule-set-1.json 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/awscli/botocore/data/acm-pca/2017-08-22/endpoint-rule-set-1.json 2024-02-21 17:34:54.000000000 +0000 @@ -40,7 +40,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -83,7 +82,8 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -96,7 +96,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -110,7 +109,6 @@ "assign": "PartitionResult" } ], - "type": "tree", "rules": [ { "conditions": [ @@ -133,7 +131,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -168,7 +165,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -179,14 +175,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS and DualStack are enabled, but this partition does not support one or both", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -200,14 +198,12 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ - true, { "fn": "getAttr", "argv": [ @@ -216,18 +212,17 @@ }, "supportsFIPS" ] - } + }, + true ] } ], - "type": "tree", 
"rules": [ { "conditions": [ { "fn": "stringEquals", "argv": [ - "aws-us-gov", { "fn": "getAttr", "argv": [ @@ -236,7 +231,8 @@ }, "name" ] - } + }, + "aws-us-gov" ] } ], @@ -256,14 +252,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS is enabled but this partition does not support FIPS", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -277,7 +275,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -297,7 +294,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -308,14 +304,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "DualStack is enabled but this partition does not support DualStack", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [], @@ -326,9 +324,11 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" } - ] + ], + "type": "tree" }, { "conditions": [], diff -Nru awscli-2.15.9/awscli/botocore/data/acm-pca/2017-08-22/service-2.json awscli-2.15.22/awscli/botocore/data/acm-pca/2017-08-22/service-2.json --- awscli-2.15.9/awscli/botocore/data/acm-pca/2017-08-22/service-2.json 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/awscli/botocore/data/acm-pca/2017-08-22/service-2.json 2024-02-21 17:34:54.000000000 +0000 @@ -551,7 +551,7 @@ "type":"string", "max":200, "min":5, - "pattern":"arn:[\\w+=/,.@-]+:[\\w+=/,.@-]+:[\\w+=/,.@-]*:[0-9]*:[\\w+=,.@-]+(/[\\w+=,.@-]+)*" + "pattern":"arn:[\\w+=/,.@-]+:acm-pca:[\\w+=/,.@-]*:[0-9]*:[\\w+=,.@-]+(/[\\w+=,.@-]+)*" }, "AuditReportId":{ "type":"string", @@ -882,9 +882,25 @@ "S3ObjectAcl":{ "shape":"S3ObjectAcl", "documentation":"

Determines whether the CRL will be publicly readable or privately held in the CRL Amazon S3 bucket. If you choose PUBLIC_READ, the CRL will be accessible over the public internet. If you choose BUCKET_OWNER_FULL_CONTROL, only the owner of the CRL S3 bucket can access the CRL, and your PKI clients may need an alternative method of access.

If no value is specified, the default is PUBLIC_READ.

Note: This default can cause CA creation to fail in some circumstances. If you have enabled the Block Public Access (BPA) feature in your S3 account, then you must specify the value of this parameter as BUCKET_OWNER_FULL_CONTROL, and not doing so results in an error. If you have disabled BPA in S3, then you can specify either BUCKET_OWNER_FULL_CONTROL or PUBLIC_READ as the value.

For more information, see Blocking public access to the S3 bucket.

" + }, + "CrlDistributionPointExtensionConfiguration":{ + "shape":"CrlDistributionPointExtensionConfiguration", + "documentation":"

Configures the behavior of the CRL Distribution Point extension for certificates issued by your certificate authority. If this field is not provided, then the CRL Distribution Point Extension will be present and contain the default CRL URL.

" + } + }, + "documentation":"

Contains configuration information for a certificate revocation list (CRL). Your private certificate authority (CA) creates base CRLs. Delta CRLs are not supported. You can enable CRLs for your new or an existing private CA by setting the Enabled parameter to true. Your private CA writes CRLs to an S3 bucket that you specify in the S3BucketName parameter. You can hide the name of your bucket by specifying a value for the CustomCname parameter. Your private CA by default copies the CNAME or the S3 bucket name to the CRL Distribution Points extension of each certificate it issues. If you want to configure this default behavior to be something different, you can set the CrlDistributionPointExtensionConfiguration parameter. Your S3 bucket policy must give write permission to Amazon Web Services Private CA.

Amazon Web Services Private CA assets that are stored in Amazon S3 can be protected with encryption. For more information, see Encrypting Your CRLs.

Your private CA uses the value in the ExpirationInDays parameter to calculate the nextUpdate field in the CRL. The CRL is refreshed prior to a certificate's expiration date or when a certificate is revoked. When a certificate is revoked, it appears in the CRL until the certificate expires, and then in one additional CRL after expiration, and it always appears in the audit report.

A CRL is typically updated approximately 30 minutes after a certificate is revoked. If for any reason a CRL update fails, Amazon Web Services Private CA makes further attempts every 15 minutes.

CRLs contain the following fields:

Certificate revocation lists created by Amazon Web Services Private CA are DER-encoded. You can use the following OpenSSL command to list a CRL.

openssl crl -inform DER -text -in crl_path -noout

For more information, see Planning a certificate revocation list (CRL) in the Amazon Web Services Private Certificate Authority User Guide

" + }, + "CrlDistributionPointExtensionConfiguration":{ + "type":"structure", + "required":["OmitExtension"], + "members":{ + "OmitExtension":{ + "shape":"Boolean", + "documentation":"

Configures whether the CRL Distribution Point extension should be populated with the default URL to the CRL. If set to true, then the CDP extension will not be present in any certificates issued by that CA unless otherwise specified through CSR or API passthrough.

Only set this if you have another way to distribute the CRL Distribution Points for certificates issued by your CA, such as the Matter Distributed Compliance Ledger

This configuration cannot be enabled with a custom CNAME set.

", + "box":true } }, - "documentation":"

Contains configuration information for a certificate revocation list (CRL). Your private certificate authority (CA) creates base CRLs. Delta CRLs are not supported. You can enable CRLs for your new or an existing private CA by setting the Enabled parameter to true. Your private CA writes CRLs to an S3 bucket that you specify in the S3BucketName parameter. You can hide the name of your bucket by specifying a value for the CustomCname parameter. Your private CA copies the CNAME or the S3 bucket name to the CRL Distribution Points extension of each certificate it issues. Your S3 bucket policy must give write permission to Amazon Web Services Private CA.

Amazon Web Services Private CA assets that are stored in Amazon S3 can be protected with encryption. For more information, see Encrypting Your CRLs.

Your private CA uses the value in the ExpirationInDays parameter to calculate the nextUpdate field in the CRL. The CRL is refreshed prior to a certificate's expiration date or when a certificate is revoked. When a certificate is revoked, it appears in the CRL until the certificate expires, and then in one additional CRL after expiration, and it always appears in the audit report.

A CRL is typically updated approximately 30 minutes after a certificate is revoked. If for any reason a CRL update fails, Amazon Web Services Private CA makes further attempts every 15 minutes.

CRLs contain the following fields:

Certificate revocation lists created by Amazon Web Services Private CA are DER-encoded. You can use the following OpenSSL command to list a CRL.

openssl crl -inform DER -text -in crl_path -noout

For more information, see Planning a certificate revocation list (CRL) in the Amazon Web Services Private Certificate Authority User Guide

" + "documentation":"

Contains configuration information for the default behavior of the CRL Distribution Point (CDP) extension in certificates issued by your CA. This extension contains a link to download the CRL, so you can check whether a certificate has been revoked. To choose whether you want this extension omitted or not in certificates issued by your CA, you can set the OmitExtension parameter.

" }, "CsrBlob":{ "type":"blob", @@ -927,7 +943,7 @@ "CustomAttributeList":{ "type":"list", "member":{"shape":"CustomAttribute"}, - "max":30, + "max":150, "min":1 }, "CustomExtension":{ @@ -956,7 +972,7 @@ "CustomExtensionList":{ "type":"list", "member":{"shape":"CustomExtension"}, - "max":20, + "max":150, "min":1 }, "CustomObjectIdentifier":{ @@ -1185,7 +1201,7 @@ "GeneralNameList":{ "type":"list", "member":{"shape":"GeneralName"}, - "max":20, + "max":150, "min":1 }, "GetCertificateAuthorityCertificateRequest":{ @@ -1494,7 +1510,7 @@ }, "MaxResults":{ "shape":"MaxResults", - "documentation":"

Use this parameter when paginating results to specify the maximum number of items to return in the response on each page. If additional items exist beyond the number you specify, the NextToken element is sent in the response. Use this NextToken value in a subsequent request to retrieve additional items.

" + "documentation":"

Use this parameter when paginating results to specify the maximum number of items to return in the response on each page. If additional items exist beyond the number you specify, the NextToken element is sent in the response. Use this NextToken value in a subsequent request to retrieve additional items.

Although the maximum value is 1000, the action only returns a maximum of 100 items.

" }, "ResourceOwner":{ "shape":"ResourceOwner", @@ -1608,7 +1624,7 @@ }, "NextToken":{ "type":"string", - "max":500, + "max":43739, "min":1 }, "OcspConfiguration":{ diff -Nru awscli-2.15.9/awscli/botocore/data/acm-pca/2017-08-22/waiters-2.json awscli-2.15.22/awscli/botocore/data/acm-pca/2017-08-22/waiters-2.json --- awscli-2.15.9/awscli/botocore/data/acm-pca/2017-08-22/waiters-2.json 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/awscli/botocore/data/acm-pca/2017-08-22/waiters-2.json 2024-02-21 17:34:54.000000000 +0000 @@ -22,7 +22,7 @@ "CertificateIssued": { "description": "Wait until a certificate is issued", "operation": "GetCertificate", - "delay": 3, + "delay": 1, "maxAttempts": 60, "acceptors": [ { diff -Nru awscli-2.15.9/awscli/botocore/data/amp/2020-08-01/service-2.json awscli-2.15.22/awscli/botocore/data/amp/2020-08-01/service-2.json --- awscli-2.15.9/awscli/botocore/data/amp/2020-08-01/service-2.json 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/awscli/botocore/data/amp/2020-08-01/service-2.json 2024-02-21 17:34:54.000000000 +0000 @@ -30,7 +30,7 @@ {"shape":"InternalServerException"}, {"shape":"ServiceQuotaExceededException"} ], - "documentation":"

Create an alert manager definition.

", + "documentation":"

The CreateAlertManagerDefinition operation creates the alert manager definition in a workspace. If a workspace already has an alert manager definition, don't use this operation to update it. Instead, use PutAlertManagerDefinition.

", "idempotent":true }, "CreateLoggingConfiguration":{ @@ -48,7 +48,7 @@ {"shape":"AccessDeniedException"}, {"shape":"InternalServerException"} ], - "documentation":"

Create logging configuration.

", + "documentation":"

The CreateLoggingConfiguration operation creates a logging configuration for the workspace. Use this operation to set the CloudWatch log group to which the logs will be published to.

", "idempotent":true }, "CreateRuleGroupsNamespace":{ @@ -69,7 +69,7 @@ {"shape":"InternalServerException"}, {"shape":"ServiceQuotaExceededException"} ], - "documentation":"

Create a rule group namespace.

", + "documentation":"

The CreateRuleGroupsNamespace operation creates a rule groups namespace within a workspace. A rule groups namespace is associated with exactly one rules file. A workspace can have multiple rule groups namespaces.

Use this operation only to create new rule groups namespaces. To update an existing rule groups namespace, use PutRuleGroupsNamespace.

", "idempotent":true }, "CreateScraper":{ @@ -90,7 +90,7 @@ {"shape":"InternalServerException"}, {"shape":"ServiceQuotaExceededException"} ], - "documentation":"

Create a scraper.

", + "documentation":"

The CreateScraper operation creates a scraper to collect metrics. A scraper pulls metrics from Prometheus-compatible sources within an Amazon EKS cluster, and sends them to your Amazon Managed Service for Prometheus workspace. You can configure the scraper to control what metrics are collected, and what transformations are applied prior to sending them to your workspace.

If needed, an IAM role will be created for you that gives Amazon Managed Service for Prometheus access to the metrics in your cluster. For more information, see Using roles for scraping metrics from EKS in the Amazon Managed Service for Prometheus User Guide.

You cannot update a scraper. If you want to change the configuration of the scraper, create a new scraper and delete the old one.

The scrapeConfiguration parameter contains the base64-encoded version of the YAML configuration file.

For more information about collectors, including what metrics are collected, and how to configure the scraper, see Amazon Web Services managed collectors in the Amazon Managed Service for Prometheus User Guide.

", "idempotent":true }, "CreateWorkspace":{ @@ -110,7 +110,7 @@ {"shape":"InternalServerException"}, {"shape":"ServiceQuotaExceededException"} ], - "documentation":"

Creates a new AMP workspace.

", + "documentation":"

Creates a Prometheus workspace. A workspace is a logical space dedicated to the storage and querying of Prometheus metrics. You can have one or more workspaces in each Region in your account.

", "idempotent":true }, "DeleteAlertManagerDefinition":{ @@ -129,7 +129,7 @@ {"shape":"AccessDeniedException"}, {"shape":"InternalServerException"} ], - "documentation":"

Deletes an alert manager definition.

", + "documentation":"

Deletes the alert manager definition from a workspace.

", "idempotent":true }, "DeleteLoggingConfiguration":{ @@ -147,7 +147,7 @@ {"shape":"AccessDeniedException"}, {"shape":"InternalServerException"} ], - "documentation":"

Delete logging configuration.

", + "documentation":"

Deletes the logging configuration for a workspace.

", "idempotent":true }, "DeleteRuleGroupsNamespace":{ @@ -166,7 +166,7 @@ {"shape":"AccessDeniedException"}, {"shape":"InternalServerException"} ], - "documentation":"

Delete a rule groups namespace.

", + "documentation":"

Deletes one rule groups namespace and its associated rule groups definition.

", "idempotent":true }, "DeleteScraper":{ @@ -186,7 +186,7 @@ {"shape":"AccessDeniedException"}, {"shape":"InternalServerException"} ], - "documentation":"

Deletes a scraper.

", + "documentation":"

The DeleteScraper operation deletes one scraper, and stops any metrics collection that the scraper performs.

", "idempotent":true }, "DeleteWorkspace":{ @@ -205,7 +205,7 @@ {"shape":"AccessDeniedException"}, {"shape":"InternalServerException"} ], - "documentation":"

Deletes an AMP workspace.

", + "documentation":"

Deletes an existing workspace.

When you delete a workspace, the data that has been ingested into it is not immediately deleted. It will be permanently deleted within one month.

", "idempotent":true }, "DescribeAlertManagerDefinition":{ @@ -224,7 +224,7 @@ {"shape":"AccessDeniedException"}, {"shape":"InternalServerException"} ], - "documentation":"

Describes an alert manager definition.

" + "documentation":"

Retrieves the full information about the alert manager definition for a workspace.

" }, "DescribeLoggingConfiguration":{ "name":"DescribeLoggingConfiguration", @@ -241,7 +241,7 @@ {"shape":"AccessDeniedException"}, {"shape":"InternalServerException"} ], - "documentation":"

Describes logging configuration.

" + "documentation":"

Returns complete information about the current logging configuration of the workspace.

" }, "DescribeRuleGroupsNamespace":{ "name":"DescribeRuleGroupsNamespace", @@ -259,7 +259,7 @@ {"shape":"AccessDeniedException"}, {"shape":"InternalServerException"} ], - "documentation":"

Describe a rule groups namespace.

" + "documentation":"

Returns complete information about one rule groups namespace. To retrieve a list of rule groups namespaces, use ListRuleGroupsNamespaces.

" }, "DescribeScraper":{ "name":"DescribeScraper", @@ -277,7 +277,7 @@ {"shape":"AccessDeniedException"}, {"shape":"InternalServerException"} ], - "documentation":"

Describe an existing scraper.

" + "documentation":"

The DescribeScraper operation displays information about an existing scraper.

" }, "DescribeWorkspace":{ "name":"DescribeWorkspace", @@ -295,7 +295,7 @@ {"shape":"AccessDeniedException"}, {"shape":"InternalServerException"} ], - "documentation":"

Describes an existing AMP workspace.

" + "documentation":"

Returns information about an existing workspace.

" }, "GetDefaultScraperConfiguration":{ "name":"GetDefaultScraperConfiguration", @@ -311,7 +311,7 @@ {"shape":"AccessDeniedException"}, {"shape":"InternalServerException"} ], - "documentation":"

Gets a default configuration.

" + "documentation":"

The GetDefaultScraperConfiguration operation returns the default scraper configuration used when Amazon EKS creates a scraper for you.

" }, "ListRuleGroupsNamespaces":{ "name":"ListRuleGroupsNamespaces", @@ -329,7 +329,7 @@ {"shape":"AccessDeniedException"}, {"shape":"InternalServerException"} ], - "documentation":"

Lists rule groups namespaces.

" + "documentation":"

Returns a list of rule groups namespaces in a workspace.

" }, "ListScrapers":{ "name":"ListScrapers", @@ -346,7 +346,7 @@ {"shape":"AccessDeniedException"}, {"shape":"InternalServerException"} ], - "documentation":"

Lists all scrapers in a customer account, including scrapers being created or deleted. You may provide filters to return a more specific list of results.

" + "documentation":"

The ListScrapers operation lists all of the scrapers in your account. This includes scrapers being created or deleted. You can optionally filter the returned list.

" }, "ListTagsForResource":{ "name":"ListTagsForResource", @@ -364,7 +364,7 @@ {"shape":"AccessDeniedException"}, {"shape":"InternalServerException"} ], - "documentation":"

Lists the tags you have assigned to the resource.

" + "documentation":"

The ListTagsForResource operation returns the tags that are associated with an Amazon Managed Service for Prometheus resource. Currently, the only resources that can be tagged are workspaces and rule groups namespaces.

" }, "ListWorkspaces":{ "name":"ListWorkspaces", @@ -381,7 +381,7 @@ {"shape":"AccessDeniedException"}, {"shape":"InternalServerException"} ], - "documentation":"

Lists all AMP workspaces, including workspaces being created or deleted.

" + "documentation":"

Lists all of the Amazon Managed Service for Prometheus workspaces in your account. This includes workspaces being created or deleted.

" }, "PutAlertManagerDefinition":{ "name":"PutAlertManagerDefinition", @@ -401,7 +401,7 @@ {"shape":"InternalServerException"}, {"shape":"ServiceQuotaExceededException"} ], - "documentation":"

Update an alert manager definition.

", + "documentation":"

Updates an existing alert manager definition in a workspace. If the workspace does not already have an alert manager definition, don't use this operation to create it. Instead, use CreateAlertManagerDefinition.

", "idempotent":true }, "PutRuleGroupsNamespace":{ @@ -422,7 +422,7 @@ {"shape":"InternalServerException"}, {"shape":"ServiceQuotaExceededException"} ], - "documentation":"

Update a rule groups namespace.

", + "documentation":"

Updates an existing rule groups namespace within a workspace. A rule groups namespace is associated with exactly one rules file. A workspace can have multiple rule groups namespaces.

Use this operation only to update existing rule groups namespaces. To create a new rule groups namespace, use CreateRuleGroupsNamespace.

You can't use this operation to add tags to an existing rule groups namespace. Instead, use TagResource.

", "idempotent":true }, "TagResource":{ @@ -441,7 +441,7 @@ {"shape":"AccessDeniedException"}, {"shape":"InternalServerException"} ], - "documentation":"

Creates tags for the specified resource.

" + "documentation":"

The TagResource operation associates tags with an Amazon Managed Service for Prometheus resource. The only resources that can be tagged are workspaces and rule groups namespaces.

If you specify a new tag key for the resource, this tag is appended to the list of tags associated with the resource. If you specify a tag key that is already associated with the resource, the new tag value that you specify replaces the previous value for that tag.

" }, "UntagResource":{ "name":"UntagResource", @@ -459,7 +459,7 @@ {"shape":"AccessDeniedException"}, {"shape":"InternalServerException"} ], - "documentation":"

Deletes tags from the specified resource.

", + "documentation":"

Removes the specified tags from an Amazon Managed Service for Prometheus resource. The only resources that can be tagged are workspaces and rule groups namespaces.

", "idempotent":true }, "UpdateLoggingConfiguration":{ @@ -478,7 +478,7 @@ {"shape":"AccessDeniedException"}, {"shape":"InternalServerException"} ], - "documentation":"

Update logging configuration.

", + "documentation":"

Updates the log group ARN or the workspace ID of the current logging configuration.

", "idempotent":true }, "UpdateWorkspaceAlias":{ @@ -498,7 +498,7 @@ {"shape":"InternalServerException"}, {"shape":"ServiceQuotaExceededException"} ], - "documentation":"

Updates an AMP workspace alias.

", + "documentation":"

Updates the alias of an existing workspace.

", "idempotent":true } }, @@ -512,7 +512,7 @@ "documentation":"

Description of the error.

" } }, - "documentation":"

User does not have sufficient access to perform this action.

", + "documentation":"

You do not have sufficient access to perform this action.

", "error":{ "httpStatusCode":403, "senderFault":true @@ -521,35 +521,35 @@ }, "AlertManagerDefinitionData":{ "type":"blob", - "documentation":"

The alert manager definition data.

" + "documentation":"

The base-64 encoded blob that is the alert manager definition.

For details about the alert manager definition, see AlertManagerDefinitionData.

" }, "AlertManagerDefinitionDescription":{ "type":"structure", "required":[ - "status", - "data", "createdAt", - "modifiedAt" + "data", + "modifiedAt", + "status" ], "members":{ - "status":{ - "shape":"AlertManagerDefinitionStatus", - "documentation":"

The status of alert manager definition.

" + "createdAt":{ + "shape":"Timestamp", + "documentation":"

The date and time that the alert manager definition was created.

" }, "data":{ "shape":"AlertManagerDefinitionData", - "documentation":"

The alert manager definition.

" - }, - "createdAt":{ - "shape":"Timestamp", - "documentation":"

The time when the alert manager definition was created.

" + "documentation":"

The actual alert manager definition.

For details about the alert manager definition, see AlertManagerDefinitionData.

" }, "modifiedAt":{ "shape":"Timestamp", - "documentation":"

The time when the alert manager definition was modified.

" + "documentation":"

The date and time that the alert manager definition was most recently changed.

" + }, + "status":{ + "shape":"AlertManagerDefinitionStatus", + "documentation":"

A structure that displays the current status of the alert manager definition.

" } }, - "documentation":"

Represents the properties of an alert manager definition.

" + "documentation":"

The details of an alert manager definition.

" }, "AlertManagerDefinitionStatus":{ "type":"structure", @@ -557,18 +557,18 @@ "members":{ "statusCode":{ "shape":"AlertManagerDefinitionStatusCode", - "documentation":"

Status code of this definition.

" + "documentation":"

The current status of the alert manager.

" }, "statusReason":{ "shape":"String", - "documentation":"

The reason for failure if any.

" + "documentation":"

If there is a failure, the reason for the failure.

" } }, - "documentation":"

Represents the status of a definition.

" + "documentation":"

The status of the alert manager.

" }, "AlertManagerDefinitionStatusCode":{ "type":"string", - "documentation":"

State of an alert manager definition.

", + "documentation":"

State of an AlertManagerDefinition.

", "enum":[ "CREATING", "ACTIVE", @@ -584,16 +584,16 @@ "members":{ "workspaceArn":{ "shape":"WorkspaceArn", - "documentation":"

The ARN of an AMP workspace.

" + "documentation":"

ARN of the Amazon Managed Service for Prometheus workspace.

" } }, - "documentation":"

A representation of an AMP destination.

" + "documentation":"

The AmpConfiguration structure defines the Amazon Managed Service for Prometheus instance a scraper should send metrics to.

" }, "Blob":{"type":"blob"}, "ClusterArn":{ "type":"string", "documentation":"

The ARN of an EKS cluster.

", - "pattern":"arn:aws[-a-z]*:eks:[-a-z0-9]+:[0-9]{12}:cluster/.+" + "pattern":"^arn:aws[-a-z]*:eks:[-a-z0-9]+:[0-9]{12}:cluster/.+$" }, "ConflictException":{ "type":"structure", @@ -616,7 +616,7 @@ "documentation":"

Type of the resource affected.

" } }, - "documentation":"

Updating or deleting a resource can cause an inconsistent state.

", + "documentation":"

The request would cause an inconsistent state.

", "error":{ "httpStatusCode":409, "senderFault":true @@ -626,27 +626,27 @@ "CreateAlertManagerDefinitionRequest":{ "type":"structure", "required":[ - "workspaceId", - "data" + "data", + "workspaceId" ], "members":{ - "workspaceId":{ - "shape":"WorkspaceId", - "documentation":"

The ID of the workspace in which to create the alert manager definition.

", - "location":"uri", - "locationName":"workspaceId" + "clientToken":{ + "shape":"IdempotencyToken", + "documentation":"

A unique identifier that you can provide to ensure the idempotency of the request. Case-sensitive.

", + "idempotencyToken":true }, "data":{ "shape":"AlertManagerDefinitionData", - "documentation":"

The alert manager definition data.

" + "documentation":"

The alert manager definition to add. A base64-encoded version of the YAML alert manager definition file.

For details about the alert manager definition, see AlertManagerDefinitionData.

" }, - "clientToken":{ - "shape":"IdempotencyToken", - "documentation":"

Optional, unique, case-sensitive, user-provided identifier to ensure the idempotency of the request.

", - "idempotencyToken":true + "workspaceId":{ + "shape":"WorkspaceId", + "documentation":"

The ID of the workspace to add the alert manager definition to.

", + "location":"uri", + "locationName":"workspaceId" } }, - "documentation":"

Represents the input of a CreateAlertManagerDefinition operation.

" + "documentation":"

Represents the input of a CreateAlertManagerDefinition operation.

" }, "CreateAlertManagerDefinitionResponse":{ "type":"structure", @@ -654,35 +654,35 @@ "members":{ "status":{ "shape":"AlertManagerDefinitionStatus", - "documentation":"

The status of alert manager definition.

" + "documentation":"

A structure that displays the current status of the alert manager definition.

" } }, - "documentation":"

Represents the output of a CreateAlertManagerDefinition operation.

" + "documentation":"

Represents the output of a CreateAlertManagerDefinition operation.

" }, "CreateLoggingConfigurationRequest":{ "type":"structure", "required":[ - "workspaceId", - "logGroupArn" + "logGroupArn", + "workspaceId" ], "members":{ - "workspaceId":{ - "shape":"WorkspaceId", - "documentation":"

The ID of the workspace to vend logs to.

", - "location":"uri", - "locationName":"workspaceId" + "clientToken":{ + "shape":"IdempotencyToken", + "documentation":"

A unique identifier that you can provide to ensure the idempotency of the request. Case-sensitive.

", + "idempotencyToken":true }, "logGroupArn":{ "shape":"LogGroupArn", - "documentation":"

The ARN of the CW log group to which the vended log data will be published.

" + "documentation":"

The ARN of the CloudWatch log group to which the vended log data will be published. This log group must exist prior to calling this API.

" }, - "clientToken":{ - "shape":"IdempotencyToken", - "documentation":"

Optional, unique, case-sensitive, user-provided identifier to ensure the idempotency of the request.

", - "idempotencyToken":true + "workspaceId":{ + "shape":"WorkspaceId", + "documentation":"

The ID of the workspace to create the logging configuration for.

", + "location":"uri", + "locationName":"workspaceId" } }, - "documentation":"

Represents the input of a CreateLoggingConfiguration operation.

" + "documentation":"

Represents the input of a CreateLoggingConfiguration operation.

" }, "CreateLoggingConfigurationResponse":{ "type":"structure", @@ -690,277 +690,277 @@ "members":{ "status":{ "shape":"LoggingConfigurationStatus", - "documentation":"

The status of the logging configuration.

" + "documentation":"

A structure that displays the current status of the logging configuration.

" } }, - "documentation":"

Represents the output of a CreateLoggingConfiguration operation.

" + "documentation":"

Represents the output of a CreateLoggingConfiguration operation.

" }, "CreateRuleGroupsNamespaceRequest":{ "type":"structure", "required":[ - "workspaceId", + "data", "name", - "data" + "workspaceId" ], "members":{ - "workspaceId":{ - "shape":"WorkspaceId", - "documentation":"

The ID of the workspace in which to create the rule group namespace.

", - "location":"uri", - "locationName":"workspaceId" - }, - "name":{ - "shape":"RuleGroupsNamespaceName", - "documentation":"

The rule groups namespace name.

" + "clientToken":{ + "shape":"IdempotencyToken", + "documentation":"

A unique identifier that you can provide to ensure the idempotency of the request. Case-sensitive.

", + "idempotencyToken":true }, "data":{ "shape":"RuleGroupsNamespaceData", - "documentation":"

The namespace data that define the rule groups.

" + "documentation":"

The rules file to use in the new namespace.

Contains the base64-encoded version of the YAML rules file.

For details about the rule groups namespace structure, see RuleGroupsNamespaceData.

" }, - "clientToken":{ - "shape":"IdempotencyToken", - "documentation":"

Optional, unique, case-sensitive, user-provided identifier to ensure the idempotency of the request.

", - "idempotencyToken":true + "name":{ + "shape":"RuleGroupsNamespaceName", + "documentation":"

The name for the new rule groups namespace.

" }, "tags":{ "shape":"TagMap", - "documentation":"

Optional, user-provided tags for this rule groups namespace.

" + "documentation":"

The list of tag keys and values to associate with the rule groups namespace.

" + }, + "workspaceId":{ + "shape":"WorkspaceId", + "documentation":"

The ID of the workspace to add the rule groups namespace to.

", + "location":"uri", + "locationName":"workspaceId" } }, - "documentation":"

Represents the input of a CreateRuleGroupsNamespace operation.

" + "documentation":"

Represents the input of a CreateRuleGroupsNamespace operation.

" }, "CreateRuleGroupsNamespaceResponse":{ "type":"structure", "required":[ - "name", "arn", + "name", "status" ], "members":{ - "name":{ - "shape":"RuleGroupsNamespaceName", - "documentation":"

The rule groups namespace name.

" - }, "arn":{ "shape":"RuleGroupsNamespaceArn", - "documentation":"

The Amazon Resource Name (ARN) of this rule groups namespace.

" + "documentation":"

The Amazon Resource Name (ARN) of the new rule groups namespace.

" + }, + "name":{ + "shape":"RuleGroupsNamespaceName", + "documentation":"

The name of the new rule groups namespace.

" }, "status":{ "shape":"RuleGroupsNamespaceStatus", - "documentation":"

The status of rule groups namespace.

" + "documentation":"

A structure that returns the current status of the rule groups namespace.

" }, "tags":{ "shape":"TagMap", - "documentation":"

The tags of this rule groups namespace.

" + "documentation":"

The list of tag keys and values that are associated with the namespace.

" } }, - "documentation":"

Represents the output of a CreateRuleGroupsNamespace operation.

" + "documentation":"

Represents the output of a CreateRuleGroupsNamespace operation.

" }, "CreateScraperRequest":{ "type":"structure", "required":[ + "destination", "scrapeConfiguration", - "source", - "destination" + "source" ], "members":{ "alias":{ "shape":"ScraperAlias", - "documentation":"

An optional user-assigned alias for this scraper. This alias is for user reference and does not need to be unique.

" + "documentation":"

(Optional) A name to associate with the scraper. This is for your use, and does not need to be unique.

" + }, + "clientToken":{ + "shape":"IdempotencyToken", + "documentation":"

(Optional) A unique, case-sensitive identifier that you can provide to ensure the idempotency of the request.

", + "idempotencyToken":true + }, + "destination":{ + "shape":"Destination", + "documentation":"

The Amazon Managed Service for Prometheus workspace to send metrics to.

" }, "scrapeConfiguration":{ "shape":"ScrapeConfiguration", - "documentation":"

The configuration used to create the scraper.

" + "documentation":"

The configuration file to use in the new scraper. For more information, see Scraper configuration in the Amazon Managed Service for Prometheus User Guide.

" }, "source":{ "shape":"Source", - "documentation":"

The source that the scraper will be discovering and collecting metrics from.

" - }, - "destination":{ - "shape":"Destination", - "documentation":"

The destination that the scraper will be producing metrics to.

" - }, - "clientToken":{ - "shape":"IdempotencyToken", - "documentation":"

Optional, unique, case-sensitive, user-provided identifier to ensure the idempotency of the request.

", - "idempotencyToken":true + "documentation":"

The Amazon EKS cluster from which the scraper will collect metrics.

" }, "tags":{ "shape":"TagMap", - "documentation":"

Optional, user-provided tags for this scraper.

" + "documentation":"

(Optional) The list of tag keys and values to associate with the scraper.

" } }, - "documentation":"

Represents the input of a CreateScraper operation.

" + "documentation":"

Represents the input of a CreateScraper operation.

" }, "CreateScraperResponse":{ "type":"structure", "required":[ - "scraperId", "arn", + "scraperId", "status" ], "members":{ - "scraperId":{ - "shape":"ScraperId", - "documentation":"

The generated ID of the scraper that was just created.

" - }, "arn":{ "shape":"ScraperArn", - "documentation":"

The ARN of the scraper that was just created.

" + "documentation":"

The Amazon Resource Name (ARN) of the new scraper.

" + }, + "scraperId":{ + "shape":"ScraperId", + "documentation":"

The ID of the new scraper.

" }, "status":{ "shape":"ScraperStatus", - "documentation":"

The status of the scraper that was just created (usually CREATING).

" + "documentation":"

A structure that displays the current status of the scraper.

" }, "tags":{ "shape":"TagMap", - "documentation":"

The tags of this scraper.

" + "documentation":"

The list of tag keys and values that are associated with the scraper.

" } }, - "documentation":"

Represents the output of a CreateScraper operation.

" + "documentation":"

Represents the output of a CreateScraper operation.

" }, "CreateWorkspaceRequest":{ "type":"structure", "members":{ "alias":{ "shape":"WorkspaceAlias", - "documentation":"

An optional user-assigned alias for this workspace. This alias is for user reference and does not need to be unique.

" + "documentation":"

An alias that you assign to this workspace to help you identify it. It does not need to be unique.

Blank spaces at the beginning or end of the alias that you specify will be trimmed from the value used.

" }, "clientToken":{ "shape":"IdempotencyToken", - "documentation":"

Optional, unique, case-sensitive, user-provided identifier to ensure the idempotency of the request.

", + "documentation":"

A unique identifier that you can provide to ensure the idempotency of the request. Case-sensitive.

", "idempotencyToken":true }, - "tags":{ - "shape":"TagMap", - "documentation":"

Optional, user-provided tags for this workspace.

" - }, "kmsKeyArn":{ "shape":"KmsKeyArn", - "documentation":"

Optional, customer managed KMS key used to encrypt data for this workspace

" + "documentation":"

(optional) The ARN for a customer managed KMS key to use for encrypting data within your workspace. For more information about using your own key in your workspace, see Encryption at rest in the Amazon Managed Service for Prometheus User Guide.

" + }, + "tags":{ + "shape":"TagMap", + "documentation":"

The list of tag keys and values to associate with the workspace.

" } }, - "documentation":"

Represents the input of a CreateWorkspace operation.

" + "documentation":"

Represents the input of a CreateWorkspace operation.

" }, "CreateWorkspaceResponse":{ "type":"structure", "required":[ - "workspaceId", "arn", - "status" + "status", + "workspaceId" ], "members":{ - "workspaceId":{ - "shape":"WorkspaceId", - "documentation":"

The generated ID of the workspace that was just created.

" - }, "arn":{ "shape":"WorkspaceArn", - "documentation":"

The ARN of the workspace that was just created.

" + "documentation":"

The ARN for the new workspace.

" + }, + "kmsKeyArn":{ + "shape":"KmsKeyArn", + "documentation":"

(optional) If the workspace was created with a customer managed KMS key, the ARN for the key used.

" }, "status":{ "shape":"WorkspaceStatus", - "documentation":"

The status of the workspace that was just created (usually CREATING).

" + "documentation":"

The current status of the new workspace. Immediately after you create the workspace, the status is usually CREATING.

" }, "tags":{ "shape":"TagMap", - "documentation":"

The tags of this workspace.

" + "documentation":"

The list of tag keys and values that are associated with the workspace.

" }, - "kmsKeyArn":{ - "shape":"KmsKeyArn", - "documentation":"

Customer managed KMS key ARN for this workspace

" + "workspaceId":{ + "shape":"WorkspaceId", + "documentation":"

The unique ID for the new workspace.

" } }, - "documentation":"

Represents the output of a CreateWorkspace operation.

" + "documentation":"

Represents the output of a CreateWorkspace operation.

" }, "DeleteAlertManagerDefinitionRequest":{ "type":"structure", "required":["workspaceId"], "members":{ - "workspaceId":{ - "shape":"WorkspaceId", - "documentation":"

The ID of the workspace in which to delete the alert manager definition.

", - "location":"uri", - "locationName":"workspaceId" - }, "clientToken":{ "shape":"IdempotencyToken", - "documentation":"

Optional, unique, case-sensitive, user-provided identifier to ensure the idempotency of the request.

", + "documentation":"

A unique identifier that you can provide to ensure the idempotency of the request. Case-sensitive.

", "idempotencyToken":true, "location":"querystring", "locationName":"clientToken" + }, + "workspaceId":{ + "shape":"WorkspaceId", + "documentation":"

The ID of the workspace to delete the alert manager definition from.

", + "location":"uri", + "locationName":"workspaceId" } }, - "documentation":"

Represents the input of a DeleteAlertManagerDefinition operation.

" + "documentation":"

Represents the input of a DeleteAlertManagerDefinition operation.

" }, "DeleteLoggingConfigurationRequest":{ "type":"structure", "required":["workspaceId"], "members":{ - "workspaceId":{ - "shape":"WorkspaceId", - "documentation":"

The ID of the workspace to vend logs to.

", - "location":"uri", - "locationName":"workspaceId" - }, "clientToken":{ "shape":"IdempotencyToken", - "documentation":"

Optional, unique, case-sensitive, user-provided identifier to ensure the idempotency of the request.

", + "documentation":"

A unique identifier that you can provide to ensure the idempotency of the request. Case-sensitive.

", "idempotencyToken":true, "location":"querystring", "locationName":"clientToken" + }, + "workspaceId":{ + "shape":"WorkspaceId", + "documentation":"

The ID of the workspace containing the logging configuration to delete.

", + "location":"uri", + "locationName":"workspaceId" } }, - "documentation":"

Represents the input of a DeleteLoggingConfiguration operation.

" + "documentation":"

Represents the input of a DeleteLoggingConfiguration operation.

" }, "DeleteRuleGroupsNamespaceRequest":{ "type":"structure", "required":[ - "workspaceId", - "name" + "name", + "workspaceId" ], "members":{ - "workspaceId":{ - "shape":"WorkspaceId", - "documentation":"

The ID of the workspace to delete rule group definition.

", - "location":"uri", - "locationName":"workspaceId" + "clientToken":{ + "shape":"IdempotencyToken", + "documentation":"

A unique identifier that you can provide to ensure the idempotency of the request. Case-sensitive.

", + "idempotencyToken":true, + "location":"querystring", + "locationName":"clientToken" }, "name":{ "shape":"RuleGroupsNamespaceName", - "documentation":"

The rule groups namespace name.

", + "documentation":"

The name of the rule groups namespace to delete.

", "location":"uri", "locationName":"name" }, - "clientToken":{ - "shape":"IdempotencyToken", - "documentation":"

Optional, unique, case-sensitive, user-provided identifier to ensure the idempotency of the request.

", - "idempotencyToken":true, - "location":"querystring", - "locationName":"clientToken" + "workspaceId":{ + "shape":"WorkspaceId", + "documentation":"

The ID of the workspace containing the rule groups namespace and definition to delete.

", + "location":"uri", + "locationName":"workspaceId" } }, - "documentation":"

Represents the input of a DeleteRuleGroupsNamespace operation.

" + "documentation":"

Represents the input of a DeleteRuleGroupsNamespace operation.

" }, "DeleteScraperRequest":{ "type":"structure", "required":["scraperId"], "members":{ - "scraperId":{ - "shape":"ScraperId", - "documentation":"

The ID of the scraper to delete.

", - "location":"uri", - "locationName":"scraperId" - }, "clientToken":{ "shape":"IdempotencyToken", - "documentation":"

Optional, unique, case-sensitive, user-provided identifier to ensure the idempotency of the request.

", + "documentation":"

(Optional) A unique, case-sensitive identifier that you can provide to ensure the idempotency of the request.

", "idempotencyToken":true, "location":"querystring", "locationName":"clientToken" + }, + "scraperId":{ + "shape":"ScraperId", + "documentation":"

The ID of the scraper to delete.

", + "location":"uri", + "locationName":"scraperId" } }, - "documentation":"

Represents the input of a DeleteScraper operation.

" + "documentation":"

Represents the input of a DeleteScraper operation.

" }, "DeleteScraperResponse":{ "type":"structure", @@ -971,34 +971,34 @@ "members":{ "scraperId":{ "shape":"ScraperId", - "documentation":"

The ID of the scraper that was deleted.

" + "documentation":"

The ID of the scraper that was deleted.

" }, "status":{ "shape":"ScraperStatus", - "documentation":"

The status of the scraper that is being deleted.

" + "documentation":"

The current status of the scraper.

" } }, - "documentation":"

Represents the output of a DeleteScraper operation.

" + "documentation":"

Represents the output of a DeleteScraper operation.

" }, "DeleteWorkspaceRequest":{ "type":"structure", "required":["workspaceId"], "members":{ - "workspaceId":{ - "shape":"WorkspaceId", - "documentation":"

The ID of the workspace to delete.

", - "location":"uri", - "locationName":"workspaceId" - }, "clientToken":{ "shape":"IdempotencyToken", - "documentation":"

Optional, unique, case-sensitive, user-provided identifier to ensure the idempotency of the request.

", + "documentation":"

A unique identifier that you can provide to ensure the idempotency of the request. Case-sensitive.

", "idempotencyToken":true, "location":"querystring", "locationName":"clientToken" + }, + "workspaceId":{ + "shape":"WorkspaceId", + "documentation":"

The ID of the workspace to delete.

", + "location":"uri", + "locationName":"workspaceId" } }, - "documentation":"

Represents the input of a DeleteWorkspace operation.

" + "documentation":"

Represents the input of a DeleteWorkspace operation.

" }, "DescribeAlertManagerDefinitionRequest":{ "type":"structure", @@ -1006,12 +1006,12 @@ "members":{ "workspaceId":{ "shape":"WorkspaceId", - "documentation":"

The ID of the workspace to describe.

", + "documentation":"

The ID of the workspace to retrieve the alert manager definition from.

", "location":"uri", "locationName":"workspaceId" } }, - "documentation":"

Represents the input of a DescribeAlertManagerDefinition operation.

" + "documentation":"

Represents the input of a DescribeAlertManagerDefinition operation.

" }, "DescribeAlertManagerDefinitionResponse":{ "type":"structure", @@ -1019,10 +1019,10 @@ "members":{ "alertManagerDefinition":{ "shape":"AlertManagerDefinitionDescription", - "documentation":"

The properties of the selected workspace's alert manager definition.

" + "documentation":"

The alert manager definition.

" } }, - "documentation":"

Represents the output of a DescribeAlertManagerDefinition operation.

" + "documentation":"

Represents the output of a DescribeAlertManagerDefinition operation.

" }, "DescribeLoggingConfigurationRequest":{ "type":"structure", @@ -1030,12 +1030,12 @@ "members":{ "workspaceId":{ "shape":"WorkspaceId", - "documentation":"

The ID of the workspace to vend logs to.

", + "documentation":"

The ID of the workspace to describe the logging configuration for.

", "location":"uri", "locationName":"workspaceId" } }, - "documentation":"

Represents the input of a DescribeLoggingConfiguration operation.

" + "documentation":"

Represents the input of a DescribeLoggingConfiguration operation.

" }, "DescribeLoggingConfigurationResponse":{ "type":"structure", @@ -1043,32 +1043,32 @@ "members":{ "loggingConfiguration":{ "shape":"LoggingConfigurationMetadata", - "documentation":"

Metadata object containing information about the logging configuration of a workspace.

" + "documentation":"

A structure that displays the information about the logging configuration.

" } }, - "documentation":"

Represents the output of a DescribeLoggingConfiguration operation.

" + "documentation":"

Represents the output of a DescribeLoggingConfiguration operation.

" }, "DescribeRuleGroupsNamespaceRequest":{ "type":"structure", "required":[ - "workspaceId", - "name" + "name", + "workspaceId" ], "members":{ - "workspaceId":{ - "shape":"WorkspaceId", - "documentation":"

The ID of the workspace to describe.

", - "location":"uri", - "locationName":"workspaceId" - }, "name":{ "shape":"RuleGroupsNamespaceName", - "documentation":"

The rule groups namespace.

", + "documentation":"

The name of the rule groups namespace that you want information for.

", "location":"uri", "locationName":"name" + }, + "workspaceId":{ + "shape":"WorkspaceId", + "documentation":"

The ID of the workspace containing the rule groups namespace.

", + "location":"uri", + "locationName":"workspaceId" } }, - "documentation":"

Represents the input of a DescribeRuleGroupsNamespace operation.

" + "documentation":"

Represents the input of a DescribeRuleGroupsNamespace operation.

" }, "DescribeRuleGroupsNamespaceResponse":{ "type":"structure", @@ -1076,10 +1076,10 @@ "members":{ "ruleGroupsNamespace":{ "shape":"RuleGroupsNamespaceDescription", - "documentation":"

The selected rule groups namespace.

" + "documentation":"

The information about the rule groups namespace.

" } }, - "documentation":"

Represents the output of a DescribeRuleGroupsNamespace operation.

" + "documentation":"

Represents the output of a DescribeRuleGroupsNamespace operation.

" }, "DescribeScraperRequest":{ "type":"structure", @@ -1087,12 +1087,12 @@ "members":{ "scraperId":{ "shape":"ScraperId", - "documentation":"

The IDs of the scraper to describe.

", + "documentation":"

The ID of the scraper to describe.

", "location":"uri", "locationName":"scraperId" } }, - "documentation":"

Represents the input of a DescribeScraper operation.

" + "documentation":"

Represents the input of a DescribeScraper operation.

" }, "DescribeScraperResponse":{ "type":"structure", @@ -1100,10 +1100,10 @@ "members":{ "scraper":{ "shape":"ScraperDescription", - "documentation":"

The properties of the selected scrapers.

" + "documentation":"

Contains details about the scraper.

" } }, - "documentation":"

Represents the output of a DescribeScraper operation.

" + "documentation":"

Represents the output of a DescribeScraper operation.

" }, "DescribeWorkspaceRequest":{ "type":"structure", @@ -1116,7 +1116,7 @@ "locationName":"workspaceId" } }, - "documentation":"

Represents the input of a DescribeWorkspace operation.

" + "documentation":"

Represents the input of a DescribeWorkspace operation.

" }, "DescribeWorkspaceResponse":{ "type":"structure", @@ -1124,20 +1124,20 @@ "members":{ "workspace":{ "shape":"WorkspaceDescription", - "documentation":"

The properties of the selected workspace.

" + "documentation":"

A structure that contains details about the workspace.

" } }, - "documentation":"

Represents the output of a DescribeWorkspace operation.

" + "documentation":"

Represents the output of a DescribeWorkspace operation.

" }, "Destination":{ "type":"structure", "members":{ "ampConfiguration":{ "shape":"AmpConfiguration", - "documentation":"

A representation of an AMP destination.

" + "documentation":"

The Amazon Managed Service for Prometheus workspace to send metrics to.

" } }, - "documentation":"

A representation of a destination that a scraper can produce metrics to.

", + "documentation":"

Where to send the metrics from a scraper.

", "union":true }, "EksConfiguration":{ @@ -1149,28 +1149,28 @@ "members":{ "clusterArn":{ "shape":"ClusterArn", - "documentation":"

The ARN of an EKS cluster.

" + "documentation":"

ARN of the Amazon EKS cluster.

" }, "securityGroupIds":{ "shape":"SecurityGroupIds", - "documentation":"

A list of security group IDs specified for VPC configuration.

" + "documentation":"

A list of the security group IDs for the Amazon EKS cluster VPC configuration.

" }, "subnetIds":{ "shape":"SubnetIds", - "documentation":"

A list of subnet IDs specified for VPC configuration.

" + "documentation":"

A list of subnet IDs for the Amazon EKS cluster VPC configuration.

" } }, - "documentation":"

A representation of an EKS source.

" + "documentation":"

The EksConfiguration structure describes the connection to the Amazon EKS cluster from which a scraper collects metrics.

" }, "FilterKey":{ "type":"string", - "documentation":"

The name of the key to filter by.

", + "documentation":"

The name of the key by which to filter.

", "max":256, "min":1 }, "FilterValue":{ "type":"string", - "documentation":"

The value of a given key to filter by.

", + "documentation":"

The value for a given key by which to filter.

", "max":256, "min":1 }, @@ -1185,7 +1185,7 @@ "type":"structure", "members":{ }, - "documentation":"

Represents the input of a GetDefaultScraperConfiguration operation.

" + "documentation":"

Represents the input of a GetDefaultScraperConfiguration operation.

" }, "GetDefaultScraperConfigurationResponse":{ "type":"structure", @@ -1193,10 +1193,10 @@ "members":{ "configuration":{ "shape":"Blob", - "documentation":"

The default configuration.

" + "documentation":"

The configuration file. Base 64 encoded. For more information, see Scraper configuration in the Amazon Managed Service for Prometheus User Guide.

" } }, - "documentation":"

Represents the output of a GetDefaultScraperConfiguration operation.

" + "documentation":"

Represents the output of a GetDefaultScraperConfiguration operation.

" }, "IamRoleArn":{ "type":"string", @@ -1207,7 +1207,7 @@ "documentation":"

An identifier used to ensure the idempotency of a write request.

", "max":64, "min":1, - "pattern":"[!-~]+" + "pattern":"^[!-~]+$" }, "Integer":{ "type":"integer", @@ -1228,7 +1228,7 @@ "locationName":"Retry-After" } }, - "documentation":"

Unexpected error during processing of request.

", + "documentation":"

An unexpected error occurred during the processing of the request.

", "error":{"httpStatusCode":500}, "exception":true, "fault":true, @@ -1239,38 +1239,38 @@ "documentation":"

A KMS Key ARN.

", "max":2048, "min":20, - "pattern":"arn:aws:kms:[a-z0-9\\-]+:\\d+:key/[a-f0-9\\-]+" + "pattern":"^arn:aws:kms:[a-z0-9\\-]+:\\d+:key/[a-f0-9\\-]+$" }, "ListRuleGroupsNamespacesRequest":{ "type":"structure", "required":["workspaceId"], "members":{ - "workspaceId":{ - "shape":"WorkspaceId", - "documentation":"

The ID of the workspace.

", - "location":"uri", - "locationName":"workspaceId" + "maxResults":{ + "shape":"ListRuleGroupsNamespacesRequestMaxResultsInteger", + "documentation":"

The maximum number of results to return. The default is 100.

", + "location":"querystring", + "locationName":"maxResults" }, "name":{ "shape":"RuleGroupsNamespaceName", - "documentation":"

Optional filter for rule groups namespace name. Only the rule groups namespace that begin with this value will be returned.

", + "documentation":"

Use this parameter to filter the rule groups namespaces that are returned. Only the namespaces with names that begin with the value that you specify are returned.

", "location":"querystring", "locationName":"name" }, "nextToken":{ "shape":"PaginationToken", - "documentation":"

Pagination token to request the next page in a paginated list. This token is obtained from the output of the previous ListRuleGroupsNamespaces request.

", + "documentation":"

The token for the next set of items to return. You receive this token from a previous call, and use it to get the next page of results. The other parameters must be the same as the initial call.

For example, if your initial request has maxResults of 10, and there are 12 rule groups namespaces to return, then your initial request will return 10 and a nextToken. Using the next token in a subsequent call will return the remaining 2 namespaces.

", "location":"querystring", "locationName":"nextToken" }, - "maxResults":{ - "shape":"ListRuleGroupsNamespacesRequestMaxResultsInteger", - "documentation":"

Maximum results to return in response (default=100, maximum=1000).

", - "location":"querystring", - "locationName":"maxResults" + "workspaceId":{ + "shape":"WorkspaceId", + "documentation":"

The ID of the workspace containing the rule groups namespaces.

", + "location":"uri", + "locationName":"workspaceId" } }, - "documentation":"

Represents the input of a ListRuleGroupsNamespaces operation.

" + "documentation":"

Represents the input of a ListRuleGroupsNamespaces operation.

" }, "ListRuleGroupsNamespacesRequestMaxResultsInteger":{ "type":"integer", @@ -1282,39 +1282,39 @@ "type":"structure", "required":["ruleGroupsNamespaces"], "members":{ - "ruleGroupsNamespaces":{ - "shape":"RuleGroupsNamespaceSummaryList", - "documentation":"

The list of the selected rule groups namespaces.

" - }, "nextToken":{ "shape":"PaginationToken", - "documentation":"

Pagination token to use when requesting the next page in this list.

" + "documentation":"

A token indicating that there are more results to retrieve. You can use this token as part of your next ListRuleGroupsNamespaces request to retrieve those results.

" + }, + "ruleGroupsNamespaces":{ + "shape":"RuleGroupsNamespaceSummaryList", + "documentation":"

The returned list of rule groups namespaces.

" } }, - "documentation":"

Represents the output of a ListRuleGroupsNamespaces operation.

" + "documentation":"

Represents the output of a ListRuleGroupsNamespaces operation.

" }, "ListScrapersRequest":{ "type":"structure", "members":{ "filters":{ "shape":"ScraperFilters", - "documentation":"

A list of scraper filters.

", + "documentation":"

(Optional) A list of key-value pairs to filter the list of scrapers returned. Keys include status, sourceArn, destinationArn, and alias.

Filters on the same key are OR'd together, and filters on different keys are AND'd together. For example, status=ACTIVE&status=CREATING&alias=Test, will return all scrapers that have the alias Test, and are either in status ACTIVE or CREATING.

To find all active scrapers that are sending metrics to a specific Amazon Managed Service for Prometheus workspace, you would use the ARN of the workspace in a query:

status=ACTIVE&destinationArn=arn:aws:aps:us-east-1:123456789012:workspace/ws-example1-1234-abcd-56ef-123456789012

If this is included, it filters the results to only the scrapers that match the filter.

", "location":"querystring" }, - "nextToken":{ - "shape":"PaginationToken", - "documentation":"

Pagination token to request the next page in a paginated list. This token is obtained from the output of the previous ListScrapers request.

", - "location":"querystring", - "locationName":"nextToken" - }, "maxResults":{ "shape":"ListScrapersRequestMaxResultsInteger", - "documentation":"

Maximum results to return in response (default=100, maximum=1000).

", + "documentation":"

(Optional) The maximum number of scrapers to return in one ListScrapers operation. The range is 1-1000.

If you omit this parameter, the default of 100 is used.

", "location":"querystring", "locationName":"maxResults" + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

(Optional) The token for the next set of items to return. (You received this token from a previous call.)

", + "location":"querystring", + "locationName":"nextToken" } }, - "documentation":"

Represents the input of a ListScrapers operation.

" + "documentation":"

Represents the input of a ListScrapers operation.

" }, "ListScrapersRequestMaxResultsInteger":{ "type":"integer", @@ -1326,16 +1326,16 @@ "type":"structure", "required":["scrapers"], "members":{ - "scrapers":{ - "shape":"ScraperSummaryList", - "documentation":"

The list of scrapers, filtered down if a set of filters was provided in the request.

" - }, "nextToken":{ "shape":"PaginationToken", - "documentation":"

Pagination token to use when requesting the next page in this list.

" + "documentation":"

A token indicating that there are more results to retrieve. You can use this token as part of your next ListScrapers operation to retrieve those results.

" + }, + "scrapers":{ + "shape":"ScraperSummaryList", + "documentation":"

A list of ScraperSummary structures giving information about scrapers in the account that match the filters provided.

" } }, - "documentation":"

Represents the output of a ListScrapers operation.

" + "documentation":"

Represents the output of a ListScrapers operation.

" }, "ListTagsForResourceRequest":{ "type":"structure", @@ -1343,7 +1343,7 @@ "members":{ "resourceArn":{ "shape":"String", - "documentation":"

The ARN of the resource.

", + "documentation":"

The ARN of the resource to list tags for. Must be a workspace or rule groups namespace resource.

", "location":"uri", "locationName":"resourceArn" } @@ -1352,32 +1352,35 @@ "ListTagsForResourceResponse":{ "type":"structure", "members":{ - "tags":{"shape":"TagMap"} + "tags":{ + "shape":"TagMap", + "documentation":"

The list of tag keys and values associated with the resource.

" + } } }, "ListWorkspacesRequest":{ "type":"structure", "members":{ - "nextToken":{ - "shape":"PaginationToken", - "documentation":"

Pagination token to request the next page in a paginated list. This token is obtained from the output of the previous ListWorkspaces request.

", - "location":"querystring", - "locationName":"nextToken" - }, "alias":{ "shape":"WorkspaceAlias", - "documentation":"

Optional filter for workspace alias. Only the workspaces with aliases that begin with this value will be returned.

", + "documentation":"

If this is included, it filters the results to only the workspaces with names that start with the value that you specify here.

Amazon Managed Service for Prometheus will automatically strip any blank spaces from the beginning and end of the alias that you specify.

", "location":"querystring", "locationName":"alias" }, "maxResults":{ "shape":"ListWorkspacesRequestMaxResultsInteger", - "documentation":"

Maximum results to return in response (default=100, maximum=1000).

", + "documentation":"

The maximum number of workspaces to return per request. The default is 100.

", "location":"querystring", "locationName":"maxResults" + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

The token for the next set of items to return. You receive this token from a previous call, and use it to get the next page of results. The other parameters must be the same as the initial call.

For example, if your initial request has maxResults of 10, and there are 12 workspaces to return, then your initial request will return 10 and a nextToken. Using the next token in a subsequent call will return the remaining 2 workspaces.

", + "location":"querystring", + "locationName":"nextToken" } }, - "documentation":"

Represents the input of a ListWorkspaces operation.

" + "documentation":"

Represents the input of a ListWorkspaces operation.

" }, "ListWorkspacesRequestMaxResultsInteger":{ "type":"integer", @@ -1389,53 +1392,53 @@ "type":"structure", "required":["workspaces"], "members":{ - "workspaces":{ - "shape":"WorkspaceSummaryList", - "documentation":"

The list of existing workspaces, including those undergoing creation or deletion.

" - }, "nextToken":{ "shape":"PaginationToken", - "documentation":"

Pagination token to use when requesting the next page in this list.

" + "documentation":"

A token indicating that there are more results to retrieve. You can use this token as part of your next ListWorkspaces request to retrieve those results.

" + }, + "workspaces":{ + "shape":"WorkspaceSummaryList", + "documentation":"

An array of WorkspaceSummary structures containing information about the workspaces requested.

" } }, - "documentation":"

Represents the output of a ListWorkspaces operation.

" + "documentation":"

Represents the output of a ListWorkspaces operation.

" }, "LogGroupArn":{ "type":"string", - "pattern":"arn:aws[a-z0-9-]*:logs:[a-z0-9-]+:\\d{12}:log-group:[A-Za-z0-9\\.\\-\\_\\#/]{1,512}\\:\\*" + "pattern":"^arn:aws[a-z0-9-]*:logs:[a-z0-9-]+:\\d{12}:log-group:[A-Za-z0-9\\.\\-\\_\\#/]{1,512}\\:\\*$" }, "LoggingConfigurationMetadata":{ "type":"structure", "required":[ - "status", - "workspace", - "logGroupArn", "createdAt", - "modifiedAt" + "logGroupArn", + "modifiedAt", + "status", + "workspace" ], "members":{ - "status":{ - "shape":"LoggingConfigurationStatus", - "documentation":"

The status of the logging configuration.

" - }, - "workspace":{ - "shape":"WorkspaceId", - "documentation":"

The workspace where the logging configuration exists.

" + "createdAt":{ + "shape":"Timestamp", + "documentation":"

The date and time that the logging configuration was created.

" }, "logGroupArn":{ "shape":"LogGroupArn", - "documentation":"

The ARN of the CW log group to which the vended log data will be published.

" - }, - "createdAt":{ - "shape":"Timestamp", - "documentation":"

The time when the logging configuration was created.

" + "documentation":"

The ARN of the CloudWatch log group to which the vended log data will be published.

" }, "modifiedAt":{ "shape":"Timestamp", - "documentation":"

The time when the logging configuration was modified.

" + "documentation":"

The date and time that the logging configuration was most recently changed.

" + }, + "status":{ + "shape":"LoggingConfigurationStatus", + "documentation":"

The current status of the logging configuration.

" + }, + "workspace":{ + "shape":"WorkspaceId", + "documentation":"

The ID of the workspace the logging configuration is for.

" } }, - "documentation":"

Represents the properties of a logging configuration metadata.

" + "documentation":"

Contains information about the logging configuration.

" }, "LoggingConfigurationStatus":{ "type":"structure", @@ -1443,14 +1446,14 @@ "members":{ "statusCode":{ "shape":"LoggingConfigurationStatusCode", - "documentation":"

Status code of the logging configuration.

" + "documentation":"

The current status of the logging configuration.

" }, "statusReason":{ "shape":"String", - "documentation":"

The reason for failure if any.

" + "documentation":"

If failed, the reason for the failure.

" } }, - "documentation":"

Represents the status of a logging configuration.

" + "documentation":"

The status of the logging configuration.

" }, "LoggingConfigurationStatusCode":{ "type":"string", @@ -1473,27 +1476,27 @@ "PutAlertManagerDefinitionRequest":{ "type":"structure", "required":[ - "workspaceId", - "data" + "data", + "workspaceId" ], "members":{ - "workspaceId":{ - "shape":"WorkspaceId", - "documentation":"

The ID of the workspace in which to update the alert manager definition.

", - "location":"uri", - "locationName":"workspaceId" + "clientToken":{ + "shape":"IdempotencyToken", + "documentation":"

A unique identifier that you can provide to ensure the idempotency of the request. Case-sensitive.

", + "idempotencyToken":true }, "data":{ "shape":"AlertManagerDefinitionData", - "documentation":"

The alert manager definition data.

" + "documentation":"

The alert manager definition to use. A base64-encoded version of the YAML alert manager definition file.

For details about the alert manager definition, see AlertManagedDefinitionData.

" }, - "clientToken":{ - "shape":"IdempotencyToken", - "documentation":"

Optional, unique, case-sensitive, user-provided identifier to ensure the idempotency of the request.

", - "idempotencyToken":true + "workspaceId":{ + "shape":"WorkspaceId", + "documentation":"

The ID of the workspace to update the alert manager definition in.

", + "location":"uri", + "locationName":"workspaceId" } }, - "documentation":"

Represents the input of a PutAlertManagerDefinition operation.

" + "documentation":"

Represents the input of a PutAlertManagerDefinition operation.

" }, "PutAlertManagerDefinitionResponse":{ "type":"structure", @@ -1501,69 +1504,69 @@ "members":{ "status":{ "shape":"AlertManagerDefinitionStatus", - "documentation":"

The status of alert manager definition.

" + "documentation":"

A structure that returns the current status of the alert manager definition.

" } }, - "documentation":"

Represents the output of a PutAlertManagerDefinition operation.

" + "documentation":"

Represents the output of a PutAlertManagerDefinition operation.

" }, "PutRuleGroupsNamespaceRequest":{ "type":"structure", "required":[ - "workspaceId", + "data", "name", - "data" + "workspaceId" ], "members":{ - "workspaceId":{ - "shape":"WorkspaceId", - "documentation":"

The ID of the workspace in which to update the rule group namespace.

", - "location":"uri", - "locationName":"workspaceId" + "clientToken":{ + "shape":"IdempotencyToken", + "documentation":"

A unique identifier that you can provide to ensure the idempotency of the request. Case-sensitive.

", + "idempotencyToken":true + }, + "data":{ + "shape":"RuleGroupsNamespaceData", + "documentation":"

The new rules file to use in the namespace. A base64-encoded version of the YAML rule groups file.

For details about the rule groups namespace structure, see RuleGroupsNamespaceData.

" }, "name":{ "shape":"RuleGroupsNamespaceName", - "documentation":"

The rule groups namespace name.

", + "documentation":"

The name of the rule groups namespace that you are updating.

", "location":"uri", "locationName":"name" }, - "data":{ - "shape":"RuleGroupsNamespaceData", - "documentation":"

The namespace data that define the rule groups.

" - }, - "clientToken":{ - "shape":"IdempotencyToken", - "documentation":"

Optional, unique, case-sensitive, user-provided identifier to ensure the idempotency of the request.

", - "idempotencyToken":true + "workspaceId":{ + "shape":"WorkspaceId", + "documentation":"

The ID of the workspace where you are updating the rule groups namespace.

", + "location":"uri", + "locationName":"workspaceId" } }, - "documentation":"

Represents the input of a PutRuleGroupsNamespace operation.

" + "documentation":"

Represents the input of a PutRuleGroupsNamespace operation.

" }, "PutRuleGroupsNamespaceResponse":{ "type":"structure", "required":[ - "name", "arn", + "name", "status" ], "members":{ - "name":{ - "shape":"RuleGroupsNamespaceName", - "documentation":"

The rule groups namespace name.

" - }, "arn":{ "shape":"RuleGroupsNamespaceArn", - "documentation":"

The Amazon Resource Name (ARN) of this rule groups namespace.

" + "documentation":"

The ARN of the rule groups namespace.

" + }, + "name":{ + "shape":"RuleGroupsNamespaceName", + "documentation":"

The name of the rule groups namespace that was updated.

" }, "status":{ "shape":"RuleGroupsNamespaceStatus", - "documentation":"

The status of rule groups namespace.

" + "documentation":"

A structure that includes the current status of the rule groups namespace.

" }, "tags":{ "shape":"TagMap", - "documentation":"

The tags of this rule groups namespace.

" + "documentation":"

The list of tag keys and values that are associated with the namespace.

" } }, - "documentation":"

Represents the output of a PutRuleGroupsNamespace operation.

" + "documentation":"

Represents the output of a PutRuleGroupsNamespace operation.

" }, "ResourceNotFoundException":{ "type":"structure", @@ -1586,7 +1589,7 @@ "documentation":"

Type of the resource affected.

" } }, - "documentation":"

Request references a resource which does not exist.

", + "documentation":"

The request references a resources that doesn't exist.

", "error":{ "httpStatusCode":404, "senderFault":true @@ -1605,50 +1608,50 @@ "type":"structure", "required":[ "arn", - "name", - "status", - "data", "createdAt", - "modifiedAt" + "data", + "modifiedAt", + "name", + "status" ], "members":{ "arn":{ "shape":"RuleGroupsNamespaceArn", - "documentation":"

The Amazon Resource Name (ARN) of this rule groups namespace.

" + "documentation":"

The ARN of the rule groups namespace.

" }, - "name":{ - "shape":"RuleGroupsNamespaceName", - "documentation":"

The rule groups namespace name.

" - }, - "status":{ - "shape":"RuleGroupsNamespaceStatus", - "documentation":"

The status of rule groups namespace.

" + "createdAt":{ + "shape":"Timestamp", + "documentation":"

The date and time that the rule groups namespace was created.

" }, "data":{ "shape":"RuleGroupsNamespaceData", - "documentation":"

The rule groups namespace data.

" - }, - "createdAt":{ - "shape":"Timestamp", - "documentation":"

The time when the rule groups namespace was created.

" + "documentation":"

The rule groups file used in the namespace.

For details about the rule groups namespace structure, see RuleGroupsNamespaceData.

" }, "modifiedAt":{ "shape":"Timestamp", - "documentation":"

The time when the rule groups namespace was modified.

" + "documentation":"

The date and time that the rule groups namespace was most recently changed.

" + }, + "name":{ + "shape":"RuleGroupsNamespaceName", + "documentation":"

The name of the rule groups namespace.

" + }, + "status":{ + "shape":"RuleGroupsNamespaceStatus", + "documentation":"

The current status of the rule groups namespace.

" }, "tags":{ "shape":"TagMap", - "documentation":"

The tags of this rule groups namespace.

" + "documentation":"

The list of tag keys and values that are associated with the rule groups namespace.

" } }, - "documentation":"

Represents a description of the rule groups namespace.

" + "documentation":"

The details about one rule groups namespace.

" }, "RuleGroupsNamespaceName":{ "type":"string", - "documentation":"

The namespace name that the rule group belong to.

", + "documentation":"

The name of the namespace that the rule group belongs to.

", "max":64, "min":1, - "pattern":".*[0-9A-Za-z][-.0-9A-Z_a-z]*.*" + "pattern":"[0-9A-Za-z][-.0-9A-Z_a-z]*" }, "RuleGroupsNamespaceStatus":{ "type":"structure", @@ -1656,14 +1659,14 @@ "members":{ "statusCode":{ "shape":"RuleGroupsNamespaceStatusCode", - "documentation":"

Status code of this namespace.

" + "documentation":"

The current status of the namespace.

" }, "statusReason":{ "shape":"String", - "documentation":"

The reason for failure if any.

" + "documentation":"

The reason for the failure, if any.

" } }, - "documentation":"

Represents the status of a namespace.

" + "documentation":"

The status information about a rule groups namespace.

" }, "RuleGroupsNamespaceStatusCode":{ "type":"string", @@ -1681,38 +1684,38 @@ "type":"structure", "required":[ "arn", - "name", - "status", "createdAt", - "modifiedAt" + "modifiedAt", + "name", + "status" ], "members":{ "arn":{ "shape":"RuleGroupsNamespaceArn", - "documentation":"

The Amazon Resource Name (ARN) of this rule groups namespace.

" - }, - "name":{ - "shape":"RuleGroupsNamespaceName", - "documentation":"

The rule groups namespace name.

" - }, - "status":{ - "shape":"RuleGroupsNamespaceStatus", - "documentation":"

The status of rule groups namespace.

" + "documentation":"

The ARN of the rule groups namespace.

" }, "createdAt":{ "shape":"Timestamp", - "documentation":"

The time when the rule groups namespace was created.

" + "documentation":"

The date and time that the rule groups namespace was created.

" }, "modifiedAt":{ "shape":"Timestamp", - "documentation":"

The time when the rule groups namespace was modified.

" + "documentation":"

The date and time that the rule groups namespace was most recently changed.

" + }, + "name":{ + "shape":"RuleGroupsNamespaceName", + "documentation":"

The name of the rule groups namespace.

" + }, + "status":{ + "shape":"RuleGroupsNamespaceStatus", + "documentation":"

A structure that displays the current status of the rule groups namespace.

" }, "tags":{ "shape":"TagMap", - "documentation":"

The tags of this rule groups namespace.

" + "documentation":"

The list of tag keys and values that are associated with the rule groups namespace.

" } }, - "documentation":"

Represents a summary of the rule groups namespace.

" + "documentation":"

The high-level information about a rule groups namespace. To retrieve more information, use DescribeRuleGroupsNamespace.

" }, "RuleGroupsNamespaceSummaryList":{ "type":"list", @@ -1724,10 +1727,10 @@ "members":{ "configurationBlob":{ "shape":"Blob", - "documentation":"

Binary data representing a Prometheus configuration file.

" + "documentation":"

The base 64 encoded scrape configuration file.

" } }, - "documentation":"

A representation of a Prometheus configuration file.

", + "documentation":"

A scrape configuration for a scraper, base 64 encoded. For more information, see Scraper configuration in the Amazon Managed Service for Prometheus User Guide.

", "union":true }, "ScraperAlias":{ @@ -1735,7 +1738,7 @@ "documentation":"

A user-assigned scraper alias.

", "max":100, "min":1, - "pattern":"[0-9A-Za-z][-.0-9A-Z_a-z]*" + "pattern":"^[0-9A-Za-z][-.0-9A-Z_a-z]*$" }, "ScraperArn":{ "type":"string", @@ -1744,77 +1747,77 @@ "ScraperDescription":{ "type":"structure", "required":[ - "scraperId", "arn", - "roleArn", - "status", "createdAt", + "destination", "lastModifiedAt", + "roleArn", "scrapeConfiguration", + "scraperId", "source", - "destination" + "status" ], "members":{ "alias":{ "shape":"ScraperAlias", - "documentation":"

Alias of this scraper.

" - }, - "scraperId":{ - "shape":"ScraperId", - "documentation":"

Unique string identifying this scraper.

" + "documentation":"

(Optional) A name associated with the scraper.

" }, "arn":{ "shape":"ScraperArn", - "documentation":"

The Amazon Resource Name (ARN) of this scraper.

" - }, - "roleArn":{ - "shape":"IamRoleArn", - "documentation":"

The Amazon Resource Name (ARN) of the IAM role that provides permissions for the scraper to dsicover, collect, and produce metrics on your behalf.

" - }, - "status":{ - "shape":"ScraperStatus", - "documentation":"

The status of this scraper.

" + "documentation":"

The Amazon Resource Name (ARN) of the scraper.

" }, "createdAt":{ "shape":"Timestamp", - "documentation":"

The time when the scraper was created.

" + "documentation":"

The date and time that the scraper was created.

" + }, + "destination":{ + "shape":"Destination", + "documentation":"

The Amazon Managed Service for Prometheus workspace the scraper sends metrics to.

" }, "lastModifiedAt":{ "shape":"Timestamp", - "documentation":"

The time when the scraper was last modified.

" - }, - "tags":{ - "shape":"TagMap", - "documentation":"

The tags of this scraper.

" + "documentation":"

The date and time that the scraper was last modified.

" }, - "statusReason":{ - "shape":"StatusReason", - "documentation":"

The reason for failure if any.

" + "roleArn":{ + "shape":"IamRoleArn", + "documentation":"

The Amazon Resource Name (ARN) of the IAM role that provides permissions for the scraper to discover and collect metrics on your behalf.

" }, "scrapeConfiguration":{ "shape":"ScrapeConfiguration", - "documentation":"

The configuration used to create the scraper.

" + "documentation":"

The configuration file in use by the scraper.

" + }, + "scraperId":{ + "shape":"ScraperId", + "documentation":"

The ID of the scraper.

" }, "source":{ "shape":"Source", - "documentation":"

The source that the scraper is discovering and collecting metrics from.

" + "documentation":"

The Amazon EKS cluster from which the scraper collects metrics.

" }, - "destination":{ - "shape":"Destination", - "documentation":"

The destination that the scraper is producing metrics to.

" + "status":{ + "shape":"ScraperStatus", + "documentation":"

A structure that contains the current status of the scraper.

" + }, + "statusReason":{ + "shape":"StatusReason", + "documentation":"

If there is a failure, the reason for the failure.

" + }, + "tags":{ + "shape":"TagMap", + "documentation":"

(Optional) The list of tag keys and values associated with the scraper.

" } }, - "documentation":"

Represents the properties of a scraper.

" + "documentation":"

The ScraperDescription structure contains the full details about one scraper in your account.

" }, "ScraperFilters":{ "type":"map", "key":{ "shape":"FilterKey", - "documentation":"

The name of the key to filter by. Currently supported filter keys are 'status', 'sourceArn', 'destinationArn', and 'alias'.

" + "documentation":"

The name of the key to filter by. Currently supported filter keys are status, sourceArn, destinationArn, and alias.

" }, "value":{ "shape":"FilterValues", - "documentation":"

The values of the given key to filter by.

" + "documentation":"

The values of the given key by which to filter.

" }, "documentation":"

A list of scraper filters.

", "max":4, @@ -1825,7 +1828,7 @@ "documentation":"

A scraper ID.

", "max":64, "min":1, - "pattern":"[0-9A-Za-z][-.0-9A-Z_a-z]*" + "pattern":"^[0-9A-Za-z][-.0-9A-Z_a-z]*$" }, "ScraperStatus":{ "type":"structure", @@ -1833,10 +1836,10 @@ "members":{ "statusCode":{ "shape":"ScraperStatusCode", - "documentation":"

Status code of this scraper.

" + "documentation":"

The current status of the scraper.

" } }, - "documentation":"

Represents the status of a scraper.

" + "documentation":"

The ScraperStatus structure contains status information about the scraper.

" }, "ScraperStatusCode":{ "type":"string", @@ -1852,62 +1855,62 @@ "ScraperSummary":{ "type":"structure", "required":[ - "scraperId", "arn", - "roleArn", - "status", "createdAt", + "destination", "lastModifiedAt", + "roleArn", + "scraperId", "source", - "destination" + "status" ], "members":{ "alias":{ "shape":"ScraperAlias", - "documentation":"

Alias of this scraper.

" - }, - "scraperId":{ - "shape":"ScraperId", - "documentation":"

Unique string identifying this scraper.

" + "documentation":"

(Optional) A name associated with the scraper.

" }, "arn":{ "shape":"ScraperArn", - "documentation":"

The Amazon Resource Name (ARN) of this scraper.

" - }, - "roleArn":{ - "shape":"IamRoleArn", - "documentation":"

The Amazon Resource Name (ARN) of the IAM role that provides permissions for the scraper to dsicover, collect, and produce metrics on your behalf.

" - }, - "status":{ - "shape":"ScraperStatus", - "documentation":"

The status of this scraper.

" + "documentation":"

The Amazon Resource Name (ARN) of the scraper.

" }, "createdAt":{ "shape":"Timestamp", - "documentation":"

The time when the scraper was created.

" + "documentation":"

The date and time that the scraper was created.

" + }, + "destination":{ + "shape":"Destination", + "documentation":"

The Amazon Managed Service for Prometheus workspace the scraper sends metrics to.

" }, "lastModifiedAt":{ "shape":"Timestamp", - "documentation":"

The time when the scraper was last modified.

" + "documentation":"

The date and time that the scraper was last modified.

" }, - "tags":{ - "shape":"TagMap", - "documentation":"

The tags of this scraper.

" + "roleArn":{ + "shape":"IamRoleArn", + "documentation":"

The Amazon Resource Name (ARN) of the IAM role that provides permissions for the scraper to discover and collect metrics on your behalf.

" }, - "statusReason":{ - "shape":"StatusReason", - "documentation":"

The reason for failure if any.

" + "scraperId":{ + "shape":"ScraperId", + "documentation":"

The ID of the scraper.

" }, "source":{ "shape":"Source", - "documentation":"

The source that the scraper is discovering and collecting metrics from.

" + "documentation":"

The Amazon EKS cluster from which the scraper collects metrics.

" }, - "destination":{ - "shape":"Destination", - "documentation":"

The destination that the scraper is producing metrics to.

" + "status":{ + "shape":"ScraperStatus", + "documentation":"

A structure that contains the current status of the scraper.

" + }, + "statusReason":{ + "shape":"StatusReason", + "documentation":"

If there is a failure, the reason for the failure.

" + }, + "tags":{ + "shape":"TagMap", + "documentation":"

(Optional) The list of tag keys and values associated with the scraper.

" } }, - "documentation":"

Represents a summary of the properties of a scraper.

" + "documentation":"

The ScraperSummary structure contains a summary of the details about one scraper in your account.

" }, "ScraperSummaryList":{ "type":"list", @@ -1919,7 +1922,7 @@ "documentation":"

ID of a VPC security group.

", "max":255, "min":0, - "pattern":"sg-[0-9a-z]+" + "pattern":"^sg-[0-9a-z]+$" }, "SecurityGroupIds":{ "type":"list", @@ -1932,16 +1935,20 @@ "type":"structure", "required":[ "message", + "quotaCode", "resourceId", "resourceType", - "serviceCode", - "quotaCode" + "serviceCode" ], "members":{ "message":{ "shape":"String", "documentation":"

Description of the error.

" }, + "quotaCode":{ + "shape":"String", + "documentation":"

Service quotas code of the originating quota.

" + }, "resourceId":{ "shape":"String", "documentation":"

Identifier of the resource affected.

" @@ -1952,14 +1959,10 @@ }, "serviceCode":{ "shape":"String", - "documentation":"

Service Quotas requirement to identify originating service.

" - }, - "quotaCode":{ - "shape":"String", - "documentation":"

Service Quotas requirement to identify originating quota.

" + "documentation":"

Service quotas code for the originating service.

" } }, - "documentation":"

Request would cause a service quota to be exceeded.

", + "documentation":"

Completing the request would cause a service quota to be exceeded.

", "error":{ "httpStatusCode":402, "senderFault":true @@ -1971,15 +1974,15 @@ "members":{ "eksConfiguration":{ "shape":"EksConfiguration", - "documentation":"

A representation of an EKS source.

" + "documentation":"

The Amazon EKS cluster from which a scraper collects metrics.

" } }, - "documentation":"

A representation of a source that a scraper can discover and collect metrics from.

", + "documentation":"

The source of collected metrics for a scraper.

", "union":true }, "StatusReason":{ "type":"string", - "documentation":"

The reason for failure if any.

", + "documentation":"

The reason for the failure, if any.

", "max":256, "min":1 }, @@ -1989,7 +1992,7 @@ "documentation":"

ID of a VPC subnet.

", "max":255, "min":0, - "pattern":"subnet-[0-9a-z]+" + "pattern":"^subnet-[0-9a-z]+$" }, "SubnetIds":{ "type":"list", @@ -2002,7 +2005,7 @@ "type":"string", "max":128, "min":1, - "pattern":"([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)" + "pattern":"^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)$" }, "TagKeys":{ "type":"list", @@ -2012,11 +2015,11 @@ "type":"map", "key":{ "shape":"TagKey", - "documentation":"

The key of the tag.

Constraints: Tag keys are case-sensitive and accept a maximum of 128 Unicode characters. Valid characters are Unicode letters, digits, white space, and any of the following symbols: _ . : / = + - @ May not begin with aws:.

" + "documentation":"

The key of the tag. May not begin with aws:.

" }, "value":{ "shape":"TagValue", - "documentation":"

The value of the tag.

Constraints: Tag values are case-sensitive and accept a maximum of 256 Unicode characters. Valid characters are Unicode letters, digits, white space, and any of the following symbols: _ . : / = + - @

" + "documentation":"

The value of the tag.

" }, "documentation":"

The list of tags assigned to the resource.

", "max":50, @@ -2031,11 +2034,14 @@ "members":{ "resourceArn":{ "shape":"String", - "documentation":"

The ARN of the resource.

", + "documentation":"

The ARN of the workspace or rule groups namespace to apply tags to.

", "location":"uri", "locationName":"resourceArn" }, - "tags":{"shape":"TagMap"} + "tags":{ + "shape":"TagMap", + "documentation":"

The list of tag keys and values to associate with the resource.

Keys may not begin with aws:.

" + } } }, "TagResourceResponse":{ @@ -2047,7 +2053,7 @@ "type":"string", "max":256, "min":0, - "pattern":"([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)" + "pattern":"^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)$" }, "ThrottlingException":{ "type":"structure", @@ -2057,22 +2063,22 @@ "shape":"String", "documentation":"

Description of the error.

" }, - "serviceCode":{ - "shape":"String", - "documentation":"

Service Quotas requirement to identify originating service.

" - }, "quotaCode":{ "shape":"String", - "documentation":"

Service Quotas requirement to identify originating quota.

" + "documentation":"

Service quotas code for the originating quota.

" }, "retryAfterSeconds":{ "shape":"Integer", "documentation":"

Advice to clients on when the call can be safely retried.

", "location":"header", "locationName":"Retry-After" + }, + "serviceCode":{ + "shape":"String", + "documentation":"

Service quotas code for the originating service.

" } }, - "documentation":"

Request was denied due to request throttling.

", + "documentation":"

The request was denied due to request throttling.

", "error":{ "httpStatusCode":429, "senderFault":true @@ -2090,13 +2096,13 @@ "members":{ "resourceArn":{ "shape":"String", - "documentation":"

The ARN of the resource.

", + "documentation":"

The ARN of the workspace or rule groups namespace.

", "location":"uri", "locationName":"resourceArn" }, "tagKeys":{ "shape":"TagKeys", - "documentation":"

One or more tag keys

", + "documentation":"

The keys of the tags to remove.

", "location":"querystring", "locationName":"tagKeys" } @@ -2110,27 +2116,27 @@ "UpdateLoggingConfigurationRequest":{ "type":"structure", "required":[ - "workspaceId", - "logGroupArn" + "logGroupArn", + "workspaceId" ], "members":{ - "workspaceId":{ - "shape":"WorkspaceId", - "documentation":"

The ID of the workspace to vend logs to.

", - "location":"uri", - "locationName":"workspaceId" + "clientToken":{ + "shape":"IdempotencyToken", + "documentation":"

A unique identifier that you can provide to ensure the idempotency of the request. Case-sensitive.

", + "idempotencyToken":true }, "logGroupArn":{ "shape":"LogGroupArn", - "documentation":"

The ARN of the CW log group to which the vended log data will be published.

" + "documentation":"

The ARN of the CloudWatch log group to which the vended log data will be published.

" }, - "clientToken":{ - "shape":"IdempotencyToken", - "documentation":"

Optional, unique, case-sensitive, user-provided identifier to ensure the idempotency of the request.

", - "idempotencyToken":true + "workspaceId":{ + "shape":"WorkspaceId", + "documentation":"

The ID of the workspace to update the logging configuration for.

", + "location":"uri", + "locationName":"workspaceId" } }, - "documentation":"

Represents the input of an UpdateLoggingConfiguration operation.

" + "documentation":"

Represents the input of an UpdateLoggingConfiguration operation.

" }, "UpdateLoggingConfigurationResponse":{ "type":"structure", @@ -2138,32 +2144,32 @@ "members":{ "status":{ "shape":"LoggingConfigurationStatus", - "documentation":"

The status of the logging configuration.

" + "documentation":"

A structure that contains the current status of the logging configuration.

" } }, - "documentation":"

Represents the output of an UpdateLoggingConfiguration operation.

" + "documentation":"

Represents the output of an UpdateLoggingConfiguration operation.

" }, "UpdateWorkspaceAliasRequest":{ "type":"structure", "required":["workspaceId"], "members":{ - "workspaceId":{ - "shape":"WorkspaceId", - "documentation":"

The ID of the workspace being updated.

", - "location":"uri", - "locationName":"workspaceId" - }, "alias":{ "shape":"WorkspaceAlias", - "documentation":"

The new alias of the workspace.

" + "documentation":"

The new alias for the workspace. It does not need to be unique.

Amazon Managed Service for Prometheus will automatically strip any blank spaces from the beginning and end of the alias that you specify.

" }, "clientToken":{ "shape":"IdempotencyToken", - "documentation":"

Optional, unique, case-sensitive, user-provided identifier to ensure the idempotency of the request.

", + "documentation":"

A unique identifier that you can provide to ensure the idempotency of the request. Case-sensitive.

", "idempotencyToken":true + }, + "workspaceId":{ + "shape":"WorkspaceId", + "documentation":"

The ID of the workspace to update.

", + "location":"uri", + "locationName":"workspaceId" } }, - "documentation":"

Represents the input of an UpdateWorkspaceAlias operation.

" + "documentation":"

Represents the input of an UpdateWorkspaceAlias operation.

" }, "Uri":{ "type":"string", @@ -2177,6 +2183,10 @@ "reason" ], "members":{ + "fieldList":{ + "shape":"ValidationExceptionFieldList", + "documentation":"

The field that caused the error, if applicable.

" + }, "message":{ "shape":"String", "documentation":"

Description of the error.

" @@ -2184,13 +2194,9 @@ "reason":{ "shape":"ValidationExceptionReason", "documentation":"

Reason the request failed validation.

" - }, - "fieldList":{ - "shape":"ValidationExceptionFieldList", - "documentation":"

The field that caused the error, if applicable. If more than one field caused the error, pick one and elaborate in the message.

" } }, - "documentation":"

The input fails to satisfy the constraints specified by an AWS service.

", + "documentation":"

The input fails to satisfy the constraints specified by an Amazon Web Services service.

", "error":{ "httpStatusCode":400, "senderFault":true @@ -2200,20 +2206,20 @@ "ValidationExceptionField":{ "type":"structure", "required":[ - "name", - "message" + "message", + "name" ], "members":{ - "name":{ + "message":{ "shape":"String", - "documentation":"

The field name.

" + "documentation":"

A message describing why the field caused an exception.

" }, - "message":{ + "name":{ "shape":"String", - "documentation":"

Message describing why the field failed validation.

" + "documentation":"

The name of the field that caused an exception.

" } }, - "documentation":"

Stores information about a field passed inside a request that resulted in an exception.

" + "documentation":"

Information about a field passed into a request that resulted in an exception.

" }, "ValidationExceptionFieldList":{ "type":"list", @@ -2239,58 +2245,58 @@ "WorkspaceArn":{ "type":"string", "documentation":"

An ARN identifying a Workspace.

", - "pattern":"arn:aws[-a-z]*:aps:[-a-z0-9]+:[0-9]{12}:workspace/.+" + "pattern":"^arn:aws[-a-z]*:aps:[-a-z0-9]+:[0-9]{12}:workspace/.+$" }, "WorkspaceDescription":{ "type":"structure", "required":[ - "workspaceId", "arn", + "createdAt", "status", - "createdAt" + "workspaceId" ], "members":{ - "workspaceId":{ - "shape":"WorkspaceId", - "documentation":"

Unique string identifying this workspace.

" - }, "alias":{ "shape":"WorkspaceAlias", - "documentation":"

Alias of this workspace.

" + "documentation":"

The alias that is assigned to this workspace to help identify it. It may not be unique.

" }, "arn":{ "shape":"WorkspaceArn", - "documentation":"

The Amazon Resource Name (ARN) of this workspace.

" + "documentation":"

The ARN of the workspace.

" }, - "status":{ - "shape":"WorkspaceStatus", - "documentation":"

The status of this workspace.

" + "createdAt":{ + "shape":"Timestamp", + "documentation":"

The date and time that the workspace was created.

" + }, + "kmsKeyArn":{ + "shape":"KmsKeyArn", + "documentation":"

(optional) If the workspace was created with a customer managed KMS key, the ARN for the key used.

" }, "prometheusEndpoint":{ "shape":"Uri", - "documentation":"

Prometheus endpoint URI.

" + "documentation":"

The Prometheus endpoint available for this workspace.

" }, - "createdAt":{ - "shape":"Timestamp", - "documentation":"

The time when the workspace was created.

" + "status":{ + "shape":"WorkspaceStatus", + "documentation":"

The current status of the workspace.

" }, "tags":{ "shape":"TagMap", - "documentation":"

The tags of this workspace.

" + "documentation":"

The list of tag keys and values that are associated with the workspace.

" }, - "kmsKeyArn":{ - "shape":"KmsKeyArn", - "documentation":"

The customer managed KMS key of this workspace.

" + "workspaceId":{ + "shape":"WorkspaceId", + "documentation":"

The unique ID for the workspace.

" } }, - "documentation":"

Represents the properties of a workspace.

" + "documentation":"

The full details about one Amazon Managed Service for Prometheus workspace in your account.

" }, "WorkspaceId":{ "type":"string", "documentation":"

A workspace ID.

", "max":64, "min":1, - "pattern":".*[0-9A-Za-z][-.0-9A-Z_a-z]*.*" + "pattern":"[0-9A-Za-z][-.0-9A-Z_a-z]*" }, "WorkspaceStatus":{ "type":"structure", @@ -2298,10 +2304,10 @@ "members":{ "statusCode":{ "shape":"WorkspaceStatusCode", - "documentation":"

Status code of this workspace.

" + "documentation":"

The current status of the workspace.

" } }, - "documentation":"

Represents the status of a workspace.

" + "documentation":"

The status of the workspace.

" }, "WorkspaceStatusCode":{ "type":"string", @@ -2317,42 +2323,42 @@ "WorkspaceSummary":{ "type":"structure", "required":[ - "workspaceId", "arn", + "createdAt", "status", - "createdAt" + "workspaceId" ], "members":{ - "workspaceId":{ - "shape":"WorkspaceId", - "documentation":"

Unique string identifying this workspace.

" - }, "alias":{ "shape":"WorkspaceAlias", - "documentation":"

Alias of this workspace.

" + "documentation":"

The alias that is assigned to this workspace to help identify it. It may not be unique.

" }, "arn":{ "shape":"WorkspaceArn", - "documentation":"

The AmazonResourceName of this workspace.

" - }, - "status":{ - "shape":"WorkspaceStatus", - "documentation":"

The status of this workspace.

" + "documentation":"

The ARN of the workspace.

" }, "createdAt":{ "shape":"Timestamp", - "documentation":"

The time when the workspace was created.

" + "documentation":"

The date and time that the workspace was created.

" + }, + "kmsKeyArn":{ + "shape":"KmsKeyArn", + "documentation":"

(optional) If the workspace was created with a customer managed KMS key, the ARN for the key used.

" + }, + "status":{ + "shape":"WorkspaceStatus", + "documentation":"

The current status of the workspace.

" }, "tags":{ "shape":"TagMap", - "documentation":"

The tags of this workspace.

" + "documentation":"

The list of tag keys and values that are associated with the workspace.

" }, - "kmsKeyArn":{ - "shape":"KmsKeyArn", - "documentation":"

Customer managed KMS key ARN for this workspace

" + "workspaceId":{ + "shape":"WorkspaceId", + "documentation":"

The unique ID for the workspace.

" } }, - "documentation":"

Represents a summary of the properties of a workspace.

" + "documentation":"

The information about one Amazon Managed Service for Prometheus workspace in your account.

" }, "WorkspaceSummaryList":{ "type":"list", @@ -2360,5 +2366,5 @@ "documentation":"

A list of workspace summaries.

" } }, - "documentation":"

Amazon Managed Service for Prometheus

" + "documentation":"

Amazon Managed Service for Prometheus is a serverless, Prometheus-compatible monitoring service for container metrics that makes it easier to securely monitor container environments at scale. With Amazon Managed Service for Prometheus, you can use the same open-source Prometheus data model and query language that you use today to monitor the performance of your containerized workloads, and also enjoy improved scalability, availability, and security without having to manage the underlying infrastructure.

For more information about Amazon Managed Service for Prometheus, see the Amazon Managed Service for Prometheus User Guide.

Amazon Managed Service for Prometheus includes two APIs.

" } diff -Nru awscli-2.15.9/awscli/botocore/data/amplify/2017-07-25/service-2.json awscli-2.15.22/awscli/botocore/data/amplify/2017-07-25/service-2.json --- awscli-2.15.9/awscli/botocore/data/amplify/2017-07-25/service-2.json 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/awscli/botocore/data/amplify/2017-07-25/service-2.json 2024-02-21 17:34:54.000000000 +0000 @@ -97,7 +97,7 @@ {"shape":"LimitExceededException"}, {"shape":"DependentServiceFailureException"} ], - "documentation":"

Creates a new domain association for an Amplify app. This action associates a custom domain with the Amplify app

" + "documentation":"

Creates a new domain association for an Amplify app. This action associates a custom domain with the Amplify app

" }, "CreateWebhook":{ "name":"CreateWebhook", @@ -183,7 +183,7 @@ {"shape":"InternalFailureException"}, {"shape":"DependentServiceFailureException"} ], - "documentation":"

Deletes a domain association for an Amplify app.

" + "documentation":"

Deletes a domain association for an Amplify app.

" }, "DeleteJob":{ "name":"DeleteJob", @@ -314,7 +314,7 @@ {"shape":"NotFoundException"}, {"shape":"InternalFailureException"} ], - "documentation":"

Returns the domain information for an Amplify app.

" + "documentation":"

Returns the domain information for an Amplify app.

" }, "GetJob":{ "name":"GetJob", @@ -424,7 +424,7 @@ {"shape":"UnauthorizedException"}, {"shape":"InternalFailureException"} ], - "documentation":"

Returns the domain associations for an Amplify app.

" + "documentation":"

Returns the domain associations for an Amplify app.

" }, "ListJobs":{ "name":"ListJobs", @@ -602,7 +602,7 @@ {"shape":"InternalFailureException"}, {"shape":"DependentServiceFailureException"} ], - "documentation":"

Creates a new domain association for an Amplify app.

" + "documentation":"

Creates a new domain association for an Amplify app.

" }, "UpdateWebhook":{ "name":"UpdateWebhook", @@ -1112,6 +1112,53 @@ "pattern":"(?s).+", "sensitive":true }, + "Certificate":{ + "type":"structure", + "required":["type"], + "members":{ + "type":{ + "shape":"CertificateType", + "documentation":"

The type of SSL/TLS certificate that you want to use.

Specify AMPLIFY_MANAGED to use the default certificate that Amplify provisions for you.

Specify CUSTOM to use your own certificate that you have already added to Certificate Manager in your Amazon Web Services account. Make sure you request (or import) the certificate in the US East (N. Virginia) Region (us-east-1). For more information about using ACM, see Importing certificates into Certificate Manager in the ACM User guide .

" + }, + "customCertificateArn":{ + "shape":"CertificateArn", + "documentation":"

The Amazon resource name (ARN) for a custom certificate that you have already added to Certificate Manager in your Amazon Web Services account.

This field is required only when the certificate type is CUSTOM.

" + }, + "certificateVerificationDNSRecord":{ + "shape":"CertificateVerificationDNSRecord", + "documentation":"

The DNS record for certificate verification.

" + } + }, + "documentation":"

Describes the current SSL/TLS certificate that is in use for the domain. If you are using CreateDomainAssociation to create a new domain association, Certificate describes the new certificate that you are creating.

" + }, + "CertificateArn":{ + "type":"string", + "max":1000, + "min":0, + "pattern":"^arn:aws:acm:[a-z0-9-]+:\\d{12}:certificate\\/.+$" + }, + "CertificateSettings":{ + "type":"structure", + "required":["type"], + "members":{ + "type":{ + "shape":"CertificateType", + "documentation":"

The certificate type.

Specify AMPLIFY_MANAGED to use the default certificate that Amplify provisions for you.

Specify CUSTOM to use your own certificate that you have already added to Certificate Manager in your Amazon Web Services account. Make sure you request (or import) the certificate in the US East (N. Virginia) Region (us-east-1). For more information about using ACM, see Importing certificates into Certificate Manager in the ACM User guide.

" + }, + "customCertificateArn":{ + "shape":"CertificateArn", + "documentation":"

The Amazon resource name (ARN) for the custom certificate that you have already added to Certificate Manager in your Amazon Web Services account.

This field is required only when the certificate type is CUSTOM.

" + } + }, + "documentation":"

The type of SSL/TLS certificate to use for your custom domain. If a certificate type isn't specified, Amplify uses the default AMPLIFY_MANAGED certificate.

" + }, + "CertificateType":{ + "type":"string", + "enum":[ + "AMPLIFY_MANAGED", + "CUSTOM" + ] + }, "CertificateVerificationDNSRecord":{ "type":"string", "max":1000 @@ -1444,6 +1491,10 @@ "autoSubDomainIAMRole":{ "shape":"AutoSubDomainIAMRole", "documentation":"

The required AWS Identity and Access Management (IAM) service role for the Amazon Resource Name (ARN) for automatically creating subdomains.

" + }, + "certificateSettings":{ + "shape":"CertificateSettings", + "documentation":"

The type of SSL/TLS certificate to use for your custom domain. If you don't specify a certificate type, Amplify uses the default certificate that it provisions and manages for you.

" } }, "documentation":"

The request structure for the create domain association request.

" @@ -1527,7 +1578,7 @@ }, "status":{ "shape":"Status", - "documentation":"

The status code for a URL rewrite or redirect rule.

200

Represents a 200 rewrite rule.

301

Represents a 301 (moved pemanently) redirect rule. This and all future requests should be directed to the target URL.

302

Represents a 302 temporary redirect rule.

404

Represents a 404 redirect rule.

404-200

Represents a 404 rewrite rule.

" + "documentation":"

The status code for a URL rewrite or redirect rule.

200

Represents a 200 rewrite rule.

301

Represents a 301 (moved permanently) redirect rule. This and all future requests should be directed to the target URL.

302

Represents a 302 temporary redirect rule.

404

Represents a 404 redirect rule.

404-200

Represents a 404 rewrite rule.

" }, "condition":{ "shape":"Condition", @@ -1786,9 +1837,13 @@ "shape":"DomainStatus", "documentation":"

The current status of the domain association.

" }, + "updateStatus":{ + "shape":"UpdateStatus", + "documentation":"

The status of the domain update operation that is currently in progress. The following list describes the valid update states.

REQUESTING_CERTIFICATE

The certificate is in the process of being updated.

PENDING_VERIFICATION

Indicates that an Amplify managed certificate is in the process of being verified. This occurs during the creation of a custom domain or when a custom domain is updated to use a managed certificate.

IMPORTING_CUSTOM_CERTIFICATE

Indicates that an Amplify custom certificate is in the process of being imported. This occurs during the creation of a custom domain or when a custom domain is updated to use a custom certificate.

PENDING_DEPLOYMENT

Indicates that the subdomain or certificate changes are being propagated.

AWAITING_APP_CNAME

Amplify is waiting for CNAME records corresponding to subdomains to be propagated. If your custom domain is on Route 53, Amplify handles this for you automatically. For more information about custom domains, see Setting up custom domains in the Amplify Hosting User Guide.

UPDATE_COMPLETE

The certificate has been associated with a domain.

UPDATE_FAILED

The certificate has failed to be provisioned or associated, and there is no existing active certificate to roll back to.

" + }, "statusReason":{ "shape":"StatusReason", - "documentation":"

The reason for the current status of the domain association.

" + "documentation":"

Additional information that describes why the domain association is in the current state.

" }, "certificateVerificationDNSRecord":{ "shape":"CertificateVerificationDNSRecord", @@ -1797,9 +1852,13 @@ "subDomains":{ "shape":"SubDomains", "documentation":"

The subdomains for the domain association.

" + }, + "certificate":{ + "shape":"Certificate", + "documentation":"

Describes the SSL/TLS certificate for the domain association. This can be your own custom certificate or the default certificate that Amplify provisions for you.

If you are updating your domain to use a different certificate, certificate points to the new certificate that is being created instead of the current active certificate. Otherwise, certificate points to the current active certificate.

" } }, - "documentation":"

Describes a domain association that associates a custom domain with an Amplify app.

" + "documentation":"

Describes the association between a custom domain and an Amplify app.

" }, "DomainAssociationArn":{ "type":"string", @@ -1826,7 +1885,9 @@ "PENDING_VERIFICATION", "IN_PROGRESS", "AVAILABLE", + "IMPORTING_CUSTOM_CERTIFICATE", "PENDING_DEPLOYMENT", + "AWAITING_APP_CNAME", "FAILED", "CREATING", "REQUESTING_CERTIFICATE", @@ -3354,6 +3415,10 @@ "autoSubDomainIAMRole":{ "shape":"AutoSubDomainIAMRole", "documentation":"

The required AWS Identity and Access Management (IAM) service role for the Amazon Resource Name (ARN) for automatically creating subdomains.

" + }, + "certificateSettings":{ + "shape":"CertificateSettings", + "documentation":"

The type of SSL/TLS certificate to use for your custom domain.

" } }, "documentation":"

The request structure for the update domain association request.

" @@ -3369,6 +3434,18 @@ }, "documentation":"

The result structure for the update domain association request.

" }, + "UpdateStatus":{ + "type":"string", + "enum":[ + "REQUESTING_CERTIFICATE", + "PENDING_VERIFICATION", + "IMPORTING_CUSTOM_CERTIFICATE", + "PENDING_DEPLOYMENT", + "AWAITING_APP_CNAME", + "UPDATE_COMPLETE", + "UPDATE_FAILED" + ] + }, "UpdateTime":{"type":"timestamp"}, "UpdateWebhookRequest":{ "type":"structure", diff -Nru awscli-2.15.9/awscli/botocore/data/appconfigdata/2021-11-11/endpoint-rule-set-1.json awscli-2.15.22/awscli/botocore/data/appconfigdata/2021-11-11/endpoint-rule-set-1.json --- awscli-2.15.9/awscli/botocore/data/appconfigdata/2021-11-11/endpoint-rule-set-1.json 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/awscli/botocore/data/appconfigdata/2021-11-11/endpoint-rule-set-1.json 2024-02-21 17:34:54.000000000 +0000 @@ -40,7 +40,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -83,7 +82,8 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -96,7 +96,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -110,7 +109,6 @@ "assign": "PartitionResult" } ], - "type": "tree", "rules": [ { "conditions": [ @@ -133,7 +131,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -168,7 +165,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -179,14 +175,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS and DualStack are enabled, but this partition does not support one or both", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -200,14 +198,12 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ - true, { "fn": "getAttr", "argv": [ @@ -216,13 +212,38 @@ }, "supportsFIPS" ] - } + }, + true ] } ], - "type": "tree", "rules": [ { + "conditions": [ + { + "fn": "stringEquals", + "argv": [ + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "name" + ] + }, + "aws-us-gov" + ] + } + ], + "endpoint": { + "url": "https://appconfigdata.{Region}.amazonaws.com", + "properties": {}, + 
"headers": {} + }, + "type": "endpoint" + }, + { "conditions": [], "endpoint": { "url": "https://appconfigdata-fips.{Region}.{PartitionResult#dnsSuffix}", @@ -231,14 +252,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS is enabled but this partition does not support FIPS", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -252,7 +275,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -272,7 +294,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -283,14 +304,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "DualStack is enabled but this partition does not support DualStack", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [], @@ -301,9 +324,11 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" } - ] + ], + "type": "tree" }, { "conditions": [], diff -Nru awscli-2.15.9/awscli/botocore/data/appsync/2017-07-25/service-2.json awscli-2.15.22/awscli/botocore/data/appsync/2017-07-25/service-2.json --- awscli-2.15.9/awscli/botocore/data/appsync/2017-07-25/service-2.json 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/awscli/botocore/data/appsync/2017-07-25/service-2.json 2024-02-21 17:34:54.000000000 +0000 @@ -146,7 +146,8 @@ {"shape":"ConcurrentModificationException"}, {"shape":"NotFoundException"}, {"shape":"UnauthorizedException"}, - {"shape":"InternalFailureException"} + {"shape":"InternalFailureException"}, + {"shape":"BadRequestException"} ], "documentation":"

Creates a Function object.

A function is a reusable entity. You can use multiple functions to compose the resolver logic.

" }, @@ -281,7 +282,8 @@ {"shape":"ConcurrentModificationException"}, {"shape":"NotFoundException"}, {"shape":"UnauthorizedException"}, - {"shape":"InternalFailureException"} + {"shape":"InternalFailureException"}, + {"shape":"BadRequestException"} ], "documentation":"

Deletes a Function.

" }, @@ -548,6 +550,23 @@ ], "documentation":"

Retrieves a GraphqlApi object.

" }, + "GetGraphqlApiEnvironmentVariables":{ + "name":"GetGraphqlApiEnvironmentVariables", + "http":{ + "method":"GET", + "requestUri":"/v1/apis/{apiId}/environmentVariables" + }, + "input":{"shape":"GetGraphqlApiEnvironmentVariablesRequest"}, + "output":{"shape":"GetGraphqlApiEnvironmentVariablesResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"NotFoundException"}, + {"shape":"UnauthorizedException"}, + {"shape":"InternalFailureException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

Retrieves the list of environmental variable key-value pairs associated with an API by its ID value.

" + }, "GetIntrospectionSchema":{ "name":"GetIntrospectionSchema", "http":{ @@ -806,6 +825,24 @@ ], "documentation":"

Lists Type objects by the source API association ID.

" }, + "PutGraphqlApiEnvironmentVariables":{ + "name":"PutGraphqlApiEnvironmentVariables", + "http":{ + "method":"PUT", + "requestUri":"/v1/apis/{apiId}/environmentVariables" + }, + "input":{"shape":"PutGraphqlApiEnvironmentVariablesRequest"}, + "output":{"shape":"PutGraphqlApiEnvironmentVariablesResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ConcurrentModificationException"}, + {"shape":"NotFoundException"}, + {"shape":"UnauthorizedException"}, + {"shape":"InternalFailureException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

Creates a list of environmental variables in an API by its ID value.

When creating an environmental variable, it must follow the constraints below:

When creating an environmental variable key-value pair, it must follow the additional constraints below:

You can create a list of environmental variables by adding it to the environmentVariables payload as a list in the format {\"key1\":\"value1\",\"key2\":\"value2\", …}. Note that each call of the PutGraphqlApiEnvironmentVariables action will result in the overwriting of the existing environmental variable list of that API. This means the existing environmental variables will be lost. To avoid this, you must include all existing and new environmental variables in the list each time you call this action.

" + }, "StartDataSourceIntrospection":{ "name":"StartDataSourceIntrospection", "http":{ @@ -973,7 +1010,8 @@ {"shape":"ConcurrentModificationException"}, {"shape":"NotFoundException"}, {"shape":"UnauthorizedException"}, - {"shape":"InternalFailureException"} + {"shape":"InternalFailureException"}, + {"shape":"BadRequestException"} ], "documentation":"

Updates a Function object.

" }, @@ -1131,6 +1169,10 @@ "status":{ "shape":"ApiCacheStatus", "documentation":"

The cache instance status.

" + }, + "healthMetricsConfig":{ + "shape":"CacheHealthMetricsConfig", + "documentation":"

Controls how cache health metrics will be emitted to CloudWatch. Cache health metrics include:

Metrics will be recorded by API ID. You can set the value to ENABLED or DISABLED.

" } }, "documentation":"

The ApiCache object.

" @@ -1423,6 +1465,13 @@ "Blob":{"type":"blob"}, "Boolean":{"type":"boolean"}, "BooleanValue":{"type":"boolean"}, + "CacheHealthMetricsConfig":{ + "type":"string", + "enum":[ + "ENABLED", + "DISABLED" + ] + }, "CachingConfig":{ "type":"structure", "required":["ttl"], @@ -1583,6 +1632,10 @@ "type":{ "shape":"ApiCacheType", "documentation":"

The cache instance type. Valid values are

Historically, instance types were identified by an EC2-style value. As of July 2020, this is deprecated, and the generic identifiers above should be used.

The following legacy instance types are available, but their use is discouraged:

" + }, + "healthMetricsConfig":{ + "shape":"CacheHealthMetricsConfig", + "documentation":"

Controls how cache health metrics will be emitted to CloudWatch. Cache health metrics include:

Metrics will be recorded by API ID. You can set the value to ENABLED or DISABLED.

" } }, "documentation":"

Represents the input of a CreateApiCache operation.

" @@ -1683,6 +1736,10 @@ "eventBridgeConfig":{ "shape":"EventBridgeDataSourceConfig", "documentation":"

Amazon EventBridge settings.

" + }, + "metricsConfig":{ + "shape":"DataSourceLevelMetricsConfig", + "documentation":"

Enables or disables enhanced data source metrics for specified data sources. Note that metricsConfig won't be used unless the dataSourceLevelMetricsBehavior value is set to PER_DATA_SOURCE_METRICS. If the dataSourceLevelMetricsBehavior is set to FULL_REQUEST_DATA_SOURCE_METRICS instead, metricsConfig will be ignored. However, you can still set its value.

metricsConfig can be ENABLED or DISABLED.

" } } }, @@ -1854,6 +1911,10 @@ "resolverCountLimit":{ "shape":"ResolverCountLimit", "documentation":"

The maximum number of resolvers that can be invoked in a single request. The default value is 0 (or unspecified), which will set the limit to 10000. When specified, the limit value can be between 1 and 10000. This field will produce a limit error if the operation falls out of bounds.

" + }, + "enhancedMetricsConfig":{ + "shape":"EnhancedMetricsConfig", + "documentation":"

The enhancedMetricsConfig object.

" } } }, @@ -1926,6 +1987,10 @@ "code":{ "shape":"Code", "documentation":"

The resolver code that contains the request and response functions. When code is used, the runtime is required. The runtime value must be APPSYNC_JS.

" + }, + "metricsConfig":{ + "shape":"ResolverLevelMetricsConfig", + "documentation":"

Enables or disables enhanced resolver metrics for specified resolvers. Note that metricsConfig won't be used unless the resolverLevelMetricsBehavior value is set to PER_RESOLVER_METRICS. If the resolverLevelMetricsBehavior is set to FULL_REQUEST_RESOLVER_METRICS instead, metricsConfig will be ignored. However, you can still set its value.

metricsConfig can be ENABLED or DISABLED.

" } } }, @@ -2021,6 +2086,10 @@ "eventBridgeConfig":{ "shape":"EventBridgeDataSourceConfig", "documentation":"

Amazon EventBridge settings.

" + }, + "metricsConfig":{ + "shape":"DataSourceLevelMetricsConfig", + "documentation":"

Enables or disables enhanced data source metrics for specified data sources. Note that metricsConfig won't be used unless the dataSourceLevelMetricsBehavior value is set to PER_DATA_SOURCE_METRICS. If the dataSourceLevelMetricsBehavior is set to FULL_REQUEST_DATA_SOURCE_METRICS instead, metricsConfig will be ignored. However, you can still set its value.

metricsConfig can be ENABLED or DISABLED.

" } }, "documentation":"

Describes a data source.

" @@ -2147,6 +2216,20 @@ "SUCCESS" ] }, + "DataSourceLevelMetricsBehavior":{ + "type":"string", + "enum":[ + "FULL_REQUEST_DATA_SOURCE_METRICS", + "PER_DATA_SOURCE_METRICS" + ] + }, + "DataSourceLevelMetricsConfig":{ + "type":"string", + "enum":[ + "ENABLED", + "DISABLED" + ] + }, "DataSourceType":{ "type":"string", "enum":[ @@ -2547,6 +2630,47 @@ }, "documentation":"

Describes an OpenSearch data source configuration.

As of September 2021, Amazon Elasticsearch service is Amazon OpenSearch Service. This configuration is deprecated. For new data sources, use OpenSearchServiceDataSourceConfig to specify an OpenSearch data source.

" }, + "EnhancedMetricsConfig":{ + "type":"structure", + "required":[ + "resolverLevelMetricsBehavior", + "dataSourceLevelMetricsBehavior", + "operationLevelMetricsConfig" + ], + "members":{ + "resolverLevelMetricsBehavior":{ + "shape":"ResolverLevelMetricsBehavior", + "documentation":"

Controls how resolver metrics will be emitted to CloudWatch. Resolver metrics include:

These metrics can be emitted to CloudWatch per resolver or for all resolvers in the request. Metrics will be recorded by API ID and resolver name. resolverLevelMetricsBehavior accepts one of these values at a time:

" + }, + "dataSourceLevelMetricsBehavior":{ + "shape":"DataSourceLevelMetricsBehavior", + "documentation":"

Controls how data source metrics will be emitted to CloudWatch. Data source metrics include:

These metrics can be emitted to CloudWatch per data source or for all data sources in the request. Metrics will be recorded by API ID and data source name. dataSourceLevelMetricsBehavior accepts one of these values at a time:

" + }, + "operationLevelMetricsConfig":{ + "shape":"OperationLevelMetricsConfig", + "documentation":"

Controls how operation metrics will be emitted to CloudWatch. Operation metrics include:

Metrics will be recorded by API ID and operation name. You can set the value to ENABLED or DISABLED.

" + } + }, + "documentation":"

Enables and controls the enhanced metrics feature. Enhanced metrics emit granular data on API usage and performance such as AppSync request and error counts, latency, and cache hits/misses. All enhanced metric data is sent to your CloudWatch account, and you can configure the types of data that will be sent.

Enhanced metrics can be configured at the resolver, data source, and operation levels. EnhancedMetricsConfig contains three required parameters, each controlling one of these categories:

  1. resolverLevelMetricsBehavior: Controls how resolver metrics will be emitted to CloudWatch. Resolver metrics include:

    • GraphQL errors: The number of GraphQL errors that occurred.

    • Requests: The number of invocations that occurred during a request.

    • Latency: The time to complete a resolver invocation.

    • Cache hits: The number of cache hits during a request.

    • Cache misses: The number of cache misses during a request.

    These metrics can be emitted to CloudWatch per resolver or for all resolvers in the request. Metrics will be recorded by API ID and resolver name. resolverLevelMetricsBehavior accepts one of these values at a time:

    • FULL_REQUEST_RESOLVER_METRICS: Records and emits metric data for all resolvers in the request.

    • PER_RESOLVER_METRICS: Records and emits metric data for resolvers that have the metricConfig value set to ENABLED.

  2. dataSourceLevelMetricsBehavior: Controls how data source metrics will be emitted to CloudWatch. Data source metrics include:

    • Requests: The number of invocations that occurred during a request.

    • Latency: The time to complete a data source invocation.

    • Errors: The number of errors that occurred during a data source invocation.

    These metrics can be emitted to CloudWatch per data source or for all data sources in the request. Metrics will be recorded by API ID and data source name. dataSourceLevelMetricsBehavior accepts one of these values at a time:

    • FULL_REQUEST_DATA_SOURCE_METRICS: Records and emits metric data for all data sources in the request.

    • PER_DATA_SOURCE_METRICS: Records and emits metric data for data sources that have the metricConfig value set to ENABLED.

  3. operationLevelMetricsConfig: Controls how operation metrics will be emitted to CloudWatch. Operation metrics include:

    • Requests: The number of times a specified GraphQL operation was called.

    • GraphQL errors: The number of GraphQL errors that occurred during a specified GraphQL operation.

    Metrics will be recorded by API ID and operation name. You can set the value to ENABLED or DISABLED.

" + }, + "EnvironmentVariableKey":{ + "type":"string", + "max":64, + "min":2, + "pattern":"^[A-Za-z]+\\w*$" + }, + "EnvironmentVariableMap":{ + "type":"map", + "key":{"shape":"EnvironmentVariableKey"}, + "value":{"shape":"EnvironmentVariableValue"}, + "max":50, + "min":0 + }, + "EnvironmentVariableValue":{ + "type":"string", + "max":512, + "min":0 + }, "ErrorDetail":{ "type":"structure", "members":{ @@ -2925,6 +3049,27 @@ } } }, + "GetGraphqlApiEnvironmentVariablesRequest":{ + "type":"structure", + "required":["apiId"], + "members":{ + "apiId":{ + "shape":"String", + "documentation":"

The ID of the API from which the environmental variable list will be retrieved.

", + "location":"uri", + "locationName":"apiId" + } + } + }, + "GetGraphqlApiEnvironmentVariablesResponse":{ + "type":"structure", + "members":{ + "environmentVariables":{ + "shape":"EnvironmentVariableMap", + "documentation":"

The payload containing each environmental variable in the \"key\" : \"value\" format.

" + } + } + }, "GetGraphqlApiRequest":{ "type":"structure", "required":["apiId"], @@ -3232,6 +3377,10 @@ "resolverCountLimit":{ "shape":"ResolverCountLimit", "documentation":"

The maximum number of resolvers that can be invoked in a single request. The default value is 0 (or unspecified), which will set the limit to 10000. When specified, the limit value can be between 1 and 10000. This field will produce a limit error if the operation falls out of bounds.

" + }, + "enhancedMetricsConfig":{ + "shape":"EnhancedMetricsConfig", + "documentation":"

The enhancedMetricsConfig object.

" } }, "documentation":"

Describes a GraphQL API.

" @@ -3850,6 +3999,13 @@ }, "documentation":"

Describes an OpenSearch data source configuration.

" }, + "OperationLevelMetricsConfig":{ + "type":"string", + "enum":[ + "ENABLED", + "DISABLED" + ] + }, "OutputType":{ "type":"string", "enum":[ @@ -3880,6 +4036,34 @@ }, "documentation":"

The pipeline configuration for a resolver of kind PIPELINE.

" }, + "PutGraphqlApiEnvironmentVariablesRequest":{ + "type":"structure", + "required":[ + "apiId", + "environmentVariables" + ], + "members":{ + "apiId":{ + "shape":"String", + "documentation":"

The ID of the API to which the environmental variable list will be written.

", + "location":"uri", + "locationName":"apiId" + }, + "environmentVariables":{ + "shape":"EnvironmentVariableMap", + "documentation":"

The list of environmental variables to add to the API.

When creating an environmental variable key-value pair, it must follow the additional constraints below:

You can create a list of environmental variables by adding it to the environmentVariables payload as a list in the format {\"key1\":\"value1\",\"key2\":\"value2\", …}. Note that each call of the PutGraphqlApiEnvironmentVariables action will result in the overwriting of the existing environmental variable list of that API. This means the existing environmental variables will be lost. To avoid this, you must include all existing and new environmental variables in the list each time you call this action.

" + } + } + }, + "PutGraphqlApiEnvironmentVariablesResponse":{ + "type":"structure", + "members":{ + "environmentVariables":{ + "shape":"EnvironmentVariableMap", + "documentation":"

The payload containing each environmental variable in the \"key\" : \"value\" format.

" + } + } + }, "QueryDepthLimit":{ "type":"integer", "max":75, @@ -4020,6 +4204,10 @@ "code":{ "shape":"Code", "documentation":"

The resolver code that contains the request and response functions. When code is used, the runtime is required. The runtime value must be APPSYNC_JS.

" + }, + "metricsConfig":{ + "shape":"ResolverLevelMetricsConfig", + "documentation":"

Enables or disables enhanced resolver metrics for specified resolvers. Note that metricsConfig won't be used unless the resolverLevelMetricsBehavior value is set to PER_RESOLVER_METRICS. If the resolverLevelMetricsBehavior is set to FULL_REQUEST_RESOLVER_METRICS instead, metricsConfig will be ignored. However, you can still set its value.

metricsConfig can be ENABLED or DISABLED.

" } }, "documentation":"

Describes a resolver.

" @@ -4036,6 +4224,20 @@ "PIPELINE" ] }, + "ResolverLevelMetricsBehavior":{ + "type":"string", + "enum":[ + "FULL_REQUEST_RESOLVER_METRICS", + "PER_RESOLVER_METRICS" + ] + }, + "ResolverLevelMetricsConfig":{ + "type":"string", + "enum":[ + "ENABLED", + "DISABLED" + ] + }, "Resolvers":{ "type":"list", "member":{"shape":"Resolver"} @@ -4441,6 +4643,10 @@ "type":{ "shape":"ApiCacheType", "documentation":"

The cache instance type. Valid values are

Historically, instance types were identified by an EC2-style value. As of July 2020, this is deprecated, and the generic identifiers above should be used.

The following legacy instance types are available, but their use is discouraged:

" + }, + "healthMetricsConfig":{ + "shape":"CacheHealthMetricsConfig", + "documentation":"

Controls how cache health metrics will be emitted to CloudWatch. Cache health metrics include:

Metrics will be recorded by API ID. You can set the value to ENABLED or DISABLED.

" } }, "documentation":"

Represents the input of a UpdateApiCache operation.

" @@ -4552,6 +4758,10 @@ "eventBridgeConfig":{ "shape":"EventBridgeDataSourceConfig", "documentation":"

The new Amazon EventBridge settings.

" + }, + "metricsConfig":{ + "shape":"DataSourceLevelMetricsConfig", + "documentation":"

Enables or disables enhanced data source metrics for specified data sources. Note that metricsConfig won't be used unless the dataSourceLevelMetricsBehavior value is set to PER_DATA_SOURCE_METRICS. If the dataSourceLevelMetricsBehavior is set to FULL_REQUEST_DATA_SOURCE_METRICS instead, metricsConfig will be ignored. However, you can still set its value.

metricsConfig can be ENABLED or DISABLED.

" } } }, @@ -4719,6 +4929,10 @@ "resolverCountLimit":{ "shape":"ResolverCountLimit", "documentation":"

The maximum number of resolvers that can be invoked in a single request. The default value is 0 (or unspecified), which will set the limit to 10000. When specified, the limit value can be between 1 and 10000. This field will produce a limit error if the operation falls out of bounds.

" + }, + "enhancedMetricsConfig":{ + "shape":"EnhancedMetricsConfig", + "documentation":"

The enhancedMetricsConfig object.

" } } }, @@ -4793,6 +5007,10 @@ "code":{ "shape":"Code", "documentation":"

The resolver code that contains the request and response functions. When code is used, the runtime is required. The runtime value must be APPSYNC_JS.

" + }, + "metricsConfig":{ + "shape":"ResolverLevelMetricsConfig", + "documentation":"

Enables or disables enhanced resolver metrics for specified resolvers. Note that metricsConfig won't be used unless the resolverLevelMetricsBehavior value is set to PER_RESOLVER_METRICS. If the resolverLevelMetricsBehavior is set to FULL_REQUEST_RESOLVER_METRICS instead, metricsConfig will be ignored. However, you can still set its value.

metricsConfig can be ENABLED or DISABLED.

" } } }, diff -Nru awscli-2.15.9/awscli/botocore/data/artifact/2018-05-10/endpoint-rule-set-1.json awscli-2.15.22/awscli/botocore/data/artifact/2018-05-10/endpoint-rule-set-1.json --- awscli-2.15.9/awscli/botocore/data/artifact/2018-05-10/endpoint-rule-set-1.json 1970-01-01 00:00:00.000000000 +0000 +++ awscli-2.15.22/awscli/botocore/data/artifact/2018-05-10/endpoint-rule-set-1.json 2024-02-21 17:34:54.000000000 +0000 @@ -0,0 +1,350 @@ +{ + "version": "1.0", + "parameters": { + "Region": { + "builtIn": "AWS::Region", + "required": false, + "documentation": "The AWS region used to dispatch the request.", + "type": "String" + }, + "UseDualStack": { + "builtIn": "AWS::UseDualStack", + "required": true, + "default": false, + "documentation": "When true, use the dual-stack endpoint. If the configured endpoint does not support dual-stack, dispatching the request MAY return an error.", + "type": "Boolean" + }, + "UseFIPS": { + "builtIn": "AWS::UseFIPS", + "required": true, + "default": false, + "documentation": "When true, send this request to the FIPS-compliant regional endpoint. 
If the configured endpoint does not have a FIPS compliant endpoint, dispatching the request will return an error.", + "type": "Boolean" + }, + "Endpoint": { + "builtIn": "SDK::Endpoint", + "required": false, + "documentation": "Override the endpoint used to send this request", + "type": "String" + } + }, + "rules": [ + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Endpoint" + } + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + } + ], + "type": "tree" + }, + { + "conditions": [], + "rules": [ + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "aws.partition", + "argv": [ + { + "ref": "Region" + } + ], + "assign": "PartitionResult" + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + }, + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "rules": [ + { + "conditions": [], + "rules": [ + { + 
"conditions": [], + "endpoint": { + "url": "https://artifact-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + } + ], + "type": "tree" + }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ], + "type": "tree" + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + }, + true + ] + } + ], + "rules": [ + { + "conditions": [], + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://artifact-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + } + ], + "type": "tree" + }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ], + "type": "tree" + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "rules": [ + { + "conditions": [], + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://artifact.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + } + ], + "type": "tree" + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" + } + ], + "type": "tree" + }, + { + "conditions": [], + "rules": [ + { + "conditions": [], + "endpoint": { + "url": 
"https://artifact.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + } + ], + "type": "tree" + } + ], + "type": "tree" + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" + } + ], + "type": "tree" + } + ] +} \ No newline at end of file diff -Nru awscli-2.15.9/awscli/botocore/data/artifact/2018-05-10/paginators-1.json awscli-2.15.22/awscli/botocore/data/artifact/2018-05-10/paginators-1.json --- awscli-2.15.9/awscli/botocore/data/artifact/2018-05-10/paginators-1.json 1970-01-01 00:00:00.000000000 +0000 +++ awscli-2.15.22/awscli/botocore/data/artifact/2018-05-10/paginators-1.json 2024-02-21 17:34:54.000000000 +0000 @@ -0,0 +1,10 @@ +{ + "pagination": { + "ListReports": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "reports" + } + } +} diff -Nru awscli-2.15.9/awscli/botocore/data/artifact/2018-05-10/service-2.json awscli-2.15.22/awscli/botocore/data/artifact/2018-05-10/service-2.json --- awscli-2.15.9/awscli/botocore/data/artifact/2018-05-10/service-2.json 1970-01-01 00:00:00.000000000 +0000 +++ awscli-2.15.22/awscli/botocore/data/artifact/2018-05-10/service-2.json 2024-02-21 17:34:54.000000000 +0000 @@ -0,0 +1,737 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2018-05-10", + "endpointPrefix":"artifact", + "jsonVersion":"1.1", + "protocol":"rest-json", + "serviceFullName":"AWS Artifact", + "serviceId":"Artifact", + "signatureVersion":"v4", + "signingName":"artifact", + "uid":"artifact-2018-05-10" + }, + "operations":{ + "GetAccountSettings":{ + "name":"GetAccountSettings", + "http":{ + "method":"GET", + "requestUri":"/v1/account-settings/get", + "responseCode":200 + }, + "input":{"shape":"GetAccountSettingsRequest"}, + "output":{"shape":"GetAccountSettingsResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + 
{"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ServiceQuotaExceededException"} + ], + "documentation":"

Get the account settings for Artifact.

" + }, + "GetReport":{ + "name":"GetReport", + "http":{ + "method":"GET", + "requestUri":"/v1/report/get", + "responseCode":200 + }, + "input":{"shape":"GetReportRequest"}, + "output":{"shape":"GetReportResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ServiceQuotaExceededException"} + ], + "documentation":"

Get the content for a single report.

" + }, + "GetReportMetadata":{ + "name":"GetReportMetadata", + "http":{ + "method":"GET", + "requestUri":"/v1/report/getMetadata", + "responseCode":200 + }, + "input":{"shape":"GetReportMetadataRequest"}, + "output":{"shape":"GetReportMetadataResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ServiceQuotaExceededException"} + ], + "documentation":"

Get the metadata for a single report.

" + }, + "GetTermForReport":{ + "name":"GetTermForReport", + "http":{ + "method":"GET", + "requestUri":"/v1/report/getTermForReport", + "responseCode":200 + }, + "input":{"shape":"GetTermForReportRequest"}, + "output":{"shape":"GetTermForReportResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ServiceQuotaExceededException"} + ], + "documentation":"

Get the Term content associated with a single report.

" + }, + "ListReports":{ + "name":"ListReports", + "http":{ + "method":"GET", + "requestUri":"/v1/report/list", + "responseCode":200 + }, + "input":{"shape":"ListReportsRequest"}, + "output":{"shape":"ListReportsResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ServiceQuotaExceededException"} + ], + "documentation":"

List available reports.

" + }, + "PutAccountSettings":{ + "name":"PutAccountSettings", + "http":{ + "method":"PUT", + "requestUri":"/v1/account-settings/put", + "responseCode":200 + }, + "input":{"shape":"PutAccountSettingsRequest"}, + "output":{"shape":"PutAccountSettingsResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ServiceQuotaExceededException"} + ], + "documentation":"

Put the account settings for Artifact.

", + "idempotent":true + } + }, + "shapes":{ + "AcceptanceType":{ + "type":"string", + "enum":[ + "PASSTHROUGH", + "EXPLICIT" + ] + }, + "AccessDeniedException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"String"} + }, + "documentation":"

User does not have sufficient access to perform this action.

", + "error":{ + "httpStatusCode":403, + "senderFault":true + }, + "exception":true + }, + "AccountSettings":{ + "type":"structure", + "members":{ + "notificationSubscriptionStatus":{ + "shape":"NotificationSubscriptionStatus", + "documentation":"

Notification subscription status of the customer.

" + } + }, + "documentation":"

Account settings for the customer.

" + }, + "ConflictException":{ + "type":"structure", + "required":[ + "message", + "resourceId", + "resourceType" + ], + "members":{ + "message":{"shape":"String"}, + "resourceId":{ + "shape":"String", + "documentation":"

Identifier of the affected resource.

" + }, + "resourceType":{ + "shape":"String", + "documentation":"

Type of the affected resource.

" + } + }, + "documentation":"

Request to create/modify content would result in a conflict.

", + "error":{ + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + }, + "GetAccountSettingsRequest":{ + "type":"structure", + "members":{ + } + }, + "GetAccountSettingsResponse":{ + "type":"structure", + "members":{ + "accountSettings":{"shape":"AccountSettings"} + } + }, + "GetReportMetadataRequest":{ + "type":"structure", + "required":["reportId"], + "members":{ + "reportId":{ + "shape":"ReportId", + "documentation":"

Unique resource ID for the report resource.

", + "location":"querystring", + "locationName":"reportId" + }, + "reportVersion":{ + "shape":"VersionAttribute", + "documentation":"

Version for the report resource.

", + "location":"querystring", + "locationName":"reportVersion" + } + } + }, + "GetReportMetadataResponse":{ + "type":"structure", + "members":{ + "reportDetails":{ + "shape":"ReportDetail", + "documentation":"

Report resource detail.

" + } + } + }, + "GetReportRequest":{ + "type":"structure", + "required":[ + "reportId", + "termToken" + ], + "members":{ + "reportId":{ + "shape":"ReportId", + "documentation":"

Unique resource ID for the report resource.

", + "location":"querystring", + "locationName":"reportId" + }, + "reportVersion":{ + "shape":"VersionAttribute", + "documentation":"

Version for the report resource.

", + "location":"querystring", + "locationName":"reportVersion" + }, + "termToken":{ + "shape":"ShortStringAttribute", + "documentation":"

Unique download token provided by GetTermForReport API.

", + "location":"querystring", + "locationName":"termToken" + } + } + }, + "GetReportResponse":{ + "type":"structure", + "members":{ + "documentPresignedUrl":{ + "shape":"GetReportResponseDocumentPresignedUrlString", + "documentation":"

Presigned S3 url to access the report content.

" + } + } + }, + "GetReportResponseDocumentPresignedUrlString":{ + "type":"string", + "max":10240, + "min":1 + }, + "GetTermForReportRequest":{ + "type":"structure", + "required":["reportId"], + "members":{ + "reportId":{ + "shape":"ReportId", + "documentation":"

Unique resource ID for the report resource.

", + "location":"querystring", + "locationName":"reportId" + }, + "reportVersion":{ + "shape":"VersionAttribute", + "documentation":"

Version for the report resource.

", + "location":"querystring", + "locationName":"reportVersion" + } + } + }, + "GetTermForReportResponse":{ + "type":"structure", + "members":{ + "documentPresignedUrl":{ + "shape":"GetTermForReportResponseDocumentPresignedUrlString", + "documentation":"

Presigned S3 url to access the term content.

" + }, + "termToken":{ + "shape":"String", + "documentation":"

Unique token representing this request event.

" + } + } + }, + "GetTermForReportResponseDocumentPresignedUrlString":{ + "type":"string", + "max":10240, + "min":1 + }, + "Integer":{ + "type":"integer", + "box":true + }, + "InternalServerException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"String"}, + "retryAfterSeconds":{ + "shape":"Integer", + "documentation":"

Number of seconds in which the caller can retry the request.

", + "location":"header", + "locationName":"Retry-After" + } + }, + "documentation":"

An unknown server exception has occurred.

", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true, + "retryable":{"throttling":false} + }, + "ListReportsRequest":{ + "type":"structure", + "members":{ + "maxResults":{ + "shape":"MaxResultsAttribute", + "documentation":"

Maximum number of resources to return in the paginated response.

", + "location":"querystring", + "locationName":"maxResults" + }, + "nextToken":{ + "shape":"NextTokenAttribute", + "documentation":"

Pagination token to request the next page of resources.

", + "location":"querystring", + "locationName":"nextToken" + } + } + }, + "ListReportsResponse":{ + "type":"structure", + "members":{ + "nextToken":{ + "shape":"NextTokenAttribute", + "documentation":"

Pagination token to request the next page of resources.

" + }, + "reports":{ + "shape":"ReportsList", + "documentation":"

List of report resources.

" + } + } + }, + "LongStringAttribute":{ + "type":"string", + "max":1024, + "min":1, + "pattern":"^[^<>]*$" + }, + "MaxResultsAttribute":{ + "type":"integer", + "box":true, + "max":300, + "min":1 + }, + "NextTokenAttribute":{ + "type":"string", + "max":2048, + "min":1 + }, + "NotificationSubscriptionStatus":{ + "type":"string", + "enum":[ + "SUBSCRIBED", + "NOT_SUBSCRIBED" + ] + }, + "PublishedState":{ + "type":"string", + "enum":[ + "PUBLISHED", + "UNPUBLISHED" + ] + }, + "PutAccountSettingsRequest":{ + "type":"structure", + "members":{ + "notificationSubscriptionStatus":{ + "shape":"NotificationSubscriptionStatus", + "documentation":"

Desired notification subscription status.

" + } + } + }, + "PutAccountSettingsResponse":{ + "type":"structure", + "members":{ + "accountSettings":{"shape":"AccountSettings"} + } + }, + "ReportDetail":{ + "type":"structure", + "members":{ + "acceptanceType":{ + "shape":"AcceptanceType", + "documentation":"

Acceptance type for report.

" + }, + "arn":{ + "shape":"LongStringAttribute", + "documentation":"

ARN for the report resource.

" + }, + "category":{ + "shape":"ShortStringAttribute", + "documentation":"

Category for the report resource.

" + }, + "companyName":{ + "shape":"ShortStringAttribute", + "documentation":"

Associated company name for the report resource.

" + }, + "createdAt":{ + "shape":"TimestampAttribute", + "documentation":"

Timestamp indicating when the report resource was created.

" + }, + "deletedAt":{ + "shape":"TimestampAttribute", + "documentation":"

Timestamp indicating when the report resource was deleted.

" + }, + "description":{ + "shape":"LongStringAttribute", + "documentation":"

Description for the report resource.

" + }, + "id":{ + "shape":"ReportId", + "documentation":"

Unique resource ID for the report resource.

" + }, + "lastModifiedAt":{ + "shape":"TimestampAttribute", + "documentation":"

Timestamp indicating when the report resource was last modified.

" + }, + "name":{ + "shape":"ShortStringAttribute", + "documentation":"

Name for the report resource.

" + }, + "periodEnd":{ + "shape":"TimestampAttribute", + "documentation":"

Timestamp indicating the report resource effective end.

" + }, + "periodStart":{ + "shape":"TimestampAttribute", + "documentation":"

Timestamp indicating the report resource effective start.

" + }, + "productName":{ + "shape":"ShortStringAttribute", + "documentation":"

Associated product name for the report resource.

" + }, + "sequenceNumber":{ + "shape":"SequenceNumberAttribute", + "documentation":"

Sequence number to enforce optimistic locking.

" + }, + "series":{ + "shape":"ShortStringAttribute", + "documentation":"

Series for the report resource.

" + }, + "state":{ + "shape":"PublishedState", + "documentation":"

Current state of the report resource

" + }, + "statusMessage":{ + "shape":"StatusMessage", + "documentation":"

The message associated with the current upload state.

" + }, + "termArn":{ + "shape":"LongStringAttribute", + "documentation":"

Unique resource ARN for term resource.

" + }, + "uploadState":{ + "shape":"UploadState", + "documentation":"

The current state of the document upload.

" + }, + "version":{ + "shape":"VersionAttribute", + "documentation":"

Version for the report resource.

" + } + }, + "documentation":"

Full detail for report resource metadata.

" + }, + "ReportId":{ + "type":"string", + "pattern":"^report-[a-zA-Z0-9]{16}$" + }, + "ReportSummary":{ + "type":"structure", + "members":{ + "arn":{ + "shape":"LongStringAttribute", + "documentation":"

ARN for the report resource.

" + }, + "category":{ + "shape":"ShortStringAttribute", + "documentation":"

Category for the report resource.

" + }, + "companyName":{ + "shape":"ShortStringAttribute", + "documentation":"

Associated company name for the report resource.

" + }, + "description":{ + "shape":"LongStringAttribute", + "documentation":"

Description for the report resource.

" + }, + "id":{ + "shape":"ReportId", + "documentation":"

Unique resource ID for the report resource.

" + }, + "name":{ + "shape":"ShortStringAttribute", + "documentation":"

Name for the report resource.

" + }, + "periodEnd":{ + "shape":"TimestampAttribute", + "documentation":"

Timestamp indicating the report resource effective end.

" + }, + "periodStart":{ + "shape":"TimestampAttribute", + "documentation":"

Timestamp indicating the report resource effective start.

" + }, + "productName":{ + "shape":"ShortStringAttribute", + "documentation":"

Associated product name for the report resource.

" + }, + "series":{ + "shape":"ShortStringAttribute", + "documentation":"

Series for the report resource.

" + }, + "state":{ + "shape":"PublishedState", + "documentation":"

Current state of the report resource.

" + }, + "statusMessage":{ + "shape":"StatusMessage", + "documentation":"

The message associated with the current upload state.

" + }, + "uploadState":{ + "shape":"UploadState", + "documentation":"

The current state of the document upload.

" + }, + "version":{ + "shape":"VersionAttribute", + "documentation":"

Version for the report resource.

" + } + }, + "documentation":"

Summary for report resource.

" + }, + "ReportsList":{ + "type":"list", + "member":{"shape":"ReportSummary"} + }, + "ResourceNotFoundException":{ + "type":"structure", + "required":[ + "message", + "resourceId", + "resourceType" + ], + "members":{ + "message":{"shape":"String"}, + "resourceId":{ + "shape":"String", + "documentation":"

Identifier of the affected resource.

" + }, + "resourceType":{ + "shape":"String", + "documentation":"

Type of the affected resource.

" + } + }, + "documentation":"

Request references a resource which does not exist.

", + "error":{ + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "SequenceNumberAttribute":{ + "type":"long", + "box":true, + "min":1 + }, + "ServiceQuotaExceededException":{ + "type":"structure", + "required":[ + "message", + "quotaCode", + "resourceId", + "resourceType", + "serviceCode" + ], + "members":{ + "message":{"shape":"String"}, + "quotaCode":{ + "shape":"String", + "documentation":"

Code for the affected quota.

" + }, + "resourceId":{ + "shape":"String", + "documentation":"

Identifier of the affected resource.

" + }, + "resourceType":{ + "shape":"String", + "documentation":"

Type of the affected resource.

" + }, + "serviceCode":{ + "shape":"String", + "documentation":"

Code for the affected service.

" + } + }, + "documentation":"

Request would cause a service quota to be exceeded.

", + "error":{ + "httpStatusCode":402, + "senderFault":true + }, + "exception":true + }, + "ShortStringAttribute":{ + "type":"string", + "max":256, + "min":1, + "pattern":"^[a-zA-Z0-9_\\-\\s]*$" + }, + "StatusMessage":{"type":"string"}, + "String":{"type":"string"}, + "ThrottlingException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"String"}, + "quotaCode":{ + "shape":"String", + "documentation":"

Code for the affected quota.

" + }, + "retryAfterSeconds":{ + "shape":"Integer", + "documentation":"

Number of seconds in which the caller can retry the request.

", + "location":"header", + "locationName":"Retry-After" + }, + "serviceCode":{ + "shape":"String", + "documentation":"

Code for the affected service.

" + } + }, + "documentation":"

Request was denied due to request throttling.

", + "error":{ + "httpStatusCode":429, + "senderFault":true + }, + "exception":true, + "retryable":{"throttling":true} + }, + "TimestampAttribute":{ + "type":"timestamp", + "timestampFormat":"iso8601" + }, + "UploadState":{ + "type":"string", + "enum":[ + "PROCESSING", + "COMPLETE", + "FAILED", + "FAULT" + ] + }, + "ValidationException":{ + "type":"structure", + "required":[ + "message", + "reason" + ], + "members":{ + "fieldList":{ + "shape":"ValidationExceptionFieldList", + "documentation":"

The field that caused the error, if applicable.

" + }, + "message":{"shape":"String"}, + "reason":{ + "shape":"ValidationExceptionReason", + "documentation":"

Reason the request failed validation.

" + } + }, + "documentation":"

Request fails to satisfy the constraints specified by an AWS service.

", + "error":{ + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "ValidationExceptionField":{ + "type":"structure", + "required":[ + "message", + "name" + ], + "members":{ + "message":{ + "shape":"String", + "documentation":"

Message describing why the field failed validation.

" + }, + "name":{ + "shape":"String", + "documentation":"

Name of validation exception.

" + } + }, + "documentation":"

Validation exception message and name.

" + }, + "ValidationExceptionFieldList":{ + "type":"list", + "member":{"shape":"ValidationExceptionField"} + }, + "ValidationExceptionReason":{ + "type":"string", + "enum":[ + "unknownOperation", + "cannotParse", + "fieldValidationFailed", + "invalidToken", + "other" + ] + }, + "VersionAttribute":{ + "type":"long", + "box":true, + "min":1 + } + }, + "documentation":"

This reference provides descriptions of the low-level AWS Artifact Service API.

" +} diff -Nru awscli-2.15.9/awscli/botocore/data/athena/2017-05-18/service-2.json awscli-2.15.22/awscli/botocore/data/athena/2017-05-18/service-2.json --- awscli-2.15.9/awscli/botocore/data/athena/2017-05-18/service-2.json 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/awscli/botocore/data/athena/2017-05-18/service-2.json 2024-02-21 17:34:54.000000000 +0000 @@ -536,7 +536,7 @@ {"shape":"InvalidRequestException"}, {"shape":"TooManyRequestsException"} ], - "documentation":"

Imports a single ipynb file to a Spark enabled workgroup. The maximum file size that can be imported is 10 megabytes. If an ipynb file with the same name already exists in the workgroup, throws an error.

" + "documentation":"

Imports a single ipynb file to a Spark enabled workgroup. To import the notebook, the request must specify a value for either Payload or NoteBookS3LocationUri. If neither is specified or both are specified, an InvalidRequestException occurs. The maximum file size that can be imported is 10 megabytes. If an ipynb file with the same name already exists in the workgroup, throws an error.

" }, "ListApplicationDPUSizes":{ "name":"ListApplicationDPUSizes", @@ -2656,7 +2656,6 @@ "required":[ "WorkGroup", "Name", - "Payload", "Type" ], "members":{ @@ -2670,12 +2669,16 @@ }, "Payload":{ "shape":"Payload", - "documentation":"

The notebook content to be imported.

" + "documentation":"

The notebook content to be imported. The payload must be in ipynb format.

" }, "Type":{ "shape":"NotebookType", "documentation":"

The notebook content type. Currently, the only valid type is IPYNB.

" }, + "NotebookS3LocationUri":{ + "shape":"S3Uri", + "documentation":"

A URI that specifies the Amazon S3 location of a notebook file in ipynb format.

" + }, "ClientRequestToken":{ "shape":"ClientRequestToken", "documentation":"

A unique case-sensitive string used to ensure the request to import the notebook is idempotent (executes only once).

This token is listed as not required because Amazon Web Services SDKs (for example the Amazon Web Services SDK for Java) auto-generate the token for you. If you are not using the Amazon Web Services SDK or the Amazon Web Services CLI, you must provide this token or the action will fail.

" @@ -4875,7 +4878,7 @@ }, "ExecutionRole":{ "shape":"RoleArn", - "documentation":"

The ARN of the execution role used to access user resources for Spark sessions and Identity Center enabled workgroups. This property applies only to Spark enabled workgroups and Identity Center enabled workgroups.

" + "documentation":"

The ARN of the execution role used to access user resources for Spark sessions and IAM Identity Center enabled workgroups. This property applies only to Spark enabled workgroups and IAM Identity Center enabled workgroups. The property is required for IAM Identity Center enabled workgroups.

" }, "CustomerContentEncryptionConfiguration":{ "shape":"CustomerContentEncryptionConfiguration", diff -Nru awscli-2.15.9/awscli/botocore/data/autoscaling/2011-01-01/service-2.json awscli-2.15.22/awscli/botocore/data/autoscaling/2011-01-01/service-2.json --- awscli-2.15.9/awscli/botocore/data/autoscaling/2011-01-01/service-2.json 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/awscli/botocore/data/autoscaling/2011-01-01/service-2.json 2024-02-21 17:34:54.000000000 +0000 @@ -393,7 +393,7 @@ {"shape":"InvalidNextToken"}, {"shape":"ResourceContentionFault"} ], - "documentation":"

Gets information about the instance refreshes for the specified Auto Scaling group.

This operation is part of the instance refresh feature in Amazon EC2 Auto Scaling, which helps you update instances in your Auto Scaling group after you make configuration changes.

To help you determine the status of an instance refresh, Amazon EC2 Auto Scaling returns information about the instance refreshes you previously initiated, including their status, start time, end time, the percentage of the instance refresh that is complete, and the number of instances remaining to update before the instance refresh is complete. If a rollback is initiated while an instance refresh is in progress, Amazon EC2 Auto Scaling also returns information about the rollback of the instance refresh.

" + "documentation":"

Gets information about the instance refreshes for the specified Auto Scaling group from the previous six weeks.

This operation is part of the instance refresh feature in Amazon EC2 Auto Scaling, which helps you update instances in your Auto Scaling group after you make configuration changes.

To help you determine the status of an instance refresh, Amazon EC2 Auto Scaling returns information about the instance refreshes you previously initiated, including their status, start time, end time, the percentage of the instance refresh that is complete, and the number of instances remaining to update before the instance refresh is complete. If a rollback is initiated while an instance refresh is in progress, Amazon EC2 Auto Scaling also returns information about the rollback of the instance refresh.

" }, "DescribeLaunchConfigurations":{ "name":"DescribeLaunchConfigurations", @@ -3102,11 +3102,11 @@ "members":{ "MinHealthyPercentage":{ "shape":"IntPercentResettable", - "documentation":"

Specifies the lower threshold as a percentage of the desired capacity of the Auto Scaling group. It represents the minimum percentage of the group to keep in service, healthy, and ready to use to support your workload when replacing instances. Value range is 0 to 100. After it's set, a value of -1 will clear the previously set value.

" + "documentation":"

Specifies the lower threshold as a percentage of the desired capacity of the Auto Scaling group. It represents the minimum percentage of the group to keep in service, healthy, and ready to use to support your workload when replacing instances. Value range is 0 to 100. To clear a previously set value, specify a value of -1.

" }, "MaxHealthyPercentage":{ "shape":"IntPercent100To200Resettable", - "documentation":"

Specifies the upper threshold as a percentage of the desired capacity of the Auto Scaling group. It represents the maximum percentage of the group that can be in service and healthy, or pending, to support your workload when replacing instances. Value range is 100 to 200. After it's set, a value of -1 will clear the previously set value.

Both MinHealthyPercentage and MaxHealthyPercentage must be specified, and the difference between them cannot be greater than 100. A large range increases the number of instances that can be replaced at the same time.

" + "documentation":"

Specifies the upper threshold as a percentage of the desired capacity of the Auto Scaling group. It represents the maximum percentage of the group that can be in service and healthy, or pending, to support your workload when replacing instances. Value range is 100 to 200. To clear a previously set value, specify a value of -1.

Both MinHealthyPercentage and MaxHealthyPercentage must be specified, and the difference between them cannot be greater than 100. A large range increases the number of instances that can be replaced at the same time.

" } }, "documentation":"

Describes an instance maintenance policy.

For more information, see Set instance maintenance policy in the Amazon EC2 Auto Scaling User Guide.

" @@ -3323,11 +3323,15 @@ }, "SpotMaxPricePercentageOverLowestPrice":{ "shape":"NullablePositiveInteger", - "documentation":"

The price protection threshold for Spot Instances. This is the maximum you’ll pay for a Spot Instance, expressed as a percentage higher than the least expensive current generation M, C, or R instance type with your specified attributes. When Amazon EC2 Auto Scaling selects instance types with your attributes, we will exclude instance types whose price is higher than your threshold. The parameter accepts an integer, which Amazon EC2 Auto Scaling interprets as a percentage. To turn off price protection, specify a high value, such as 999999.

If you set DesiredCapacityType to vcpu or memory-mib, the price protection threshold is applied based on the per vCPU or per memory price instead of the per instance price.

Default: 100

" + "documentation":"

[Price protection] The price protection threshold for Spot Instances, as a percentage higher than an identified Spot price. The identified Spot price is the price of the lowest priced current generation C, M, or R instance type with your specified attributes. If no current generation C, M, or R instance type matches your attributes, then the identified price is from either the lowest priced current generation instance types or, failing that, the lowest priced previous generation instance types that match your attributes. When Amazon EC2 Auto Scaling selects instance types with your attributes, we will exclude instance types whose price exceeds your specified threshold.

The parameter accepts an integer, which Amazon EC2 Auto Scaling interprets as a percentage.

To turn off price protection, specify a high value, such as 999999.

If you set DesiredCapacityType to vcpu or memory-mib, the price protection threshold is based on the per-vCPU or per-memory price instead of the per instance price.

Only one of SpotMaxPricePercentageOverLowestPrice or MaxSpotPriceAsPercentageOfOptimalOnDemandPrice can be specified.

Default: 100

" + }, + "MaxSpotPriceAsPercentageOfOptimalOnDemandPrice":{ + "shape":"NullablePositiveInteger", + "documentation":"

[Price protection] The price protection threshold for Spot Instances, as a percentage of an identified On-Demand price. The identified On-Demand price is the price of the lowest priced current generation C, M, or R instance type with your specified attributes. If no current generation C, M, or R instance type matches your attributes, then the identified price is from either the lowest priced current generation instance types or, failing that, the lowest priced previous generation instance types that match your attributes. When Amazon EC2 Auto Scaling selects instance types with your attributes, we will exclude instance types whose price exceeds your specified threshold.

The parameter accepts an integer, which Amazon EC2 Auto Scaling interprets as a percentage.

To indicate no price protection threshold, specify a high value, such as 999999.

If you set DesiredCapacityType to vcpu or memory-mib, the price protection threshold is based on the per-vCPU or per-memory price instead of the per instance price.

Only one of SpotMaxPricePercentageOverLowestPrice or MaxSpotPriceAsPercentageOfOptimalOnDemandPrice can be specified. If you don't specify either, then SpotMaxPricePercentageOverLowestPrice is used and the value for that parameter defaults to 100.

" }, "OnDemandMaxPricePercentageOverLowestPrice":{ "shape":"NullablePositiveInteger", - "documentation":"

The price protection threshold for On-Demand Instances. This is the maximum you’ll pay for an On-Demand Instance, expressed as a percentage higher than the least expensive current generation M, C, or R instance type with your specified attributes. When Amazon EC2 Auto Scaling selects instance types with your attributes, we will exclude instance types whose price is higher than your threshold. The parameter accepts an integer, which Amazon EC2 Auto Scaling interprets as a percentage. To turn off price protection, specify a high value, such as 999999.

If you set DesiredCapacityType to vcpu or memory-mib, the price protection threshold is applied based on the per vCPU or per memory price instead of the per instance price.

Default: 20

" + "documentation":"

[Price protection] The price protection threshold for On-Demand Instances, as a percentage higher than an identified On-Demand price. The identified On-Demand price is the price of the lowest priced current generation C, M, or R instance type with your specified attributes. If no current generation C, M, or R instance type matches your attributes, then the identified price is from either the lowest priced current generation instance types or, failing that, the lowest priced previous generation instance types that match your attributes. When Amazon EC2 Auto Scaling selects instance types with your attributes, we will exclude instance types whose price exceeds your specified threshold.

The parameter accepts an integer, which Amazon EC2 Auto Scaling interprets as a percentage.

To turn off price protection, specify a high value, such as 999999.

If you set DesiredCapacityType to vcpu or memory-mib, the price protection threshold is applied based on the per-vCPU or per-memory price instead of the per instance price.

Default: 20

" }, "BareMetal":{ "shape":"BareMetal", @@ -4085,7 +4089,7 @@ "documentation":"

The unit to use for the returned data points. For a complete list of the units that CloudWatch supports, see the MetricDatum data type in the Amazon CloudWatch API Reference.

" } }, - "documentation":"

This structure defines the CloudWatch metric to return, along with the statistic, period, and unit.

For more information about the CloudWatch terminology below, see Amazon CloudWatch concepts in the Amazon CloudWatch User Guide.

" + "documentation":"

This structure defines the CloudWatch metric to return, along with the statistic and unit.

For more information about the CloudWatch terminology below, see Amazon CloudWatch concepts in the Amazon CloudWatch User Guide.

" }, "MetricStatistic":{ "type":"string", diff -Nru awscli-2.15.9/awscli/botocore/data/b2bi/2022-06-23/service-2.json awscli-2.15.22/awscli/botocore/data/b2bi/2022-06-23/service-2.json --- awscli-2.15.9/awscli/botocore/data/b2bi/2022-06-23/service-2.json 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/awscli/botocore/data/b2bi/2022-06-23/service-2.json 2024-02-21 17:34:54.000000000 +0000 @@ -298,7 +298,8 @@ "output":{"shape":"ListTagsForResourceResponse"}, "errors":[ {"shape":"ValidationException"}, - {"shape":"ResourceNotFoundException"} + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"} ], "documentation":"

Lists all of the tags associated with the Amazon Resource Name (ARN) that you specify. The resource can be a capability, partnership, profile, or transformer.

" }, @@ -340,7 +341,8 @@ "errors":[ {"shape":"ValidationException"}, {"shape":"ThrottlingException"}, - {"shape":"ResourceNotFoundException"} + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"} ], "documentation":"

Attaches a key-value pair to a resource, as identified by its Amazon Resource Name (ARN). Resources are capability, partnership, profile, transformers and other entities.

There is no response returned from this call.

" }, @@ -377,7 +379,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InternalServerException"} ], - "documentation":"

Parses the input EDI (electronic data interchange) file.

", + "documentation":"

Parses the input EDI (electronic data interchange) file. The input file has a file size limit of 250 KB.

", "idempotent":true }, "UntagResource":{ @@ -389,7 +391,8 @@ "input":{"shape":"UntagResourceRequest"}, "errors":[ {"shape":"ValidationException"}, - {"shape":"ResourceNotFoundException"} + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"} ], "documentation":"

Detaches a key-value pair from the specified resource, as identified by its Amazon Resource Name (ARN). Resources are capability, partnership, profile, transformers and other entities.

", "idempotent":true @@ -1772,7 +1775,7 @@ }, "TestMappingInputFileContent":{ "type":"string", - "max":200000, + "max":5000000, "min":0 }, "TestMappingRequest":{ diff -Nru awscli-2.15.9/awscli/botocore/data/batch/2016-08-10/endpoint-rule-set-1.json awscli-2.15.22/awscli/botocore/data/batch/2016-08-10/endpoint-rule-set-1.json --- awscli-2.15.9/awscli/botocore/data/batch/2016-08-10/endpoint-rule-set-1.json 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/awscli/botocore/data/batch/2016-08-10/endpoint-rule-set-1.json 2024-02-21 17:34:54.000000000 +0000 @@ -40,7 +40,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -83,7 +82,8 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -96,7 +96,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -110,7 +109,6 @@ "assign": "PartitionResult" } ], - "type": "tree", "rules": [ { "conditions": [ @@ -133,7 +131,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -168,7 +165,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -179,14 +175,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS and DualStack are enabled, but this partition does not support one or both", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -200,14 +198,12 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ - true, { "fn": "getAttr", "argv": [ @@ -216,18 +212,17 @@ }, "supportsFIPS" ] - } + }, + true ] } ], - "type": "tree", "rules": [ { "conditions": [ { "fn": "stringEquals", "argv": [ - "aws", { "fn": "getAttr", "argv": [ @@ -236,7 +231,8 @@ }, "name" ] - } + }, + "aws" ] } ], @@ -252,7 +248,6 @@ { "fn": "stringEquals", "argv": [ - "aws-us-gov", { "fn": "getAttr", "argv": [ @@ -261,7 +256,8 @@ }, "name" ] - } + }, + "aws-us-gov" ] } ], @@ -281,14 +277,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS is enabled but this partition does not 
support FIPS", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -302,7 +300,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -322,7 +319,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -333,14 +329,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "DualStack is enabled but this partition does not support DualStack", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [], @@ -351,9 +349,11 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" } - ] + ], + "type": "tree" }, { "conditions": [], diff -Nru awscli-2.15.9/awscli/botocore/data/batch/2016-08-10/service-2.json awscli-2.15.22/awscli/botocore/data/batch/2016-08-10/service-2.json --- awscli-2.15.9/awscli/botocore/data/batch/2016-08-10/service-2.json 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/awscli/botocore/data/batch/2016-08-10/service-2.json 2024-02-21 17:34:54.000000000 +0000 @@ -661,7 +661,7 @@ }, "allocationStrategy":{ "shape":"CRAllocationStrategy", - "documentation":"

The allocation strategy to use for the compute resource if not enough instances of the best fitting instance type can be allocated. This might be because of availability of the instance type in the Region or Amazon EC2 service limits. For more information, see Allocation strategies in the Batch User Guide.

This parameter isn't applicable to jobs that are running on Fargate resources. Don't specify it.

BEST_FIT (default)

Batch selects an instance type that best fits the needs of the jobs with a preference for the lowest-cost instance type. If additional instances of the selected instance type aren't available, Batch waits for the additional instances to be available. If there aren't enough instances available or the user is reaching Amazon EC2 service limits, additional jobs aren't run until the currently running jobs are completed. This allocation strategy keeps costs lower but can limit scaling. If you're using Spot Fleets with BEST_FIT, the Spot Fleet IAM Role must be specified. Compute resources that use a BEST_FIT allocation strategy don't support infrastructure updates and can't update some parameters. For more information, see Updating compute environments in the Batch User Guide.

BEST_FIT_PROGRESSIVE

Batch selects additional instance types that are large enough to meet the requirements of the jobs in the queue. Its preference is for instance types with lower cost vCPUs. If additional instances of the previously selected instance types aren't available, Batch selects new instance types.

SPOT_CAPACITY_OPTIMIZED

Batch selects one or more instance types that are large enough to meet the requirements of the jobs in the queue. Its preference is for instance types that are less likely to be interrupted. This allocation strategy is only available for Spot Instance compute resources.

SPOT_PRICE_CAPACITY_OPTIMIZED

The price and capacity optimized allocation strategy looks at both price and capacity to select the Spot Instance pools that are the least likely to be interrupted and have the lowest possible price. This allocation strategy is only available for Spot Instance compute resources.

With BEST_FIT_PROGRESSIVE,SPOT_CAPACITY_OPTIMIZED and SPOT_PRICE_CAPACITY_OPTIMIZED strategies using On-Demand or Spot Instances, and the BEST_FIT strategy using Spot Instances, Batch might need to exceed maxvCpus to meet your capacity requirements. In this event, Batch never exceeds maxvCpus by more than a single instance.

" + "documentation":"

The allocation strategy to use for the compute resource if not enough instances of the best fitting instance type can be allocated. This might be because of availability of the instance type in the Region or Amazon EC2 service limits. For more information, see Allocation strategies in the Batch User Guide.

This parameter isn't applicable to jobs that are running on Fargate resources. Don't specify it.

BEST_FIT (default)

Batch selects an instance type that best fits the needs of the jobs with a preference for the lowest-cost instance type. If additional instances of the selected instance type aren't available, Batch waits for the additional instances to be available. If there aren't enough instances available or the user is reaching Amazon EC2 service limits, additional jobs aren't run until the currently running jobs are completed. This allocation strategy keeps costs lower but can limit scaling. If you're using Spot Fleets with BEST_FIT, the Spot Fleet IAM Role must be specified. Compute resources that use a BEST_FIT allocation strategy don't support infrastructure updates and can't update some parameters. For more information, see Updating compute environments in the Batch User Guide.

BEST_FIT_PROGRESSIVE

Batch selects additional instance types that are large enough to meet the requirements of the jobs in the queue. Its preference is for instance types with lower cost vCPUs. If additional instances of the previously selected instance types aren't available, Batch selects new instance types.

SPOT_CAPACITY_OPTIMIZED

Batch selects one or more instance types that are large enough to meet the requirements of the jobs in the queue. Its preference is for instance types that are less likely to be interrupted. This allocation strategy is only available for Spot Instance compute resources.

SPOT_PRICE_CAPACITY_OPTIMIZED

The price and capacity optimized allocation strategy looks at both price and capacity to select the Spot Instance pools that are the least likely to be interrupted and have the lowest possible price. This allocation strategy is only available for Spot Instance compute resources.

With BEST_FIT_PROGRESSIVE, SPOT_CAPACITY_OPTIMIZED, and SPOT_PRICE_CAPACITY_OPTIMIZED (recommended) strategies using On-Demand or Spot Instances, and the BEST_FIT strategy using Spot Instances, Batch might need to exceed maxvCpus to meet your capacity requirements. In this event, Batch never exceeds maxvCpus by more than a single instance.

" }, "minvCpus":{ "shape":"Integer", @@ -669,7 +669,7 @@ }, "maxvCpus":{ "shape":"Integer", - "documentation":"

The maximum number of vCPUs that a compute environment can support.

With BEST_FIT_PROGRESSIVE, SPOT_CAPACITY_OPTIMIZED and SPOT_PRICE_CAPACITY_OPTIMIZED allocation strategies using On-Demand or Spot Instances, and the BEST_FIT strategy using Spot Instances, Batch might need to exceed maxvCpus to meet your capacity requirements. In this event, Batch never exceeds maxvCpus by more than a single instance. For example, no more than a single instance from among those specified in your compute environment is allocated.

" + "documentation":"

The maximum number of vCPUs that a compute environment can support.

With BEST_FIT_PROGRESSIVE, SPOT_CAPACITY_OPTIMIZED, and SPOT_PRICE_CAPACITY_OPTIMIZED (recommended) strategies using On-Demand or Spot Instances, and the BEST_FIT strategy using Spot Instances, Batch might need to exceed maxvCpus to meet your capacity requirements. In this event, Batch never exceeds maxvCpus by more than a single instance.

" }, "desiredvCpus":{ "shape":"Integer", @@ -699,7 +699,7 @@ }, "instanceRole":{ "shape":"String", - "documentation":"

The Amazon ECS instance profile applied to Amazon EC2 instances in a compute environment. You can specify the short name or full Amazon Resource Name (ARN) of an instance profile. For example, ecsInstanceRole or arn:aws:iam::<aws_account_id>:instance-profile/ecsInstanceRole . For more information, see Amazon ECS instance role in the Batch User Guide.

This parameter isn't applicable to jobs that are running on Fargate resources. Don't specify it.

" + "documentation":"

The Amazon ECS instance profile applied to Amazon EC2 instances in a compute environment. This parameter is required for Amazon EC2 instance types. You can specify the short name or full Amazon Resource Name (ARN) of an instance profile. For example, ecsInstanceRole or arn:aws:iam::<aws_account_id>:instance-profile/ecsInstanceRole . For more information, see Amazon ECS instance role in the Batch User Guide.

This parameter isn't applicable to jobs that are running on Fargate resources. Don't specify it.

" }, "tags":{ "shape":"TagsMap", @@ -737,7 +737,7 @@ }, "maxvCpus":{ "shape":"Integer", - "documentation":"

The maximum number of Amazon EC2 vCPUs that an environment can reach.

With BEST_FIT_PROGRESSIVE, SPOT_CAPACITY_OPTIMIZED, and SPOT_PRICE_CAPACITY_OPTIMIZED allocation strategies using On-Demand or Spot Instances, and the BEST_FIT strategy using Spot Instances, Batch might need to exceed maxvCpus to meet your capacity requirements. In this event, Batch never exceeds maxvCpus by more than a single instance. That is, no more than a single instance from among those specified in your compute environment.

" + "documentation":"

The maximum number of Amazon EC2 vCPUs that an environment can reach.

With BEST_FIT_PROGRESSIVE, SPOT_CAPACITY_OPTIMIZED, and SPOT_PRICE_CAPACITY_OPTIMIZED (recommended) strategies using On-Demand or Spot Instances, and the BEST_FIT strategy using Spot Instances, Batch might need to exceed maxvCpus to meet your capacity requirements. In this event, Batch never exceeds maxvCpus by more than a single instance.

" }, "desiredvCpus":{ "shape":"Integer", @@ -753,7 +753,7 @@ }, "allocationStrategy":{ "shape":"CRUpdateAllocationStrategy", - "documentation":"

The allocation strategy to use for the compute resource if there's not enough instances of the best fitting instance type that can be allocated. This might be because of availability of the instance type in the Region or Amazon EC2 service limits. For more information, see Allocation strategies in the Batch User Guide.

When updating a compute environment, changing the allocation strategy requires an infrastructure update of the compute environment. For more information, see Updating compute environments in the Batch User Guide. BEST_FIT isn't supported when updating a compute environment.

This parameter isn't applicable to jobs that are running on Fargate resources. Don't specify it.

BEST_FIT_PROGRESSIVE

Batch selects additional instance types that are large enough to meet the requirements of the jobs in the queue. Its preference is for instance types with lower cost vCPUs. If additional instances of the previously selected instance types aren't available, Batch selects new instance types.

SPOT_CAPACITY_OPTIMIZED

Batch selects one or more instance types that are large enough to meet the requirements of the jobs in the queue. Its preference is for instance types that are less likely to be interrupted. This allocation strategy is only available for Spot Instance compute resources.

SPOT_PRICE_CAPACITY_OPTIMIZED

The price and capacity optimized allocation strategy looks at both price and capacity to select the Spot Instance pools that are the least likely to be interrupted and have the lowest possible price. This allocation strategy is only available for Spot Instance compute resources.

With both BEST_FIT_PROGRESSIVE, SPOT_CAPACITY_OPTIMIZED, and SPOT_PRICE_CAPACITY_OPTIMIZED strategies using On-Demand or Spot Instances, and the BEST_FIT strategy using Spot Instances, Batch might need to exceed maxvCpus to meet your capacity requirements. In this event, Batch never exceeds maxvCpus by more than a single instance.

" + "documentation":"

The allocation strategy to use for the compute resource if there's not enough instances of the best fitting instance type that can be allocated. This might be because of availability of the instance type in the Region or Amazon EC2 service limits. For more information, see Allocation strategies in the Batch User Guide.

When updating a compute environment, changing the allocation strategy requires an infrastructure update of the compute environment. For more information, see Updating compute environments in the Batch User Guide. BEST_FIT isn't supported when updating a compute environment.

This parameter isn't applicable to jobs that are running on Fargate resources. Don't specify it.

BEST_FIT_PROGRESSIVE

Batch selects additional instance types that are large enough to meet the requirements of the jobs in the queue. Its preference is for instance types with lower cost vCPUs. If additional instances of the previously selected instance types aren't available, Batch selects new instance types.

SPOT_CAPACITY_OPTIMIZED

Batch selects one or more instance types that are large enough to meet the requirements of the jobs in the queue. Its preference is for instance types that are less likely to be interrupted. This allocation strategy is only available for Spot Instance compute resources.

SPOT_PRICE_CAPACITY_OPTIMIZED

The price and capacity optimized allocation strategy looks at both price and capacity to select the Spot Instance pools that are the least likely to be interrupted and have the lowest possible price. This allocation strategy is only available for Spot Instance compute resources.

With BEST_FIT_PROGRESSIVE, SPOT_CAPACITY_OPTIMIZED, and SPOT_PRICE_CAPACITY_OPTIMIZED (recommended) strategies using On-Demand or Spot Instances, and the BEST_FIT strategy using Spot Instances, Batch might need to exceed maxvCpus to meet your capacity requirements. In this event, Batch never exceeds maxvCpus by more than a single instance.

" }, "instanceTypes":{ "shape":"StringList", @@ -765,7 +765,7 @@ }, "instanceRole":{ "shape":"String", - "documentation":"

The Amazon ECS instance profile applied to Amazon EC2 instances in a compute environment. You can specify the short name or full Amazon Resource Name (ARN) of an instance profile. For example, ecsInstanceRole or arn:aws:iam::<aws_account_id>:instance-profile/ecsInstanceRole . For more information, see Amazon ECS instance role in the Batch User Guide.

When updating a compute environment, changing this setting requires an infrastructure update of the compute environment. For more information, see Updating compute environments in the Batch User Guide.

This parameter isn't applicable to jobs that are running on Fargate resources. Don't specify it.

" + "documentation":"

The Amazon ECS instance profile applied to Amazon EC2 instances in a compute environment. Required for Amazon EC2 instances. You can specify the short name or full Amazon Resource Name (ARN) of an instance profile. For example, ecsInstanceRole or arn:aws:iam::<aws_account_id>:instance-profile/ecsInstanceRole . For more information, see Amazon ECS instance role in the Batch User Guide.

When updating a compute environment, changing this setting requires an infrastructure update of the compute environment. For more information, see Updating compute environments in the Batch User Guide.

This parameter isn't applicable to jobs that are running on Fargate resources. Don't specify it.

" }, "tags":{ "shape":"TagsMap", @@ -913,7 +913,14 @@ "shape":"EphemeralStorage", "documentation":"

The amount of ephemeral storage allocated for the task. This parameter is used to expand the total amount of ephemeral storage available, beyond the default amount, for tasks hosted on Fargate.

" }, - "runtimePlatform":{"shape":"RuntimePlatform"} + "runtimePlatform":{ + "shape":"RuntimePlatform", + "documentation":"

An object that represents the compute environment architecture for Batch jobs on Fargate.

" + }, + "repositoryCredentials":{ + "shape":"RepositoryCredentials", + "documentation":"

The private repository authentication credentials to use.

" + } }, "documentation":"

An object that represents the details of a container that's part of a job.

" }, @@ -956,7 +963,7 @@ "members":{ "image":{ "shape":"String", - "documentation":"

The image used to start a container. This string is passed directly to the Docker daemon. Images in the Docker Hub registry are available by default. Other repositories are specified with repository-url/image:tag . It can be 255 characters long. It can contain uppercase and lowercase letters, numbers, hyphens (-), underscores (_), colons (:), periods (.), forward slashes (/), and number signs (#). This parameter maps to Image in the Create a container section of the Docker Remote API and the IMAGE parameter of docker run.

Docker image architecture must match the processor architecture of the compute resources that they're scheduled on. For example, ARM-based Docker images can only run on ARM-based compute resources.

" + "documentation":"

Required. The image used to start a container. This string is passed directly to the Docker daemon. Images in the Docker Hub registry are available by default. Other repositories are specified with repository-url/image:tag . It can be 255 characters long. It can contain uppercase and lowercase letters, numbers, hyphens (-), underscores (_), colons (:), periods (.), forward slashes (/), and number signs (#). This parameter maps to Image in the Create a container section of the Docker Remote API and the IMAGE parameter of docker run.

Docker image architecture must match the processor architecture of the compute resources that they're scheduled on. For example, ARM-based Docker images can only run on ARM-based compute resources.

" }, "vcpus":{ "shape":"Integer", @@ -1042,7 +1049,14 @@ "shape":"EphemeralStorage", "documentation":"

The amount of ephemeral storage to allocate for the task. This parameter is used to expand the total amount of ephemeral storage available, beyond the default amount, for tasks hosted on Fargate.

" }, - "runtimePlatform":{"shape":"RuntimePlatform"} + "runtimePlatform":{ + "shape":"RuntimePlatform", + "documentation":"

An object that represents the compute environment architecture for Batch jobs on Fargate.

" + }, + "repositoryCredentials":{ + "shape":"RepositoryCredentials", + "documentation":"

The private repository authentication credentials to use.

" + } }, "documentation":"

Container properties are used for Amazon ECS based job definitions. These properties describe the container that's launched as part of a job.

" }, @@ -1302,7 +1316,7 @@ "members":{ "jobDefinitions":{ "shape":"StringList", - "documentation":"

A list of up to 100 job definitions. Each entry in the list can either be an ARN in the format arn:aws:batch:${Region}:${Account}:job-definition/${JobDefinitionName}:${Revision} or a short version using the form ${JobDefinitionName}:${Revision}.

" + "documentation":"

A list of up to 100 job definitions. Each entry in the list can either be an ARN in the format arn:aws:batch:${Region}:${Account}:job-definition/${JobDefinitionName}:${Revision} or a short version using the form ${JobDefinitionName}:${Revision}. This parameter can't be used with other parameters.

" }, "maxResults":{ "shape":"Integer", @@ -1503,7 +1517,7 @@ "members":{ "imageType":{ "shape":"ImageType", - "documentation":"

The image type to match with the instance type to select an AMI. The supported values are different for ECS and EKS resources.

ECS

If the imageIdOverride parameter isn't specified, then a recent Amazon ECS-optimized Amazon Linux 2 AMI (ECS_AL2) is used. If a new image type is specified in an update, but neither an imageId nor a imageIdOverride parameter is specified, then the latest Amazon ECS optimized AMI for that image type that's supported by Batch is used.

ECS_AL2

Amazon Linux 2: Default for all non-GPU instance families.

ECS_AL2_NVIDIA

Amazon Linux 2 (GPU): Default for all GPU instance families (for example P4 and G4) and can be used for all non Amazon Web Services Graviton-based instance types.

ECS_AL1

Amazon Linux. Amazon Linux has reached the end-of-life of standard support. For more information, see Amazon Linux AMI.

EKS

If the imageIdOverride parameter isn't specified, then a recent Amazon EKS-optimized Amazon Linux AMI (EKS_AL2) is used. If a new image type is specified in an update, but neither an imageId nor a imageIdOverride parameter is specified, then the latest Amazon EKS optimized AMI for that image type that Batch supports is used.

EKS_AL2

Amazon Linux 2: Default for all non-GPU instance families.

EKS_AL2_NVIDIA

Amazon Linux 2 (accelerated): Default for all GPU instance families (for example, P4 and G4) and can be used for all non Amazon Web Services Graviton-based instance types.

" + "documentation":"

The image type to match with the instance type to select an AMI. The supported values are different for ECS and EKS resources.

ECS

If the imageIdOverride parameter isn't specified, then a recent Amazon ECS-optimized Amazon Linux 2 AMI (ECS_AL2) is used. If a new image type is specified in an update, but neither an imageId nor a imageIdOverride parameter is specified, then the latest Amazon ECS optimized AMI for that image type that's supported by Batch is used.

ECS_AL2

Amazon Linux 2: Default for all non-GPU instance families.

ECS_AL2_NVIDIA

Amazon Linux 2 (GPU): Default for all GPU instance families (for example P4 and G4) and can be used for all non Amazon Web Services Graviton-based instance types.

ECS_AL2023

Amazon Linux 2023: Batch supports Amazon Linux 2023.

Amazon Linux 2023 does not support A1 instances.

ECS_AL1

Amazon Linux. Amazon Linux has reached the end-of-life of standard support. For more information, see Amazon Linux AMI.

EKS

If the imageIdOverride parameter isn't specified, then a recent Amazon EKS-optimized Amazon Linux AMI (EKS_AL2) is used. If a new image type is specified in an update, but neither an imageId nor a imageIdOverride parameter is specified, then the latest Amazon EKS optimized AMI for that image type that Batch supports is used.

EKS_AL2

Amazon Linux 2: Default for all non-GPU instance families.

EKS_AL2_NVIDIA

Amazon Linux 2 (accelerated): Default for all GPU instance families (for example, P4 and G4) and can be used for all non Amazon Web Services Graviton-based instance types.

" }, "imageIdOverride":{ "shape":"ImageIdOverride", @@ -1907,7 +1921,10 @@ "shape":"String", "documentation":"

The name of the node for this job.

" }, - "metadata":{"shape":"EksMetadata"} + "metadata":{ + "shape":"EksMetadata", + "documentation":"

Describes and uniquely identifies Kubernetes resources. For example, the compute environment that a pod runs in or the jobID for a job running in the pod. For more information, see Understanding Kubernetes Objects in the Kubernetes documentation.

" + } }, "documentation":"

The details for the pod.

" }, @@ -2053,7 +2070,7 @@ }, "computeReservation":{ "shape":"Integer", - "documentation":"

A value used to reserve some of the available maximum vCPU for fair share identifiers that aren't already used.

The reserved ratio is (computeReservation/100)^ActiveFairShares where ActiveFairShares is the number of active fair share identifiers.

For example, a computeReservation value of 50 indicates that Batchreserves 50% of the maximum available vCPU if there's only one fair share identifier. It reserves 25% if there are two fair share identifiers. It reserves 12.5% if there are three fair share identifiers. A computeReservation value of 25 indicates that Batch should reserve 25% of the maximum available vCPU if there's only one fair share identifier, 6.25% if there are two fair share identifiers, and 1.56% if there are three fair share identifiers.

The minimum value is 0 and the maximum value is 99.

" + "documentation":"

A value used to reserve some of the available maximum vCPU for fair share identifiers that aren't already used.

The reserved ratio is (computeReservation/100)^ActiveFairShares where ActiveFairShares is the number of active fair share identifiers.

For example, a computeReservation value of 50 indicates that Batch reserves 50% of the maximum available vCPU if there's only one fair share identifier. It reserves 25% if there are two fair share identifiers. It reserves 12.5% if there are three fair share identifiers. A computeReservation value of 25 indicates that Batch should reserve 25% of the maximum available vCPU if there's only one fair share identifier, 6.25% if there are two fair share identifiers, and 1.56% if there are three fair share identifiers.

The minimum value is 0 and the maximum value is 99.

" }, "shareDistribution":{ "shape":"ShareAttributesList", @@ -2274,7 +2291,7 @@ }, "startedAt":{ "shape":"Long", - "documentation":"

The Unix timestamp (in milliseconds) for when the job was started. More specifically, it's when the job transitioned from the STARTING state to the RUNNING state. This parameter isn't provided for child jobs of array jobs or multi-node parallel jobs.

" + "documentation":"

The Unix timestamp (in milliseconds) for when the job was started. More specifically, it's when the job transitioned from the STARTING state to the RUNNING state.

" }, "stoppedAt":{ "shape":"Long", @@ -2724,7 +2741,7 @@ "documentation":"

The name of the volume to mount.

" } }, - "documentation":"

Details for a Docker volume mount point that's used in a job's container properties. This parameter maps to Volumes in the Create a container section of the Docker Remote API and the --volume option to docker run.

" + "documentation":"

Details for a Docker volume mount point that's used in a job's container properties. This parameter maps to Volumes in the Create a container section of the Docker Remote API and the --volume option to docker run.

" }, "MountPoints":{ "type":"list", @@ -2977,6 +2994,17 @@ } } }, + "RepositoryCredentials":{ + "type":"structure", + "required":["credentialsParameter"], + "members":{ + "credentialsParameter":{ + "shape":"String", + "documentation":"

The Amazon Resource Name (ARN) of the secret containing the private repository credentials.

" + } + }, + "documentation":"

The repository credentials for private registry authentication.

" + }, "ResourceRequirement":{ "type":"structure", "required":[ @@ -3033,14 +3061,14 @@ "members":{ "operatingSystemFamily":{ "shape":"String", - "documentation":"

The operating system for the compute environment. Valid values are: LINUX (default), WINDOWS_SERVER_2019_CORE, WINDOWS_SERVER_2019_FULL, WINDOWS_SERVER_2022_CORE, and WINDOWS_SERVER_2022_FULL.

The following parameters can’t be set for Windows containers: linuxParameters, privileged, user, ulimits, readonlyRootFilesystem, and efsVolumeConfiguration.

The Batch Scheduler checks before registering a task definition with Fargate. If the job requires a Windows container and the first compute environment is LINUX, the compute environment is skipped and the next is checked until a Windows-based compute environment is found.

Fargate Spot is not supported for Windows-based containers on Fargate. A job queue will be blocked if a Fargate Windows job is submitted to a job queue with only Fargate Spot compute environments. However, you can attach both FARGATE and FARGATE_SPOT compute environments to the same job queue.

" + "documentation":"

The operating system for the compute environment. Valid values are: LINUX (default), WINDOWS_SERVER_2019_CORE, WINDOWS_SERVER_2019_FULL, WINDOWS_SERVER_2022_CORE, and WINDOWS_SERVER_2022_FULL.

The following parameters can’t be set for Windows containers: linuxParameters, privileged, user, ulimits, readonlyRootFilesystem, and efsVolumeConfiguration.

The Batch Scheduler checks the compute environments that are attached to the job queue before registering a task definition with Fargate. In this scenario, the job queue is where the job is submitted. If the job requires a Windows container and the first compute environment is LINUX, the compute environment is skipped and the next compute environment is checked until a Windows-based compute environment is found.

Fargate Spot is not supported for ARM64 and Windows-based containers on Fargate. A job queue will be blocked if a Fargate ARM64 or Windows job is submitted to a job queue with only Fargate Spot compute environments. However, you can attach both FARGATE and FARGATE_SPOT compute environments to the same job queue.

" }, "cpuArchitecture":{ "shape":"String", - "documentation":"

The vCPU architecture. The default value is X86_64. Valid values are X86_64 and ARM64.

This parameter must be set to X86_64 for Windows containers.

" + "documentation":"

The vCPU architecture. The default value is X86_64. Valid values are X86_64 and ARM64.

This parameter must be set to X86_64 for Windows containers.

Fargate Spot is not supported for ARM64 and Windows-based containers on Fargate. A job queue will be blocked if a Fargate ARM64 or Windows job is submitted to a job queue with only Fargate Spot compute environments. However, you can attach both FARGATE and FARGATE_SPOT compute environments to the same job queue.

" } }, - "documentation":"

An object that represents the compute environment architecture for Batch jobs on Fargate.

" + "documentation":"

An object that represents the compute environment architecture for Batch jobs on Fargate.

" }, "SchedulingPolicyDetail":{ "type":"structure", @@ -3165,7 +3193,7 @@ }, "schedulingPriorityOverride":{ "shape":"Integer", - "documentation":"

The scheduling priority for the job. This only affects jobs in job queues with a fair share policy. Jobs with a higher scheduling priority are scheduled before jobs with a lower scheduling priority. This overrides any scheduling priority in the job definition.

The minimum supported value is 0 and the maximum supported value is 9999.

" + "documentation":"

The scheduling priority for the job. This only affects jobs in job queues with a fair share policy. Jobs with a higher scheduling priority are scheduled before jobs with a lower scheduling priority. This overrides any scheduling priority in the job definition and works only within a single share identifier.

The minimum supported value is 0 and the maximum supported value is 9999.

" }, "arrayProperties":{ "shape":"ArrayProperties", @@ -3346,18 +3374,18 @@ "members":{ "hardLimit":{ "shape":"Integer", - "documentation":"

The hard limit for the ulimit type.

" + "documentation":"

The hard limit for the ulimit type.

" }, "name":{ "shape":"String", - "documentation":"

The type of the ulimit.

" + "documentation":"

The type of the ulimit. Valid values are: core | cpu | data | fsize | locks | memlock | msgqueue | nice | nofile | nproc | rss | rtprio | rttime | sigpending | stack.

" }, "softLimit":{ "shape":"Integer", "documentation":"

The soft limit for the ulimit type.

" } }, - "documentation":"

The ulimit settings to pass to the container.

This object isn't applicable to jobs that are running on Fargate resources.

" + "documentation":"

The ulimit settings to pass to the container. For more information, see Ulimit.

This object isn't applicable to jobs that are running on Fargate resources.

" }, "Ulimits":{ "type":"list", diff -Nru awscli-2.15.9/awscli/botocore/data/braket/2019-09-01/service-2.json awscli-2.15.22/awscli/botocore/data/braket/2019-09-01/service-2.json --- awscli-2.15.9/awscli/botocore/data/braket/2019-09-01/service-2.json 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/awscli/botocore/data/braket/2019-09-01/service-2.json 2024-02-21 17:34:54.000000000 +0000 @@ -65,6 +65,7 @@ {"shape":"ConflictException"}, {"shape":"AccessDeniedException"}, {"shape":"ThrottlingException"}, + {"shape":"DeviceOfflineException"}, {"shape":"DeviceRetiredException"}, {"shape":"InternalServiceException"}, {"shape":"ServiceQuotaExceededException"}, @@ -1157,7 +1158,7 @@ }, "JobArn":{ "type":"string", - "pattern":"^arn:aws[a-z\\-]*:braket:[a-z0-9\\-]*:[0-9]{12}:job/.*$" + "pattern":"^arn:aws[a-z\\-]*:braket:[a-z0-9\\-]+:[0-9]{12}:job/.*$" }, "JobCheckpointConfig":{ "type":"structure", diff -Nru awscli-2.15.9/awscli/botocore/data/chatbot/2017-10-11/endpoint-rule-set-1.json awscli-2.15.22/awscli/botocore/data/chatbot/2017-10-11/endpoint-rule-set-1.json --- awscli-2.15.9/awscli/botocore/data/chatbot/2017-10-11/endpoint-rule-set-1.json 1970-01-01 00:00:00.000000000 +0000 +++ awscli-2.15.22/awscli/botocore/data/chatbot/2017-10-11/endpoint-rule-set-1.json 2024-02-21 17:34:54.000000000 +0000 @@ -0,0 +1,350 @@ +{ + "version": "1.0", + "parameters": { + "Region": { + "builtIn": "AWS::Region", + "required": false, + "documentation": "The AWS region used to dispatch the request.", + "type": "String" + }, + "UseDualStack": { + "builtIn": "AWS::UseDualStack", + "required": true, + "default": false, + "documentation": "When true, use the dual-stack endpoint. If the configured endpoint does not support dual-stack, dispatching the request MAY return an error.", + "type": "Boolean" + }, + "UseFIPS": { + "builtIn": "AWS::UseFIPS", + "required": true, + "default": false, + "documentation": "When true, send this request to the FIPS-compliant regional endpoint. 
If the configured endpoint does not have a FIPS compliant endpoint, dispatching the request will return an error.", + "type": "Boolean" + }, + "Endpoint": { + "builtIn": "SDK::Endpoint", + "required": false, + "documentation": "Override the endpoint used to send this request", + "type": "String" + } + }, + "rules": [ + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Endpoint" + } + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + } + ], + "type": "tree" + }, + { + "conditions": [], + "rules": [ + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "aws.partition", + "argv": [ + { + "ref": "Region" + } + ], + "assign": "PartitionResult" + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + }, + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "rules": [ + { + "conditions": [], + "rules": [ + { + 
"conditions": [], + "endpoint": { + "url": "https://chatbot-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + } + ], + "type": "tree" + }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ], + "type": "tree" + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + }, + true + ] + } + ], + "rules": [ + { + "conditions": [], + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://chatbot-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + } + ], + "type": "tree" + }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ], + "type": "tree" + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "rules": [ + { + "conditions": [], + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://chatbot.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + } + ], + "type": "tree" + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" + } + ], + "type": "tree" + }, + { + "conditions": [], + "rules": [ + { + "conditions": [], + "endpoint": { + "url": 
"https://chatbot.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + } + ], + "type": "tree" + } + ], + "type": "tree" + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" + } + ], + "type": "tree" + } + ] +} \ No newline at end of file diff -Nru awscli-2.15.9/awscli/botocore/data/chatbot/2017-10-11/paginators-1.json awscli-2.15.22/awscli/botocore/data/chatbot/2017-10-11/paginators-1.json --- awscli-2.15.9/awscli/botocore/data/chatbot/2017-10-11/paginators-1.json 1970-01-01 00:00:00.000000000 +0000 +++ awscli-2.15.22/awscli/botocore/data/chatbot/2017-10-11/paginators-1.json 2024-02-21 17:34:54.000000000 +0000 @@ -0,0 +1,3 @@ +{ + "pagination": {} +} diff -Nru awscli-2.15.9/awscli/botocore/data/chatbot/2017-10-11/service-2.json awscli-2.15.22/awscli/botocore/data/chatbot/2017-10-11/service-2.json --- awscli-2.15.9/awscli/botocore/data/chatbot/2017-10-11/service-2.json 1970-01-01 00:00:00.000000000 +0000 +++ awscli-2.15.22/awscli/botocore/data/chatbot/2017-10-11/service-2.json 2024-02-21 17:34:54.000000000 +0000 @@ -0,0 +1,1770 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2017-10-11", + "endpointPrefix":"chatbot", + "jsonVersion":"1.1", + "protocol":"rest-json", + "serviceFullName":"chatbot", + "serviceId":"chatbot", + "signatureVersion":"v4", + "uid":"chatbot-2017-10-11" + }, + "operations":{ + "CreateChimeWebhookConfiguration":{ + "name":"CreateChimeWebhookConfiguration", + "http":{ + "method":"POST", + "requestUri":"/create-chime-webhook-configuration", + "responseCode":201 + }, + "input":{"shape":"CreateChimeWebhookConfigurationRequest"}, + "output":{"shape":"CreateChimeWebhookConfigurationResult"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"LimitExceededException"}, + {"shape":"InvalidRequestException"}, + {"shape":"CreateChimeWebhookConfigurationException"}, + {"shape":"ConflictException"} + ], + 
"documentation":"Creates Chime Webhook Configuration" + }, + "CreateMicrosoftTeamsChannelConfiguration":{ + "name":"CreateMicrosoftTeamsChannelConfiguration", + "http":{ + "method":"POST", + "requestUri":"/create-ms-teams-channel-configuration", + "responseCode":201 + }, + "input":{"shape":"CreateTeamsChannelConfigurationRequest"}, + "output":{"shape":"CreateTeamsChannelConfigurationResult"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"LimitExceededException"}, + {"shape":"InvalidRequestException"}, + {"shape":"CreateTeamsChannelConfigurationException"}, + {"shape":"ConflictException"} + ], + "documentation":"Creates MS Teams Channel Configuration" + }, + "CreateSlackChannelConfiguration":{ + "name":"CreateSlackChannelConfiguration", + "http":{ + "method":"POST", + "requestUri":"/create-slack-channel-configuration", + "responseCode":201 + }, + "input":{"shape":"CreateSlackChannelConfigurationRequest"}, + "output":{"shape":"CreateSlackChannelConfigurationResult"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"LimitExceededException"}, + {"shape":"InvalidRequestException"}, + {"shape":"CreateSlackChannelConfigurationException"}, + {"shape":"ConflictException"} + ], + "documentation":"Creates Slack Channel Configuration" + }, + "DeleteChimeWebhookConfiguration":{ + "name":"DeleteChimeWebhookConfiguration", + "http":{ + "method":"POST", + "requestUri":"/delete-chime-webhook-configuration", + "responseCode":204 + }, + "input":{"shape":"DeleteChimeWebhookConfigurationRequest"}, + "output":{"shape":"DeleteChimeWebhookConfigurationResult"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidParameterException"}, + {"shape":"InvalidRequestException"}, + {"shape":"DeleteChimeWebhookConfigurationException"} + ], + "documentation":"Deletes a Chime Webhook Configuration" + }, + "DeleteMicrosoftTeamsChannelConfiguration":{ + "name":"DeleteMicrosoftTeamsChannelConfiguration", + "http":{ + "method":"POST", + 
"requestUri":"/delete-ms-teams-channel-configuration", + "responseCode":204 + }, + "input":{"shape":"DeleteTeamsChannelConfigurationRequest"}, + "output":{"shape":"DeleteTeamsChannelConfigurationResult"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidParameterException"}, + {"shape":"InvalidRequestException"}, + {"shape":"DeleteTeamsChannelConfigurationException"} + ], + "documentation":"Deletes MS Teams Channel Configuration" + }, + "DeleteMicrosoftTeamsConfiguredTeam":{ + "name":"DeleteMicrosoftTeamsConfiguredTeam", + "http":{ + "method":"POST", + "requestUri":"/delete-ms-teams-configured-teams", + "responseCode":204 + }, + "input":{"shape":"DeleteTeamsConfiguredTeamRequest"}, + "output":{"shape":"DeleteTeamsConfiguredTeamResult"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"DeleteTeamsConfiguredTeamException"} + ], + "documentation":"Deletes the Microsoft Teams team authorization allowing for channels to be configured in that Microsoft Teams team. Note that the Microsoft Teams team must have no channels configured to remove it." 
+ }, + "DeleteMicrosoftTeamsUserIdentity":{ + "name":"DeleteMicrosoftTeamsUserIdentity", + "http":{ + "method":"POST", + "requestUri":"/delete-ms-teams-user-identity", + "responseCode":204 + }, + "input":{"shape":"DeleteMicrosoftTeamsUserIdentityRequest"}, + "output":{"shape":"DeleteMicrosoftTeamsUserIdentityResult"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"DeleteMicrosoftTeamsUserIdentityException"} + ], + "documentation":"Deletes a Teams user identity" + }, + "DeleteSlackChannelConfiguration":{ + "name":"DeleteSlackChannelConfiguration", + "http":{ + "method":"POST", + "requestUri":"/delete-slack-channel-configuration", + "responseCode":204 + }, + "input":{"shape":"DeleteSlackChannelConfigurationRequest"}, + "output":{"shape":"DeleteSlackChannelConfigurationResult"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidParameterException"}, + {"shape":"InvalidRequestException"}, + {"shape":"DeleteSlackChannelConfigurationException"} + ], + "documentation":"Deletes Slack Channel Configuration" + }, + "DeleteSlackUserIdentity":{ + "name":"DeleteSlackUserIdentity", + "http":{ + "method":"POST", + "requestUri":"/delete-slack-user-identity", + "responseCode":204 + }, + "input":{"shape":"DeleteSlackUserIdentityRequest"}, + "output":{"shape":"DeleteSlackUserIdentityResult"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"DeleteSlackUserIdentityException"} + ], + "documentation":"Deletes a Slack user identity" + }, + "DeleteSlackWorkspaceAuthorization":{ + "name":"DeleteSlackWorkspaceAuthorization", + "http":{ + "method":"POST", + "requestUri":"/delete-slack-workspace-authorization", + "responseCode":204 + }, + "input":{"shape":"DeleteSlackWorkspaceAuthorizationRequest"}, + "output":{"shape":"DeleteSlackWorkspaceAuthorizationResult"}, + "errors":[ + {"shape":"InvalidParameterException"}, + 
{"shape":"DeleteSlackWorkspaceAuthorizationFault"} + ], + "documentation":"Deletes the Slack workspace authorization that allows channels to be configured in that workspace. This requires all configured channels in the workspace to be deleted." + }, + "DescribeChimeWebhookConfigurations":{ + "name":"DescribeChimeWebhookConfigurations", + "http":{ + "method":"POST", + "requestUri":"/describe-chime-webhook-configurations", + "responseCode":200 + }, + "input":{"shape":"DescribeChimeWebhookConfigurationsRequest"}, + "output":{"shape":"DescribeChimeWebhookConfigurationsResult"}, + "errors":[ + {"shape":"DescribeChimeWebhookConfigurationsException"}, + {"shape":"InvalidParameterException"}, + {"shape":"InvalidRequestException"} + ], + "documentation":"Lists Chime Webhook Configurations optionally filtered by ChatConfigurationArn" + }, + "DescribeSlackChannelConfigurations":{ + "name":"DescribeSlackChannelConfigurations", + "http":{ + "method":"POST", + "requestUri":"/describe-slack-channel-configurations", + "responseCode":200 + }, + "input":{"shape":"DescribeSlackChannelConfigurationsRequest"}, + "output":{"shape":"DescribeSlackChannelConfigurationsResult"}, + "errors":[ + {"shape":"DescribeSlackChannelConfigurationsException"}, + {"shape":"InvalidParameterException"}, + {"shape":"InvalidRequestException"} + ], + "documentation":"Lists Slack Channel Configurations optionally filtered by ChatConfigurationArn" + }, + "DescribeSlackUserIdentities":{ + "name":"DescribeSlackUserIdentities", + "http":{ + "method":"POST", + "requestUri":"/describe-slack-user-identities", + "responseCode":200 + }, + "input":{"shape":"DescribeSlackUserIdentitiesRequest"}, + "output":{"shape":"DescribeSlackUserIdentitiesResult"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"InvalidRequestException"}, + {"shape":"DescribeSlackUserIdentitiesException"} + ], + "documentation":"Lists all Slack user identities with a mapped role." 
+ }, + "DescribeSlackWorkspaces":{ + "name":"DescribeSlackWorkspaces", + "http":{ + "method":"POST", + "requestUri":"/describe-slack-workspaces", + "responseCode":200 + }, + "input":{"shape":"DescribeSlackWorkspacesRequest"}, + "output":{"shape":"DescribeSlackWorkspacesResult"}, + "errors":[ + {"shape":"DescribeSlackWorkspacesException"}, + {"shape":"InvalidRequestException"}, + {"shape":"InvalidParameterException"} + ], + "documentation":"Lists all authorized Slack Workspaces for AWS Account" + }, + "GetAccountPreferences":{ + "name":"GetAccountPreferences", + "http":{ + "method":"POST", + "requestUri":"/get-account-preferences", + "responseCode":200 + }, + "input":{"shape":"GetAccountPreferencesRequest"}, + "output":{"shape":"GetAccountPreferencesResult"}, + "errors":[ + {"shape":"GetAccountPreferencesException"}, + {"shape":"InvalidRequestException"} + ], + "documentation":"Get Chatbot account level preferences" + }, + "GetMicrosoftTeamsChannelConfiguration":{ + "name":"GetMicrosoftTeamsChannelConfiguration", + "http":{ + "method":"POST", + "requestUri":"/get-ms-teams-channel-configuration", + "responseCode":200 + }, + "input":{"shape":"GetTeamsChannelConfigurationRequest"}, + "output":{"shape":"GetTeamsChannelConfigurationResult"}, + "errors":[ + {"shape":"GetTeamsChannelConfigurationException"}, + {"shape":"InvalidParameterException"}, + {"shape":"InvalidRequestException"} + ], + "documentation":"Get a single MS Teams Channel Configurations" + }, + "ListMicrosoftTeamsChannelConfigurations":{ + "name":"ListMicrosoftTeamsChannelConfigurations", + "http":{ + "method":"POST", + "requestUri":"/list-ms-teams-channel-configurations", + "responseCode":200 + }, + "input":{"shape":"ListTeamsChannelConfigurationsRequest"}, + "output":{"shape":"ListTeamsChannelConfigurationsResult"}, + "errors":[ + {"shape":"ListTeamsChannelConfigurationsException"}, + {"shape":"InvalidParameterException"}, + {"shape":"InvalidRequestException"} + ], + "documentation":"Lists MS Teams 
Channel Configurations optionally filtered by TeamId" + }, + "ListMicrosoftTeamsConfiguredTeams":{ + "name":"ListMicrosoftTeamsConfiguredTeams", + "http":{ + "method":"POST", + "requestUri":"/list-ms-teams-configured-teams", + "responseCode":200 + }, + "input":{"shape":"ListMicrosoftTeamsConfiguredTeamsRequest"}, + "output":{"shape":"ListMicrosoftTeamsConfiguredTeamsResult"}, + "errors":[ + {"shape":"ListMicrosoftTeamsConfiguredTeamsException"}, + {"shape":"InvalidRequestException"}, + {"shape":"InvalidParameterException"} + ], + "documentation":"Lists all authorized MS teams for AWS Account" + }, + "ListMicrosoftTeamsUserIdentities":{ + "name":"ListMicrosoftTeamsUserIdentities", + "http":{ + "method":"POST", + "requestUri":"/list-ms-teams-user-identities", + "responseCode":200 + }, + "input":{"shape":"ListMicrosoftTeamsUserIdentitiesRequest"}, + "output":{"shape":"ListMicrosoftTeamsUserIdentitiesResult"}, + "errors":[ + {"shape":"ListMicrosoftTeamsUserIdentitiesException"}, + {"shape":"InvalidRequestException"}, + {"shape":"InvalidParameterException"} + ], + "documentation":"Lists all Microsoft Teams user identities with a mapped role." 
+ }, + "UpdateAccountPreferences":{ + "name":"UpdateAccountPreferences", + "http":{ + "method":"POST", + "requestUri":"/update-account-preferences", + "responseCode":200 + }, + "input":{"shape":"UpdateAccountPreferencesRequest"}, + "output":{"shape":"UpdateAccountPreferencesResult"}, + "errors":[ + {"shape":"UpdateAccountPreferencesException"}, + {"shape":"InvalidParameterException"}, + {"shape":"InvalidRequestException"} + ], + "documentation":"Update Chatbot account level preferences" + }, + "UpdateChimeWebhookConfiguration":{ + "name":"UpdateChimeWebhookConfiguration", + "http":{ + "method":"POST", + "requestUri":"/update-chime-webhook-configuration", + "responseCode":200 + }, + "input":{"shape":"UpdateChimeWebhookConfigurationRequest"}, + "output":{"shape":"UpdateChimeWebhookConfigurationResult"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidParameterException"}, + {"shape":"InvalidRequestException"}, + {"shape":"UpdateChimeWebhookConfigurationException"} + ], + "documentation":"Updates a Chime Webhook Configuration" + }, + "UpdateMicrosoftTeamsChannelConfiguration":{ + "name":"UpdateMicrosoftTeamsChannelConfiguration", + "http":{ + "method":"POST", + "requestUri":"/update-ms-teams-channel-configuration", + "responseCode":200 + }, + "input":{"shape":"UpdateTeamsChannelConfigurationRequest"}, + "output":{"shape":"UpdateTeamsChannelConfigurationResult"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidParameterException"}, + {"shape":"InvalidRequestException"}, + {"shape":"UpdateTeamsChannelConfigurationException"} + ], + "documentation":"Updates MS Teams Channel Configuration" + }, + "UpdateSlackChannelConfiguration":{ + "name":"UpdateSlackChannelConfiguration", + "http":{ + "method":"POST", + "requestUri":"/update-slack-channel-configuration", + "responseCode":200 + }, + "input":{"shape":"UpdateSlackChannelConfigurationRequest"}, + "output":{"shape":"UpdateSlackChannelConfigurationResult"}, + "errors":[ + 
{"shape":"ResourceNotFoundException"}, + {"shape":"InvalidParameterException"}, + {"shape":"InvalidRequestException"}, + {"shape":"UpdateSlackChannelConfigurationException"} + ], + "documentation":"Updates Slack Channel Configuration" + } + }, + "shapes":{ + "AccountPreferences":{ + "type":"structure", + "members":{ + "UserAuthorizationRequired":{ + "shape":"BooleanAccountPreference", + "documentation":"Enables use of a user role requirement in your chat configuration." + }, + "TrainingDataCollectionEnabled":{ + "shape":"BooleanAccountPreference", + "documentation":"Turns on training data collection. This helps improve the AWS Chatbot experience by allowing AWS Chatbot to store and use your customer information, such as AWS Chatbot configurations, notifications, user inputs, AWS Chatbot generated responses, and interaction data. This data helps us to continuously improve and develop Artificial Intelligence (AI) technologies. Your data is not shared with any third parties and is protected using sophisticated controls to prevent unauthorized access and misuse. AWS Chatbot does not store or use interactions in chat channels with Amazon Q for training AWS Chatbot’s AI technologies." + } + }, + "documentation":"Preferences which apply for AWS Chatbot usage in the calling AWS account." 
+ }, + "Arn":{ + "type":"string", + "max":1224, + "min":12, + "pattern":"^arn:aws:[A-Za-z0-9][A-Za-z0-9_/.-]{0,62}:[A-Za-z0-9_/.-]{0,63}:[A-Za-z0-9_/.-]{0,63}:[A-Za-z0-9][A-Za-z0-9:_/+=,@.-]{0,1023}$" + }, + "AwsUserIdentity":{ + "type":"string", + "max":1101, + "min":15, + "pattern":"^arn:aws:(iam|sts)::[A-Za-z0-9_/.-]{0,63}:[A-Za-z0-9][A-Za-z0-9:_/+=,@.-]{0,1023}$" + }, + "BooleanAccountPreference":{"type":"boolean"}, + "ChatConfigurationArn":{ + "type":"string", + "max":1169, + "min":19, + "pattern":"^arn:aws:(wheatley|chatbot):[A-Za-z0-9_/.-]{0,63}:[A-Za-z0-9_/.-]{0,63}:[A-Za-z0-9][A-Za-z0-9:_/+=,@.-]{0,1023}$" + }, + "ChimeWebhookConfiguration":{ + "type":"structure", + "required":[ + "WebhookDescription", + "ChatConfigurationArn", + "IamRoleArn", + "SnsTopicArns" + ], + "members":{ + "WebhookDescription":{ + "shape":"ChimeWebhookDescription", + "documentation":"Description of the webhook. Recommend using the convention `RoomName/WebhookName`. See Chime setup tutorial for more details: https://docs.aws.amazon.com/chatbot/latest/adminguide/chime-setup.html." + }, + "ChatConfigurationArn":{ + "shape":"ChatConfigurationArn", + "documentation":"The ARN of the ChimeWebhookConfiguration." + }, + "IamRoleArn":{ + "shape":"Arn", + "documentation":"The ARN of the IAM role that defines the permissions for AWS Chatbot. This is a user-defined role that AWS Chatbot will assume. This is not the service-linked role. For more information, see IAM Policies for AWS Chatbot." + }, + "SnsTopicArns":{ + "shape":"SnsTopicArnList", + "documentation":"The ARNs of the SNS topics that deliver notifications to AWS Chatbot." + }, + "ConfigurationName":{ + "shape":"ConfigurationName", + "documentation":"The name of the configuration." + }, + "LoggingLevel":{ + "shape":"CustomerCwLogLevel", + "documentation":"Specifies the logging level for this configuration. This property affects the log entries pushed to Amazon CloudWatch Logs.Logging levels include ERROR, INFO, or NONE." 
+ } + }, + "documentation":"An AWS Chatbot configuration for Amazon Chime." + }, + "ChimeWebhookConfigurationList":{ + "type":"list", + "member":{"shape":"ChimeWebhookConfiguration"} + }, + "ChimeWebhookDescription":{ + "type":"string", + "max":255, + "min":1 + }, + "ChimeWebhookUrl":{ + "type":"string", + "max":255, + "min":1, + "pattern":"^https://hooks\\.chime\\.aws/incomingwebhooks/[A-Za-z0-9\\-]+?\\?token=[A-Za-z0-9\\-]+$" + }, + "ConfigurationName":{ + "type":"string", + "max":128, + "min":1, + "pattern":"^[A-Za-z0-9-_]+$" + }, + "ConfiguredTeam":{ + "type":"structure", + "required":[ + "TenantId", + "TeamId" + ], + "members":{ + "TenantId":{ + "shape":"UUID", + "documentation":"The ID of the Microsoft Teams tenant." + }, + "TeamId":{ + "shape":"UUID", + "documentation":"The ID of the Microsoft Team authorized with AWS Chatbot. To get the team ID, you must perform the initial authorization flow with Microsoft Teams in the AWS Chatbot console. Then you can copy and paste the team ID from the console. For more details, see steps 1-4 in Get started with Microsoft Teams in the AWS Chatbot Administrator Guide." + }, + "TeamName":{ + "shape":"UUID", + "documentation":"The name of the Microsoft Teams Team.", + "box":true + } + }, + "documentation":"A Microsoft Teams team that has been authorized with AWS Chatbot." + }, + "ConfiguredTeamsList":{ + "type":"list", + "member":{"shape":"ConfiguredTeam"} + }, + "ConflictException":{ + "type":"structure", + "members":{ + }, + "documentation":"There was an issue processing your request.", + "error":{"httpStatusCode":409}, + "exception":true + }, + "CreateChimeWebhookConfigurationException":{ + "type":"structure", + "members":{ + }, + "documentation":"We can’t process your request right now because of a server issue. 
Try again later.", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + }, + "CreateChimeWebhookConfigurationRequest":{ + "type":"structure", + "required":[ + "WebhookDescription", + "WebhookUrl", + "SnsTopicArns", + "IamRoleArn", + "ConfigurationName" + ], + "members":{ + "WebhookDescription":{ + "shape":"ChimeWebhookDescription", + "documentation":"Description of the webhook. Recommend using the convention `RoomName/WebhookName`. See Chime setup tutorial for more details: https://docs.aws.amazon.com/chatbot/latest/adminguide/chime-setup.html." + }, + "WebhookUrl":{ + "shape":"ChimeWebhookUrl", + "documentation":"URL for the Chime webhook." + }, + "SnsTopicArns":{ + "shape":"SnsTopicArnList", + "documentation":"The ARNs of the SNS topics that deliver notifications to AWS Chatbot." + }, + "IamRoleArn":{ + "shape":"Arn", + "documentation":"This is a user-defined role that AWS Chatbot will assume. This is not the service-linked role. For more information, see IAM Policies for AWS Chatbot." + }, + "ConfigurationName":{ + "shape":"ConfigurationName", + "documentation":"The name of the configuration." + }, + "LoggingLevel":{ + "shape":"CustomerCwLogLevel", + "documentation":"Logging levels include ERROR, INFO, or NONE." + } + } + }, + "CreateChimeWebhookConfigurationResult":{ + "type":"structure", + "members":{ + "WebhookConfiguration":{ + "shape":"ChimeWebhookConfiguration", + "documentation":"Chime webhook configuration." + } + } + }, + "CreateSlackChannelConfigurationException":{ + "type":"structure", + "members":{ + }, + "documentation":"We can’t process your request right now because of a server issue. 
Try again later.", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + }, + "CreateSlackChannelConfigurationRequest":{ + "type":"structure", + "required":[ + "SlackTeamId", + "SlackChannelId", + "IamRoleArn", + "ConfigurationName" + ], + "members":{ + "SlackTeamId":{ + "shape":"SlackTeamId", + "documentation":"The ID of the Slack workspace authorized with AWS Chatbot." + }, + "SlackChannelId":{ + "shape":"SlackChannelId", + "documentation":"The ID of the Slack channel. To get the ID, open Slack, right click on the channel name in the left pane, then choose Copy Link. The channel ID is the 9-character string at the end of the URL. For example, ABCBBLZZZ." + }, + "SlackChannelName":{ + "shape":"SlackChannelDisplayName", + "documentation":"The name of the Slack Channel." + }, + "SnsTopicArns":{ + "shape":"SnsTopicArnList", + "documentation":"The ARNs of the SNS topics that deliver notifications to AWS Chatbot." + }, + "IamRoleArn":{ + "shape":"Arn", + "documentation":"The ARN of the IAM role that defines the permissions for AWS Chatbot. This is a user-defined role that AWS Chatbot will assume. This is not the service-linked role. For more information, see IAM Policies for AWS Chatbot." + }, + "ConfigurationName":{ + "shape":"ConfigurationName", + "documentation":"The name of the configuration." + }, + "LoggingLevel":{ + "shape":"CustomerCwLogLevel", + "documentation":"Logging levels include ERROR, INFO, or NONE." + }, + "GuardrailPolicyArns":{ + "shape":"GuardrailPolicyArnList", + "documentation":"The list of IAM policy ARNs that are applied as channel guardrails. The AWS managed 'AdministratorAccess' policy is applied by default if this is not set." + }, + "UserAuthorizationRequired":{ + "shape":"BooleanAccountPreference", + "documentation":"Enables use of a user role requirement in your chat configuration." 
+ } + } + }, + "CreateSlackChannelConfigurationResult":{ + "type":"structure", + "members":{ + "ChannelConfiguration":{ + "shape":"SlackChannelConfiguration", + "documentation":"The configuration for a Slack channel configured with AWS Chatbot." + } + } + }, + "CreateTeamsChannelConfigurationException":{ + "type":"structure", + "members":{ + }, + "documentation":"We can’t process your request right now because of a server issue. Try again later.", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + }, + "CreateTeamsChannelConfigurationRequest":{ + "type":"structure", + "required":[ + "ChannelId", + "TeamId", + "TenantId", + "IamRoleArn", + "ConfigurationName" + ], + "members":{ + "ChannelId":{ + "shape":"TeamsChannelId", + "documentation":"The ID of the Microsoft Teams channel." + }, + "ChannelName":{ + "shape":"TeamsChannelName", + "documentation":"The name of the Microsoft Teams channel." + }, + "TeamId":{ + "shape":"UUID", + "documentation":"The ID of the Microsoft Team authorized with AWS Chatbot. To get the team ID, you must perform the initial authorization flow with Microsoft Teams in the AWS Chatbot console. Then you can copy and paste the team ID from the console. For more details, see steps 1-4 in Get started with Microsoft Teams in the AWS Chatbot Administrator Guide." + }, + "TeamName":{ + "shape":"TeamName", + "documentation":"The name of the Microsoft Teams Team." + }, + "TenantId":{ + "shape":"UUID", + "documentation":"The ID of the Microsoft Teams tenant." + }, + "SnsTopicArns":{ + "shape":"SnsTopicArnList", + "documentation":"The ARNs of the SNS topics that deliver notifications to AWS Chatbot." + }, + "IamRoleArn":{ + "shape":"Arn", + "documentation":"The ARN of the IAM role that defines the permissions for AWS Chatbot. This is a user-defined role that AWS Chatbot will assume. This is not the service-linked role. For more information, see IAM Policies for AWS Chatbot." 
+ }, + "ConfigurationName":{ + "shape":"ConfigurationName", + "documentation":"The name of the configuration." + }, + "LoggingLevel":{ + "shape":"CustomerCwLogLevel", + "documentation":"Logging levels include ERROR, INFO, or NONE." + }, + "GuardrailPolicyArns":{ + "shape":"GuardrailPolicyArnList", + "documentation":"The list of IAM policy ARNs that are applied as channel guardrails. The AWS managed 'AdministratorAccess' policy is applied by default if this is not set." + }, + "UserAuthorizationRequired":{ + "shape":"BooleanAccountPreference", + "documentation":"Enables use of a user role requirement in your chat configuration." + } + } + }, + "CreateTeamsChannelConfigurationResult":{ + "type":"structure", + "members":{ + "ChannelConfiguration":{ + "shape":"TeamsChannelConfiguration", + "documentation":"The configuration for a Microsoft Teams channel configured with AWS Chatbot." + } + } + }, + "CustomerCwLogLevel":{ + "type":"string", + "max":5, + "min":4, + "pattern":"^(ERROR|INFO|NONE)$" + }, + "DeleteChimeWebhookConfigurationException":{ + "type":"structure", + "members":{ + }, + "documentation":"We can’t process your request right now because of a server issue. Try again later.", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + }, + "DeleteChimeWebhookConfigurationRequest":{ + "type":"structure", + "required":["ChatConfigurationArn"], + "members":{ + "ChatConfigurationArn":{ + "shape":"ChatConfigurationArn", + "documentation":"The ARN of the ChimeWebhookConfiguration to delete." + } + } + }, + "DeleteChimeWebhookConfigurationResult":{ + "type":"structure", + "members":{ + } + }, + "DeleteMicrosoftTeamsUserIdentityException":{ + "type":"structure", + "members":{ + }, + "documentation":"We can’t process your request right now because of a server issue. 
Try again later.", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + }, + "DeleteMicrosoftTeamsUserIdentityRequest":{ + "type":"structure", + "required":[ + "ChatConfigurationArn", + "UserId" + ], + "members":{ + "ChatConfigurationArn":{ + "shape":"ChatConfigurationArn", + "documentation":"The ARN of the MicrosoftTeamsChannelConfiguration associated with the user identity to delete." + }, + "UserId":{ + "shape":"UUID", + "documentation":"Id from Microsoft Teams for user." + } + } + }, + "DeleteMicrosoftTeamsUserIdentityResult":{ + "type":"structure", + "members":{ + } + }, + "DeleteSlackChannelConfigurationException":{ + "type":"structure", + "members":{ + }, + "documentation":"We can’t process your request right now because of a server issue. Try again later.", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + }, + "DeleteSlackChannelConfigurationRequest":{ + "type":"structure", + "required":["ChatConfigurationArn"], + "members":{ + "ChatConfigurationArn":{ + "shape":"ChatConfigurationArn", + "documentation":"The ARN of the SlackChannelConfiguration to delete." + } + } + }, + "DeleteSlackChannelConfigurationResult":{ + "type":"structure", + "members":{ + } + }, + "DeleteSlackUserIdentityException":{ + "type":"structure", + "members":{ + }, + "documentation":"We can’t process your request right now because of a server issue. Try again later.", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + }, + "DeleteSlackUserIdentityRequest":{ + "type":"structure", + "required":[ + "ChatConfigurationArn", + "SlackTeamId", + "SlackUserId" + ], + "members":{ + "ChatConfigurationArn":{ + "shape":"ChatConfigurationArn", + "documentation":"The ARN of the SlackChannelConfiguration associated with the user identity to delete." + }, + "SlackTeamId":{ + "shape":"SlackTeamId", + "documentation":"The ID of the Slack workspace authorized with AWS Chatbot." 
+ }, + "SlackUserId":{ + "shape":"SlackUserId", + "documentation":"The ID of the user in Slack." + } + } + }, + "DeleteSlackUserIdentityResult":{ + "type":"structure", + "members":{ + } + }, + "DeleteSlackWorkspaceAuthorizationFault":{ + "type":"structure", + "members":{ + }, + "documentation":"There was an issue deleting your Slack workspace.", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + }, + "DeleteSlackWorkspaceAuthorizationRequest":{ + "type":"structure", + "required":["SlackTeamId"], + "members":{ + "SlackTeamId":{ + "shape":"SlackTeamId", + "documentation":"The ID of the Slack workspace authorized with AWS Chatbot." + } + } + }, + "DeleteSlackWorkspaceAuthorizationResult":{ + "type":"structure", + "members":{ + } + }, + "DeleteTeamsChannelConfigurationException":{ + "type":"structure", + "members":{ + }, + "documentation":"We can’t process your request right now because of a server issue. Try again later.", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + }, + "DeleteTeamsChannelConfigurationRequest":{ + "type":"structure", + "required":["ChatConfigurationArn"], + "members":{ + "ChatConfigurationArn":{ + "shape":"ChatConfigurationArn", + "documentation":"The ARN of the MicrosoftTeamsChannelConfiguration to delete." + } + } + }, + "DeleteTeamsChannelConfigurationResult":{ + "type":"structure", + "members":{ + } + }, + "DeleteTeamsConfiguredTeamException":{ + "type":"structure", + "members":{ + }, + "documentation":"We can’t process your request right now because of a server issue. Try again later.", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + }, + "DeleteTeamsConfiguredTeamRequest":{ + "type":"structure", + "required":["TeamId"], + "members":{ + "TeamId":{ + "shape":"UUID", + "documentation":"The ID of the Microsoft Team authorized with AWS Chatbot. To get the team ID, you must perform the initial authorization flow with Microsoft Teams in the AWS Chatbot console. 
Then you can copy and paste the team ID from the console. For more details, see steps 1-4 in Get started with Microsoft Teams in the AWS Chatbot Administrator Guide." + } + } + }, + "DeleteTeamsConfiguredTeamResult":{ + "type":"structure", + "members":{ + } + }, + "DescribeChimeWebhookConfigurationsException":{ + "type":"structure", + "members":{ + }, + "documentation":"We can’t process your request right now because of a server issue. Try again later.", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + }, + "DescribeChimeWebhookConfigurationsRequest":{ + "type":"structure", + "members":{ + "MaxResults":{ + "shape":"MaxResults", + "documentation":"The maximum number of results to include in the response. If more results exist than the specified MaxResults value, a token is included in the response so that the remaining results can be retrieved.", + "box":true + }, + "NextToken":{ + "shape":"PaginationToken", + "documentation":"An optional token returned from a prior request. Use this token for pagination of results from this action. If this parameter is specified, the response includes only results beyond the token, up to the value specified by MaxResults.", + "box":true + }, + "ChatConfigurationArn":{ + "shape":"ChatConfigurationArn", + "documentation":"An optional ARN of a ChimeWebhookConfiguration to describe.", + "box":true + } + } + }, + "DescribeChimeWebhookConfigurationsResult":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"PaginationToken", + "documentation":"An optional token returned from a prior request. Use this token for pagination of results from this action. If this parameter is specified, the response includes only results beyond the token, up to the value specified by MaxResults." + }, + "WebhookConfigurations":{ + "shape":"ChimeWebhookConfigurationList", + "documentation":"A list of Chime webhooks associated with the account." 
+ } + } + }, + "DescribeSlackChannelConfigurationsException":{ + "type":"structure", + "members":{ + }, + "documentation":"We can’t process your request right now because of a server issue. Try again later.", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + }, + "DescribeSlackChannelConfigurationsRequest":{ + "type":"structure", + "members":{ + "MaxResults":{ + "shape":"MaxResults", + "documentation":"The maximum number of results to include in the response. If more results exist than the specified MaxResults value, a token is included in the response so that the remaining results can be retrieved.", + "box":true + }, + "NextToken":{ + "shape":"PaginationToken", + "documentation":"An optional token returned from a prior request. Use this token for pagination of results from this action. If this parameter is specified, the response includes only results beyond the token, up to the value specified by MaxResults.", + "box":true + }, + "ChatConfigurationArn":{ + "shape":"ChatConfigurationArn", + "documentation":"An optional ARN of a SlackChannelConfiguration to describe.", + "box":true + } + } + }, + "DescribeSlackChannelConfigurationsResult":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"PaginationToken", + "documentation":"An optional token returned from a prior request. Use this token for pagination of results from this action. If this parameter is specified, the response includes only results beyond the token, up to the value specified by MaxResults." + }, + "SlackChannelConfigurations":{ + "shape":"SlackChannelConfigurationList", + "documentation":"A list of Slack channel configurations." + } + } + }, + "DescribeSlackUserIdentitiesException":{ + "type":"structure", + "members":{ + }, + "documentation":"We can’t process your request right now because of a server issue. 
Try again later.", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + }, + "DescribeSlackUserIdentitiesRequest":{ + "type":"structure", + "members":{ + "ChatConfigurationArn":{ + "shape":"ChatConfigurationArn", + "documentation":"The ARN of the SlackChannelConfiguration associated with the user identities to describe." + }, + "NextToken":{ + "shape":"PaginationToken", + "documentation":"An optional token returned from a prior request. Use this token for pagination of results from this action. If this parameter is specified, the response includes only results beyond the token, up to the value specified by MaxResults." + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"The maximum number of results to include in the response. If more results exist than the specified MaxResults value, a token is included in the response so that the remaining results can be retrieved." + } + } + }, + "DescribeSlackUserIdentitiesResult":{ + "type":"structure", + "members":{ + "SlackUserIdentities":{ + "shape":"SlackUserIdentitiesList", + "documentation":"A list of Slack User Identities." + }, + "NextToken":{ + "shape":"PaginationToken", + "documentation":"An optional token returned from a prior request. Use this token for pagination of results from this action. If this parameter is specified, the response includes only results beyond the token, up to the value specified by MaxResults." + } + } + }, + "DescribeSlackWorkspacesException":{ + "type":"structure", + "members":{ + }, + "documentation":"We can’t process your request right now because of a server issue. Try again later.", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + }, + "DescribeSlackWorkspacesRequest":{ + "type":"structure", + "members":{ + "MaxResults":{ + "shape":"MaxResults", + "documentation":"The maximum number of results to include in the response. 
If more results exist than the specified MaxResults value, a token is included in the response so that the remaining results can be retrieved." + }, + "NextToken":{ + "shape":"PaginationToken", + "documentation":"An optional token returned from a prior request. Use this token for pagination of results from this action. If this parameter is specified, the response includes only results beyond the token, up to the value specified by MaxResults." + } + } + }, + "DescribeSlackWorkspacesResult":{ + "type":"structure", + "members":{ + "SlackWorkspaces":{ + "shape":"SlackWorkspacesList", + "documentation":"A list of Slack Workspaces registered with AWS Chatbot." + }, + "NextToken":{ + "shape":"PaginationToken", + "documentation":"An optional token returned from a prior request. Use this token for pagination of results from this action. If this parameter is specified, the response includes only results beyond the token, up to the value specified by MaxResults." + } + } + }, + "GetAccountPreferencesException":{ + "type":"structure", + "members":{ + }, + "documentation":"We can’t process your request right now because of a server issue. Try again later.", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + }, + "GetAccountPreferencesRequest":{ + "type":"structure", + "members":{ + } + }, + "GetAccountPreferencesResult":{ + "type":"structure", + "members":{ + "AccountPreferences":{ + "shape":"AccountPreferences", + "documentation":"Preferences which apply for AWS Chatbot usage in the calling AWS account." + } + } + }, + "GetTeamsChannelConfigurationException":{ + "type":"structure", + "members":{ + }, + "documentation":"We can’t process your request right now because of a server issue. 
Try again later.", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + }, + "GetTeamsChannelConfigurationRequest":{ + "type":"structure", + "required":["ChatConfigurationArn"], + "members":{ + "ChatConfigurationArn":{ + "shape":"ChatConfigurationArn", + "documentation":"The ARN of the MicrosoftTeamsChannelConfiguration to retrieve." + } + } + }, + "GetTeamsChannelConfigurationResult":{ + "type":"structure", + "members":{ + "ChannelConfiguration":{ + "shape":"TeamsChannelConfiguration", + "documentation":"The configuration for a Microsoft Teams channel configured with AWS Chatbot." + } + } + }, + "GuardrailPolicyArn":{ + "type":"string", + "max":1163, + "min":11, + "pattern":"^(^$|(?!.*\\/aws-service-role\\/.*)arn:aws:iam:[A-Za-z0-9_\\/.-]{0,63}:[A-Za-z0-9_\\/.-]{0,63}:[A-Za-z0-9][A-Za-z0-9:_\\/+=,@.-]{0,1023})$" + }, + "GuardrailPolicyArnList":{ + "type":"list", + "member":{"shape":"GuardrailPolicyArn"} + }, + "InvalidParameterException":{ + "type":"structure", + "members":{ + }, + "documentation":"Your request input doesn't meet the constraints that AWS Chatbot requires.", + "error":{"httpStatusCode":400}, + "exception":true + }, + "InvalidRequestException":{ + "type":"structure", + "members":{ + }, + "documentation":"Your request input doesn't meet the constraints that AWS Chatbot requires.", + "error":{"httpStatusCode":400}, + "exception":true + }, + "LimitExceededException":{ + "type":"structure", + "members":{ + }, + "documentation":"You have exceeded a service limit for AWS Chatbot.", + "error":{"httpStatusCode":403}, + "exception":true + }, + "ListMicrosoftTeamsConfiguredTeamsException":{ + "type":"structure", + "members":{ + }, + "documentation":"We can’t process your request right now because of a server issue. 
Try again later.", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + }, + "ListMicrosoftTeamsConfiguredTeamsRequest":{ + "type":"structure", + "members":{ + "MaxResults":{ + "shape":"MaxResults", + "documentation":"The maximum number of results to include in the response. If more results exist than the specified MaxResults value, a token is included in the response so that the remaining results can be retrieved." + }, + "NextToken":{ + "shape":"PaginationToken", + "documentation":"An optional token returned from a prior request. Use this token for pagination of results from this action. If this parameter is specified, the response includes only results beyond the token, up to the value specified by MaxResults." + } + } + }, + "ListMicrosoftTeamsConfiguredTeamsResult":{ + "type":"structure", + "members":{ + "ConfiguredTeams":{ + "shape":"ConfiguredTeamsList", + "documentation":"A list of teams in Microsoft Teams that have been configured with AWS Chatbot." + }, + "NextToken":{ + "shape":"PaginationToken", + "documentation":"An optional token returned from a prior request. Use this token for pagination of results from this action. If this parameter is specified, the response includes only results beyond the token, up to the value specified by MaxResults." + } + } + }, + "ListMicrosoftTeamsUserIdentitiesException":{ + "type":"structure", + "members":{ + }, + "documentation":"We can’t process your request right now because of a server issue. Try again later.", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + }, + "ListMicrosoftTeamsUserIdentitiesRequest":{ + "type":"structure", + "members":{ + "ChatConfigurationArn":{ + "shape":"ChatConfigurationArn", + "documentation":"The ARN of the MicrosoftTeamsChannelConfiguration associated with the user identities to list." + }, + "NextToken":{ + "shape":"PaginationToken", + "documentation":"An optional token returned from a prior request. 
Use this token for pagination of results from this action. If this parameter is specified, the response includes only results beyond the token, up to the value specified by MaxResults." + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"The maximum number of results to include in the response. If more results exist than the specified MaxResults value, a token is included in the response so that the remaining results can be retrieved." + } + } + }, + "ListMicrosoftTeamsUserIdentitiesResult":{ + "type":"structure", + "members":{ + "TeamsUserIdentities":{ + "shape":"TeamsUserIdentitiesList", + "documentation":"User level permissions associated to a channel configuration." + }, + "NextToken":{ + "shape":"PaginationToken", + "documentation":"An optional token returned from a prior request. Use this token for pagination of results from this action. If this parameter is specified, the response includes only results beyond the token, up to the value specified by MaxResults." + } + } + }, + "ListTeamsChannelConfigurationsException":{ + "type":"structure", + "members":{ + }, + "documentation":"We can’t process your request right now because of a server issue. Try again later.", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + }, + "ListTeamsChannelConfigurationsRequest":{ + "type":"structure", + "members":{ + "MaxResults":{ + "shape":"MaxResults", + "documentation":"The maximum number of results to include in the response. If more results exist than the specified MaxResults value, a token is included in the response so that the remaining results can be retrieved.", + "box":true + }, + "NextToken":{ + "shape":"PaginationToken", + "documentation":"An optional token returned from a prior request. Use this token for pagination of results from this action. 
If this parameter is specified, the response includes only results beyond the token, up to the value specified by MaxResults.", + "box":true + }, + "TeamId":{ + "shape":"UUID", + "documentation":"The ID of the Microsoft Team authorized with AWS Chatbot. To get the team ID, you must perform the initial authorization flow with Microsoft Teams in the AWS Chatbot console. Then you can copy and paste the team ID from the console. For more details, see steps 1-4 in Get started with Microsoft Teams in the AWS Chatbot Administrator Guide.", + "box":true + } + } + }, + "ListTeamsChannelConfigurationsResult":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"PaginationToken", + "documentation":"An optional token returned from a prior request. Use this token for pagination of results from this action. If this parameter is specified, the response includes only results beyond the token, up to the value specified by MaxResults." + }, + "TeamChannelConfigurations":{ + "shape":"TeamChannelConfigurationsList", + "documentation":"A list of AWS Chatbot channel configurations for Microsoft Teams." + } + } + }, + "MaxResults":{ + "type":"integer", + "max":100, + "min":1 + }, + "PaginationToken":{ + "type":"string", + "max":1276, + "min":1, + "pattern":"^[a-zA-Z0-9=\\/+_.\\-,#:\\\\\"{}]{4,1276}$" + }, + "ResourceNotFoundException":{ + "type":"structure", + "members":{ + }, + "documentation":"We were not able to find the resource for your request.", + "error":{"httpStatusCode":404}, + "exception":true + }, + "SlackChannelConfiguration":{ + "type":"structure", + "required":[ + "SlackTeamName", + "SlackTeamId", + "SlackChannelId", + "SlackChannelName", + "ChatConfigurationArn", + "IamRoleArn", + "SnsTopicArns" + ], + "members":{ + "SlackTeamName":{ + "shape":"SlackTeamName", + "documentation":"Name of the Slack Workspace." + }, + "SlackTeamId":{ + "shape":"SlackTeamId", + "documentation":"The ID of the Slack workspace authorized with AWS Chatbot." 
+ }, + "SlackChannelId":{ + "shape":"SlackChannelId", + "documentation":"The ID of the Slack channel. To get the ID, open Slack, right click on the channel name in the left pane, then choose Copy Link. The channel ID is the 9-character string at the end of the URL. For example, ABCBBLZZZ." + }, + "SlackChannelName":{ + "shape":"SlackChannelDisplayName", + "documentation":"The name of the Slack Channel." + }, + "ChatConfigurationArn":{ + "shape":"ChatConfigurationArn", + "documentation":"The ARN of the SlackChannelConfiguration." + }, + "IamRoleArn":{ + "shape":"Arn", + "documentation":"The ARN of the IAM role that defines the permissions for AWS Chatbot. This is a user-defined role that AWS Chatbot will assume. This is not the service-linked role. For more information, see IAM Policies for AWS Chatbot." + }, + "SnsTopicArns":{ + "shape":"SnsTopicArnList", + "documentation":"The ARNs of the SNS topics that deliver notifications to AWS Chatbot.", + "box":true + }, + "ConfigurationName":{ + "shape":"ConfigurationName", + "documentation":"The name of the configuration." + }, + "LoggingLevel":{ + "shape":"CustomerCwLogLevel", + "documentation":"Logging levels include ERROR, INFO, or NONE." + }, + "GuardrailPolicyArns":{ + "shape":"GuardrailPolicyArnList", + "documentation":"The list of IAM policy ARNs that are applied as channel guardrails. The AWS managed 'AdministratorAccess' policy is applied by default if this is not set." + }, + "UserAuthorizationRequired":{ + "shape":"BooleanAccountPreference", + "documentation":"Enables use of a user role requirement in your chat configuration." + } + }, + "documentation":"An AWS Chatbot configuration for Slack." 
+ }, + "SlackChannelConfigurationList":{ + "type":"list", + "member":{"shape":"SlackChannelConfiguration"} + }, + "SlackChannelDisplayName":{ + "type":"string", + "max":255, + "min":1 + }, + "SlackChannelId":{ + "type":"string", + "max":255, + "min":1, + "pattern":"^[A-Za-z0-9]+$" + }, + "SlackTeamId":{ + "type":"string", + "max":255, + "min":1, + "pattern":"^[0-9A-Z]{1,255}$" + }, + "SlackTeamName":{ + "type":"string", + "max":255, + "min":1 + }, + "SlackUserId":{ + "type":"string", + "max":255, + "min":1, + "pattern":"^(.*)$" + }, + "SlackUserIdentitiesList":{ + "type":"list", + "member":{"shape":"SlackUserIdentity"} + }, + "SlackUserIdentity":{ + "type":"structure", + "required":[ + "IamRoleArn", + "ChatConfigurationArn", + "SlackTeamId", + "SlackUserId" + ], + "members":{ + "IamRoleArn":{ + "shape":"Arn", + "documentation":"The ARN of the IAM role that defines the permissions for AWS Chatbot. This is a user-defined role that AWS Chatbot will assume. This is not the service-linked role. For more information, see IAM Policies for AWS Chatbot." + }, + "ChatConfigurationArn":{ + "shape":"ChatConfigurationArn", + "documentation":"The ARN of the SlackChannelConfiguration associated with the user identity." + }, + "SlackTeamId":{ + "shape":"SlackTeamId", + "documentation":"The ID of the Slack workspace authorized with AWS Chatbot." + }, + "SlackUserId":{ + "shape":"SlackUserId", + "documentation":"The ID of the user in Slack." + }, + "AwsUserIdentity":{ + "shape":"AwsUserIdentity", + "documentation":"The AWS user identity ARN used to associate a Slack User Identity with an IAM Role." + } + }, + "documentation":"Identifes a User level permission for a channel configuration." + }, + "SlackWorkspace":{ + "type":"structure", + "required":[ + "SlackTeamId", + "SlackTeamName" + ], + "members":{ + "SlackTeamId":{ + "shape":"SlackTeamId", + "documentation":"The ID of the Slack workspace authorized with AWS Chatbot." 
+ }, + "SlackTeamName":{ + "shape":"SlackTeamName", + "documentation":"Name of the Slack Workspace." + } + }, + "documentation":"A Slack Workspace." + }, + "SlackWorkspacesList":{ + "type":"list", + "member":{"shape":"SlackWorkspace"} + }, + "SnsTopicArnList":{ + "type":"list", + "member":{"shape":"Arn"} + }, + "String":{"type":"string"}, + "TeamChannelConfigurationsList":{ + "type":"list", + "member":{"shape":"TeamsChannelConfiguration"} + }, + "TeamName":{ + "type":"string", + "max":255, + "min":1, + "pattern":"^(.*)$" + }, + "TeamsChannelConfiguration":{ + "type":"structure", + "required":[ + "ChannelId", + "TeamId", + "TenantId", + "ChatConfigurationArn", + "IamRoleArn", + "SnsTopicArns" + ], + "members":{ + "ChannelId":{ + "shape":"TeamsChannelId", + "documentation":"The ID of the Microsoft Teams channel." + }, + "ChannelName":{ + "shape":"TeamsChannelName", + "documentation":"The name of the Microsoft Teams channel." + }, + "TeamId":{ + "shape":"UUID", + "documentation":"The ID of the Microsoft Team authorized with AWS Chatbot. To get the team ID, you must perform the initial authorization flow with Microsoft Teams in the AWS Chatbot console. Then you can copy and paste the team ID from the console. For more details, see steps 1-4 in Get started with Microsoft Teams in the AWS Chatbot Administrator Guide." + }, + "TeamName":{ + "shape":"String", + "documentation":"The name of the Microsoft Teams Team." + }, + "TenantId":{ + "shape":"UUID", + "documentation":"The ID of the Microsoft Teams tenant." + }, + "ChatConfigurationArn":{ + "shape":"ChatConfigurationArn", + "documentation":"The ARN of the MicrosoftTeamsChannelConfiguration." + }, + "IamRoleArn":{ + "shape":"Arn", + "documentation":"The ARN of the IAM role that defines the permissions for AWS Chatbot. This is a user-defined role that AWS Chatbot will assume. This is not the service-linked role. For more information, see IAM Policies for AWS Chatbot." 
+ }, + "SnsTopicArns":{ + "shape":"SnsTopicArnList", + "documentation":"The ARNs of the SNS topics that deliver notifications to AWS Chatbot.", + "box":true + }, + "ConfigurationName":{ + "shape":"ConfigurationName", + "documentation":"The name of the configuration." + }, + "LoggingLevel":{ + "shape":"CustomerCwLogLevel", + "documentation":"Logging levels include ERROR, INFO, or NONE." + }, + "GuardrailPolicyArns":{ + "shape":"GuardrailPolicyArnList", + "documentation":"The list of IAM policy ARNs that are applied as channel guardrails. The AWS managed 'AdministratorAccess' policy is applied by default if this is not set." + }, + "UserAuthorizationRequired":{ + "shape":"BooleanAccountPreference", + "documentation":"Enables use of a user role requirement in your chat configuration." + } + }, + "documentation":"An AWS Chatbot configuration for Microsoft Teams." + }, + "TeamsChannelId":{ + "type":"string", + "max":255, + "min":1, + "pattern":"^([a-zA-Z0-9-_=+\\/.,])*%3[aA]([a-zA-Z0-9-_=+\\/.,])*%40([a-zA-Z0-9-_=+\\/.,])*$" + }, + "TeamsChannelName":{ + "type":"string", + "max":1000, + "min":1, + "pattern":"^(.*)$" + }, + "TeamsUserIdentitiesList":{ + "type":"list", + "member":{"shape":"TeamsUserIdentity"} + }, + "TeamsUserIdentity":{ + "type":"structure", + "required":[ + "IamRoleArn", + "ChatConfigurationArn", + "TeamId" + ], + "members":{ + "IamRoleArn":{ + "shape":"Arn", + "documentation":"The ARN of the IAM role that defines the permissions for AWS Chatbot. This is a user-defined role that AWS Chatbot will assume. This is not the service-linked role. For more information, see IAM Policies for AWS Chatbot." + }, + "ChatConfigurationArn":{ + "shape":"ChatConfigurationArn", + "documentation":"The ARN of the MicrosoftTeamsChannelConfiguration associated with the user identity." + }, + "TeamId":{ + "shape":"UUID", + "documentation":"The ID of the Microsoft Team authorized with AWS Chatbot. 
To get the team ID, you must perform the initial authorization flow with Microsoft Teams in the AWS Chatbot console. Then you can copy and paste the team ID from the console. For more details, see steps 1-4 in Get started with Microsoft Teams in the AWS Chatbot Administrator Guide." + }, + "UserId":{ + "shape":"UUID", + "documentation":"Id from Microsoft Teams for user." + }, + "AwsUserIdentity":{ + "shape":"AwsUserIdentity", + "documentation":"The AWS user identity ARN used to associate a Microsoft Teams User Identity with an IAM Role." + }, + "TeamsChannelId":{ + "shape":"TeamsChannelId", + "documentation":"The ID of the Microsoft Teams channel." + }, + "TeamsTenantId":{ + "shape":"UUID", + "documentation":"The ID of the Microsoft Teams tenant." + } + }, + "documentation":"Identifes a user level permission for a channel configuration." + }, + "UUID":{ + "type":"string", + "max":36, + "min":36, + "pattern":"^[0-9A-Fa-f]{8}(?:-[0-9A-Fa-f]{4}){3}-[0-9A-Fa-f]{12}$" + }, + "UpdateAccountPreferencesException":{ + "type":"structure", + "members":{ + }, + "documentation":"We can’t process your request right now because of a server issue. Try again later.", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + }, + "UpdateAccountPreferencesRequest":{ + "type":"structure", + "members":{ + "UserAuthorizationRequired":{ + "shape":"BooleanAccountPreference", + "documentation":"Enables use of a user role requirement in your chat configuration." + }, + "TrainingDataCollectionEnabled":{ + "shape":"BooleanAccountPreference", + "documentation":"Turns on training data collection. This helps improve the AWS Chatbot experience by allowing AWS Chatbot to store and use your customer information, such as AWS Chatbot configurations, notifications, user inputs, AWS Chatbot generated responses, and interaction data. This data helps us to continuously improve and develop Artificial Intelligence (AI) technologies. 
Your data is not shared with any third parties and is protected using sophisticated controls to prevent unauthorized access and misuse. AWS Chatbot does not store or use interactions in chat channels with Amazon Q for training AWS Chatbot’s AI technologies." + } + } + }, + "UpdateAccountPreferencesResult":{ + "type":"structure", + "members":{ + "AccountPreferences":{ + "shape":"AccountPreferences", + "documentation":"Preferences which apply for AWS Chatbot usage in the calling AWS account." + } + } + }, + "UpdateChimeWebhookConfigurationException":{ + "type":"structure", + "members":{ + }, + "documentation":"We can’t process your request right now because of a server issue. Try again later.", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + }, + "UpdateChimeWebhookConfigurationRequest":{ + "type":"structure", + "required":["ChatConfigurationArn"], + "members":{ + "ChatConfigurationArn":{ + "shape":"ChatConfigurationArn", + "documentation":"The ARN of the ChimeWebhookConfiguration to update." + }, + "WebhookDescription":{ + "shape":"ChimeWebhookDescription", + "documentation":"Description of the webhook. Recommend using the convention `RoomName/WebhookName`. See Chime setup tutorial for more details: https://docs.aws.amazon.com/chatbot/latest/adminguide/chime-setup.html." + }, + "WebhookUrl":{ + "shape":"ChimeWebhookUrl", + "documentation":"URL for the Chime webhook." + }, + "SnsTopicArns":{ + "shape":"SnsTopicArnList", + "documentation":"The ARNs of the SNS topics that deliver notifications to AWS Chatbot." + }, + "IamRoleArn":{ + "shape":"Arn", + "documentation":"The ARN of the IAM role that defines the permissions for AWS Chatbot. This is a user-defined role that AWS Chatbot will assume. This is not the service-linked role. For more information, see IAM Policies for AWS Chatbot." + }, + "LoggingLevel":{ + "shape":"CustomerCwLogLevel", + "documentation":"Logging levels include ERROR, INFO, or NONE." 
+ } + } + }, + "UpdateChimeWebhookConfigurationResult":{ + "type":"structure", + "members":{ + "WebhookConfiguration":{ + "shape":"ChimeWebhookConfiguration", + "documentation":"Chime webhook configuration." + } + } + }, + "UpdateSlackChannelConfigurationException":{ + "type":"structure", + "members":{ + }, + "documentation":"We can’t process your request right now because of a server issue. Try again later.", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + }, + "UpdateSlackChannelConfigurationRequest":{ + "type":"structure", + "required":[ + "ChatConfigurationArn", + "SlackChannelId" + ], + "members":{ + "ChatConfigurationArn":{ + "shape":"ChatConfigurationArn", + "documentation":"The ARN of the SlackChannelConfiguration to update." + }, + "SlackChannelId":{ + "shape":"SlackChannelId", + "documentation":"The ID of the Slack channel. To get the ID, open Slack, right click on the channel name in the left pane, then choose Copy Link. The channel ID is the 9-character string at the end of the URL. For example, ABCBBLZZZ." + }, + "SlackChannelName":{ + "shape":"SlackChannelDisplayName", + "documentation":"The name of the Slack Channel." + }, + "SnsTopicArns":{ + "shape":"SnsTopicArnList", + "documentation":"The ARNs of the SNS topics that deliver notifications to AWS Chatbot." + }, + "IamRoleArn":{ + "shape":"Arn", + "documentation":"The ARN of the IAM role that defines the permissions for AWS Chatbot. This is a user-defined role that AWS Chatbot will assume. This is not the service-linked role. For more information, see IAM Policies for AWS Chatbot." + }, + "LoggingLevel":{ + "shape":"CustomerCwLogLevel", + "documentation":"Logging levels include ERROR, INFO, or NONE." + }, + "GuardrailPolicyArns":{ + "shape":"GuardrailPolicyArnList", + "documentation":"The list of IAM policy ARNs that are applied as channel guardrails. The AWS managed 'AdministratorAccess' policy is applied by default if this is not set." 
+ }, + "UserAuthorizationRequired":{ + "shape":"BooleanAccountPreference", + "documentation":"Enables use of a user role requirement in your chat configuration." + } + } + }, + "UpdateSlackChannelConfigurationResult":{ + "type":"structure", + "members":{ + "ChannelConfiguration":{ + "shape":"SlackChannelConfiguration", + "documentation":"The configuration for a Slack channel configured with AWS Chatbot." + } + } + }, + "UpdateTeamsChannelConfigurationException":{ + "type":"structure", + "members":{ + }, + "documentation":"We can’t process your request right now because of a server issue. Try again later.", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + }, + "UpdateTeamsChannelConfigurationRequest":{ + "type":"structure", + "required":[ + "ChatConfigurationArn", + "ChannelId" + ], + "members":{ + "ChatConfigurationArn":{ + "shape":"ChatConfigurationArn", + "documentation":"The ARN of the MicrosoftTeamsChannelConfiguration to update." + }, + "ChannelId":{ + "shape":"TeamsChannelId", + "documentation":"The ID of the Microsoft Teams channel." + }, + "ChannelName":{ + "shape":"TeamsChannelName", + "documentation":"The name of the Microsoft Teams channel." + }, + "SnsTopicArns":{ + "shape":"SnsTopicArnList", + "documentation":"The ARNs of the SNS topics that deliver notifications to AWS Chatbot." + }, + "IamRoleArn":{ + "shape":"Arn", + "documentation":"The ARN of the IAM role that defines the permissions for AWS Chatbot. This is a user-defined role that AWS Chatbot will assume. This is not the service-linked role. For more information, see IAM Policies for AWS Chatbot." + }, + "LoggingLevel":{ + "shape":"CustomerCwLogLevel", + "documentation":"Logging levels include ERROR, INFO, or NONE." + }, + "GuardrailPolicyArns":{ + "shape":"GuardrailPolicyArnList", + "documentation":"The list of IAM policy ARNs that are applied as channel guardrails. The AWS managed 'AdministratorAccess' policy is applied by default if this is not set." 
+ }, + "UserAuthorizationRequired":{ + "shape":"BooleanAccountPreference", + "documentation":"Enables use of a user role requirement in your chat configuration." + } + } + }, + "UpdateTeamsChannelConfigurationResult":{ + "type":"structure", + "members":{ + "ChannelConfiguration":{ + "shape":"TeamsChannelConfiguration", + "documentation":"The configuration for a Microsoft Teams channel configured with AWS Chatbot." + } + } + } + }, + "documentation":"AWS Chatbot API" +} diff -Nru awscli-2.15.9/awscli/botocore/data/cloud9/2017-09-23/service-2.json awscli-2.15.22/awscli/botocore/data/cloud9/2017-09-23/service-2.json --- awscli-2.15.9/awscli/botocore/data/cloud9/2017-09-23/service-2.json 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/awscli/botocore/data/cloud9/2017-09-23/service-2.json 2024-02-21 17:34:54.000000000 +0000 @@ -331,7 +331,7 @@ }, "imageId":{ "shape":"ImageId", - "documentation":"

The identifier for the Amazon Machine Image (AMI) that's used to create the EC2 instance. To choose an AMI for the instance, you must specify a valid AMI alias or a valid Amazon EC2 Systems Manager (SSM) path.

From December 04, 2023, you will be required to include the imageId parameter for the CreateEnvironmentEC2 action. This change will be reflected across all direct methods of communicating with the API, such as Amazon Web Services SDK, Amazon Web Services CLI and Amazon Web Services CloudFormation. This change will only affect direct API consumers, and not Cloud9 console users.

From January 22, 2024, Amazon Linux (AL1) will be removed from the list of available image IDs for Cloud9. This is necessary as AL1 will reach the end of maintenance support in December 2023, and as a result will no longer receive security updates. We recommend using Amazon Linux 2023 as the AMI to create your environment as it is fully supported. This change will only affect direct API consumers, and not Cloud9 console users.

Since Ubuntu 18.04 has ended standard support as of May 31, 2023, we recommend you choose Ubuntu 22.04.

AMI aliases

SSM paths

" + "documentation":"

The identifier for the Amazon Machine Image (AMI) that's used to create the EC2 instance. To choose an AMI for the instance, you must specify a valid AMI alias or a valid Amazon EC2 Systems Manager (SSM) path.

From December 04, 2023, you will be required to include the imageId parameter for the CreateEnvironmentEC2 action. This change will be reflected across all direct methods of communicating with the API, such as Amazon Web Services SDK, Amazon Web Services CLI and Amazon Web Services CloudFormation. This change will only affect direct API consumers, and not Cloud9 console users.

We recommend using Amazon Linux 2023 as the AMI to create your environment as it is fully supported.

Since Ubuntu 18.04 has ended standard support as of May 31, 2023, we recommend you choose Ubuntu 22.04.

AMI aliases

SSM paths

" }, "automaticStopTimeMinutes":{ "shape":"AutomaticStopTimeMinutes", diff -Nru awscli-2.15.9/awscli/botocore/data/cloudformation/2010-05-15/paginators-1.json awscli-2.15.22/awscli/botocore/data/cloudformation/2010-05-15/paginators-1.json --- awscli-2.15.9/awscli/botocore/data/cloudformation/2010-05-15/paginators-1.json 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/awscli/botocore/data/cloudformation/2010-05-15/paginators-1.json 2024-02-21 17:34:54.000000000 +0000 @@ -95,6 +95,30 @@ "limit_key": "MaxResults", "output_token": "NextToken", "result_key": "TypeSummaries" + }, + "ListGeneratedTemplates": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "Summaries" + }, + "ListResourceScanRelatedResources": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "RelatedResources" + }, + "ListResourceScanResources": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "Resources" + }, + "ListResourceScans": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "ResourceScanSummaries" } } } diff -Nru awscli-2.15.9/awscli/botocore/data/cloudformation/2010-05-15/service-2.json awscli-2.15.22/awscli/botocore/data/cloudformation/2010-05-15/service-2.json --- awscli-2.15.9/awscli/botocore/data/cloudformation/2010-05-15/service-2.json 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/awscli/botocore/data/cloudformation/2010-05-15/service-2.json 2024-02-21 17:34:54.000000000 +0000 @@ -109,6 +109,24 @@ ], "documentation":"

Creates a list of changes that will be applied to a stack so that you can review the changes before executing them. You can create a change set for a stack that doesn't exist or an existing stack. If you create a change set for a stack that doesn't exist, the change set shows all of the resources that CloudFormation will create. If you create a change set for an existing stack, CloudFormation compares the stack's information with the information that you submit in the change set and lists the differences. Use change sets to understand which resources CloudFormation will create or change, and how it will change resources in an existing stack, before you create or update a stack.

To create a change set for a stack that doesn't exist, for the ChangeSetType parameter, specify CREATE. To create a change set for an existing stack, specify UPDATE for the ChangeSetType parameter. To create a change set for an import operation, specify IMPORT for the ChangeSetType parameter. After the CreateChangeSet call successfully completes, CloudFormation starts creating the change set. To check the status of the change set or to review it, use the DescribeChangeSet action.

When you are satisfied with the changes the change set will make, execute the change set by using the ExecuteChangeSet action. CloudFormation doesn't make changes until you execute the change set.

To create a change set for the entire stack hierarchy, set IncludeNestedStacks to True.

" }, + "CreateGeneratedTemplate":{ + "name":"CreateGeneratedTemplate", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateGeneratedTemplateInput"}, + "output":{ + "shape":"CreateGeneratedTemplateOutput", + "resultWrapper":"CreateGeneratedTemplateResult" + }, + "errors":[ + {"shape":"AlreadyExistsException"}, + {"shape":"LimitExceededException"}, + {"shape":"ConcurrentResourcesLimitExceededException"} + ], + "documentation":"

Creates a template from existing resources that are not already managed with CloudFormation. You can check the status of the template generation using the DescribeGeneratedTemplate API action.

" + }, "CreateStack":{ "name":"CreateStack", "http":{ @@ -218,6 +236,19 @@ ], "documentation":"

Deletes the specified change set. Deleting change sets ensures that no one executes the wrong change set.

If the call successfully completes, CloudFormation successfully deleted the change set.

If IncludeNestedStacks specifies True during the creation of the nested change set, then DeleteChangeSet will delete all change sets that belong to the stacks hierarchy and will also delete all change sets for nested stacks with the status of REVIEW_IN_PROGRESS.

" }, + "DeleteGeneratedTemplate":{ + "name":"DeleteGeneratedTemplate", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteGeneratedTemplateInput"}, + "errors":[ + {"shape":"GeneratedTemplateNotFoundException"}, + {"shape":"ConcurrentResourcesLimitExceededException"} + ], + "documentation":"

Deleted a generated template.

" + }, "DeleteStack":{ "name":"DeleteStack", "http":{ @@ -330,6 +361,22 @@ ], "documentation":"

Returns hook-related information for the change set and a list of changes that CloudFormation makes when you run the change set.

" }, + "DescribeGeneratedTemplate":{ + "name":"DescribeGeneratedTemplate", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeGeneratedTemplateInput"}, + "output":{ + "shape":"DescribeGeneratedTemplateOutput", + "resultWrapper":"DescribeGeneratedTemplateResult" + }, + "errors":[ + {"shape":"GeneratedTemplateNotFoundException"} + ], + "documentation":"

Describes a generated template. The output includes details about the progress of the creation of a generated template started by a CreateGeneratedTemplate API action or the update of a generated template started with an UpdateGeneratedTemplate API action.

" + }, "DescribeOrganizationsAccess":{ "name":"DescribeOrganizationsAccess", "http":{ @@ -364,6 +411,22 @@ "documentation":"

Returns information about a CloudFormation extension publisher.

If you don't supply a PublisherId, and you have registered as an extension publisher, DescribePublisher returns information about your own publisher account.

For more information about registering as a publisher, see:

", "idempotent":true }, + "DescribeResourceScan":{ + "name":"DescribeResourceScan", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeResourceScanInput"}, + "output":{ + "shape":"DescribeResourceScanOutput", + "resultWrapper":"DescribeResourceScanResult" + }, + "errors":[ + {"shape":"ResourceScanNotFoundException"} + ], + "documentation":"

Describes details of a resource scan.

" + }, "DescribeStackDriftDetectionStatus":{ "name":"DescribeStackDriftDetectionStatus", "http":{ @@ -603,6 +666,22 @@ ], "documentation":"

Updates a stack using the input information that was provided when the specified change set was created. After the call successfully completes, CloudFormation starts updating the stack. Use the DescribeStacks action to view the status of the update.

When you execute a change set, CloudFormation deletes all other change sets associated with the stack because they aren't valid for the updated stack.

If a stack policy is associated with the stack, CloudFormation enforces the policy during the update. You can't specify a temporary stack policy that overrides the current policy.

To create a change set for the entire stack hierarchy, IncludeNestedStacks must have been set to True.

" }, + "GetGeneratedTemplate":{ + "name":"GetGeneratedTemplate", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetGeneratedTemplateInput"}, + "output":{ + "shape":"GetGeneratedTemplateOutput", + "resultWrapper":"GetGeneratedTemplateResult" + }, + "errors":[ + {"shape":"GeneratedTemplateNotFoundException"} + ], + "documentation":"

Retrieves a generated template. If the template is in an InProgress or Pending status then the template returned will be the template when the template was last in a Complete status. If the template has not yet been in a Complete status then an empty template will be returned.

" + }, "GetStackPolicy":{ "name":"GetStackPolicy", "http":{ @@ -696,6 +775,19 @@ }, "documentation":"

Lists all exported output values in the account and Region in which you call this action. Use this action to see the exported output values that you can import into other stacks. To import values, use the Fn::ImportValue function.

For more information, see CloudFormation export stack output values.

" }, + "ListGeneratedTemplates":{ + "name":"ListGeneratedTemplates", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListGeneratedTemplatesInput"}, + "output":{ + "shape":"ListGeneratedTemplatesOutput", + "resultWrapper":"ListGeneratedTemplatesResult" + }, + "documentation":"

Lists your generated templates in this Region.

" + }, "ListImports":{ "name":"ListImports", "http":{ @@ -709,6 +801,53 @@ }, "documentation":"

Lists all stacks that are importing an exported output value. To modify or remove an exported output value, first use this action to see which stacks are using it. To see the exported output values in your account, see ListExports.

For more information about importing an exported output value, see the Fn::ImportValue function.

" }, + "ListResourceScanRelatedResources":{ + "name":"ListResourceScanRelatedResources", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListResourceScanRelatedResourcesInput"}, + "output":{ + "shape":"ListResourceScanRelatedResourcesOutput", + "resultWrapper":"ListResourceScanRelatedResourcesResult" + }, + "errors":[ + {"shape":"ResourceScanNotFoundException"}, + {"shape":"ResourceScanInProgressException"} + ], + "documentation":"

Lists the related resources for a list of resources from a resource scan. The response indicates whether each returned resource is already managed by CloudFormation.

" + }, + "ListResourceScanResources":{ + "name":"ListResourceScanResources", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListResourceScanResourcesInput"}, + "output":{ + "shape":"ListResourceScanResourcesOutput", + "resultWrapper":"ListResourceScanResourcesResult" + }, + "errors":[ + {"shape":"ResourceScanNotFoundException"}, + {"shape":"ResourceScanInProgressException"} + ], + "documentation":"

Lists the resources from a resource scan. The results can be filtered by resource identifier, resource type prefix, tag key, and tag value. Only resources that match all specified filters are returned. The response indicates whether each returned resource is already managed by CloudFormation.

" + }, + "ListResourceScans":{ + "name":"ListResourceScans", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListResourceScansInput"}, + "output":{ + "shape":"ListResourceScansOutput", + "resultWrapper":"ListResourceScansResult" + }, + "documentation":"

List the resource scans from newest to oldest. By default it will return up to 10 resource scans.

" + }, "ListStackInstanceResourceDrifts":{ "name":"ListStackInstanceResourceDrifts", "http":{ @@ -1005,6 +1144,23 @@ "input":{"shape":"SignalResourceInput"}, "documentation":"

Sends a signal to the specified resource with a success or failure status. You can use the SignalResource operation in conjunction with a creation policy or update policy. CloudFormation doesn't proceed with a stack creation or update until resources receive the required number of signals or the timeout period is exceeded. The SignalResource operation is useful in cases where you want to send signals from anywhere other than an Amazon EC2 instance.

" }, + "StartResourceScan":{ + "name":"StartResourceScan", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StartResourceScanInput"}, + "output":{ + "shape":"StartResourceScanOutput", + "resultWrapper":"StartResourceScanResult" + }, + "errors":[ + {"shape":"ResourceScanInProgressException"}, + {"shape":"ResourceScanLimitExceededException"} + ], + "documentation":"

Starts a scan of the resources in this account in this Region. You can check the status of a scan using the ListResourceScans API action.

" + }, "StopStackSetOperation":{ "name":"StopStackSetOperation", "http":{ @@ -1041,6 +1197,24 @@ "documentation":"

Tests a registered extension to make sure it meets all necessary requirements for being published in the CloudFormation registry.

For more information, see Testing your public extension prior to publishing in the CloudFormation CLI User Guide.

If you don't specify a version, CloudFormation uses the default version of the extension in your account and Region for testing.

To perform testing, CloudFormation assumes the execution role specified when the type was registered. For more information, see RegisterType.

Once you've initiated testing on an extension using TestType, you can pass the returned TypeVersionArn into DescribeType to monitor the current test status and test status description for the extension.

An extension must have a test status of PASSED before it can be published. For more information, see Publishing extensions to make them available for public use in the CloudFormation CLI User Guide.

", "idempotent":true }, + "UpdateGeneratedTemplate":{ + "name":"UpdateGeneratedTemplate", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateGeneratedTemplateInput"}, + "output":{ + "shape":"UpdateGeneratedTemplateOutput", + "resultWrapper":"UpdateGeneratedTemplateResult" + }, + "errors":[ + {"shape":"AlreadyExistsException"}, + {"shape":"GeneratedTemplateNotFoundException"}, + {"shape":"LimitExceededException"} + ], + "documentation":"

Updates a generated template. This can be used to change the name, add and remove resources, refresh resources, and change the DeletionPolicy and UpdateReplacePolicy settings. You can check the status of the update to the generated template using the DescribeGeneratedTemplate API action.

" + }, "UpdateStack":{ "name":"UpdateStack", "http":{ @@ -1662,6 +1836,18 @@ "SOFT_FAILURE_TOLERANCE" ] }, + "ConcurrentResourcesLimitExceededException":{ + "type":"structure", + "members":{ + }, + "documentation":"

No more than 5 generated templates can be in an InProgress or Pending status at one time. This error is also returned if a generated template that is in an InProgress or Pending status is attempted to be updated or deleted.

", + "error":{ + "code":"ConcurrentResourcesLimitExceeded", + "httpStatusCode":429, + "senderFault":true + }, + "exception":true + }, "ConfigurationSchema":{ "type":"string", "max":60000, @@ -1803,6 +1989,37 @@ }, "documentation":"

The output for the CreateChangeSet action.

" }, + "CreateGeneratedTemplateInput":{ + "type":"structure", + "required":["GeneratedTemplateName"], + "members":{ + "Resources":{ + "shape":"ResourceDefinitions", + "documentation":"

An optional list of resources to be included in the generated template.

If no resources are specified, the template will be created without any resources. Resources can be added to the template using the UpdateGeneratedTemplate API action.

" + }, + "GeneratedTemplateName":{ + "shape":"GeneratedTemplateName", + "documentation":"

The name assigned to the generated template.

" + }, + "StackName":{ + "shape":"StackName", + "documentation":"

An optional name or ARN of a stack to use as the base stack for the generated template.

" + }, + "TemplateConfiguration":{ + "shape":"TemplateConfiguration", + "documentation":"

The configuration details of the generated template, including the DeletionPolicy and UpdateReplacePolicy.

" + } + } + }, + "CreateGeneratedTemplateOutput":{ + "type":"structure", + "members":{ + "GeneratedTemplateId":{ + "shape":"GeneratedTemplateId", + "documentation":"

The ID of the generated template.

" + } + } + }, "CreateStackInput":{ "type":"structure", "required":["StackName"], @@ -1833,7 +2050,7 @@ }, "TimeoutInMinutes":{ "shape":"TimeoutMinutes", - "documentation":"

The amount of time that can pass before the stack status becomes CREATE_FAILED; if DisableRollback is not set or is set to false, the stack will be rolled back.

" + "documentation":"

The amount of time that can pass before the stack status becomes CREATE_FAILED; if DisableRollback is not set or is set to false, the stack will be rolled back.

" }, "NotificationARNs":{ "shape":"NotificationARNs", @@ -2085,6 +2302,16 @@ }, "documentation":"

The output for the DeleteChangeSet action.

" }, + "DeleteGeneratedTemplateInput":{ + "type":"structure", + "required":["GeneratedTemplateName"], + "members":{ + "GeneratedTemplateName":{ + "shape":"GeneratedTemplateName", + "documentation":"

The name or Amazon Resource Name (ARN) of a generated template.

" + } + } + }, "DeleteStackInput":{ "type":"structure", "required":["StackName"], @@ -2423,6 +2650,65 @@ }, "documentation":"

The output for the DescribeChangeSet action.

" }, + "DescribeGeneratedTemplateInput":{ + "type":"structure", + "required":["GeneratedTemplateName"], + "members":{ + "GeneratedTemplateName":{ + "shape":"GeneratedTemplateName", + "documentation":"

The name or Amazon Resource Name (ARN) of a generated template.

" + } + } + }, + "DescribeGeneratedTemplateOutput":{ + "type":"structure", + "members":{ + "GeneratedTemplateId":{ + "shape":"GeneratedTemplateId", + "documentation":"

The Amazon Resource Name (ARN) of the generated template. The format is arn:${Partition}:cloudformation:${Region}:${Account}:generatedtemplate/${Id}. For example, arn:aws:cloudformation:us-east-1:123456789012:generatedtemplate/2e8465c1-9a80-43ea-a3a3-4f2d692fe6dc .

" + }, + "GeneratedTemplateName":{ + "shape":"GeneratedTemplateName", + "documentation":"

The name of the generated template.

" + }, + "Resources":{ + "shape":"ResourceDetails", + "documentation":"

A list of objects describing the details of the resources in the template generation.

" + }, + "Status":{ + "shape":"GeneratedTemplateStatus", + "documentation":"

The status of the template generation. Supported values are:

" + }, + "StatusReason":{ + "shape":"TemplateStatusReason", + "documentation":"

The reason for the current template generation status. This will provide more details if a failure happened.

" + }, + "CreationTime":{ + "shape":"CreationTime", + "documentation":"

The time the generated template was created.

" + }, + "LastUpdatedTime":{ + "shape":"LastUpdatedTime", + "documentation":"

The time the generated template was last updated.

" + }, + "Progress":{ + "shape":"TemplateProgress", + "documentation":"

An object describing the progress of the template generation.

" + }, + "StackId":{ + "shape":"StackId", + "documentation":"

The stack ARN of the base stack if a base stack was provided when generating the template.

" + }, + "TemplateConfiguration":{ + "shape":"TemplateConfiguration", + "documentation":"

The configuration details of the generated template, including the DeletionPolicy and UpdateReplacePolicy.

" + }, + "TotalWarnings":{ + "shape":"TotalWarnings", + "documentation":"

The number of warnings generated for this template. The warnings are found in the details of each of the resources in the template.

" + } + } + }, "DescribeOrganizationsAccessInput":{ "type":"structure", "members":{ @@ -2471,6 +2757,57 @@ } } }, + "DescribeResourceScanInput":{ + "type":"structure", + "required":["ResourceScanId"], + "members":{ + "ResourceScanId":{ + "shape":"ResourceScanId", + "documentation":"

The Amazon Resource Name (ARN) of the resource scan.

" + } + } + }, + "DescribeResourceScanOutput":{ + "type":"structure", + "members":{ + "ResourceScanId":{ + "shape":"ResourceScanId", + "documentation":"

The Amazon Resource Name (ARN) of the resource scan. The format is arn:${Partition}:cloudformation:${Region}:${Account}:resourceScan/${Id}. An example is arn:aws:cloudformation:us-east-1:123456789012:resourceScan/f5b490f7-7ed4-428a-aa06-31ff25db0772 .

" + }, + "Status":{ + "shape":"ResourceScanStatus", + "documentation":"

Status of the resource scan.

INPROGRESS

The resource scan is still in progress.

COMPLETE

The resource scan is complete.

EXPIRED

The resource scan has expired.

FAILED

The resource scan has failed.

" + }, + "StatusReason":{ + "shape":"ResourceScanStatusReason", + "documentation":"

The reason for the resource scan status, providing more information if a failure happened.

" + }, + "StartTime":{ + "shape":"Timestamp", + "documentation":"

The time that the resource scan was started.

" + }, + "EndTime":{ + "shape":"Timestamp", + "documentation":"

The time that the resource scan was finished.

" + }, + "PercentageCompleted":{ + "shape":"PercentageCompleted", + "documentation":"

The percentage of the resource scan that has been completed.

" + }, + "ResourceTypes":{ + "shape":"ResourceTypes", + "documentation":"

The list of resource types for the specified scan. Resource types are only available for scans with a Status set to COMPLETE or FAILED .

" + }, + "ResourcesScanned":{ + "shape":"ResourcesScanned", + "documentation":"

The number of resources that were listed. This is only available for scans with a Status set to COMPLETE, EXPIRED, or FAILED .

" + }, + "ResourcesRead":{ + "shape":"ResourcesRead", + "documentation":"

The number of resources that were read. This is only available for scans with a Status set to COMPLETE, EXPIRED, or FAILED .

This field may be 0 if the resource scan failed with a ResourceScanLimitExceededException.

" + } + } + }, "DescribeStackDriftDetectionStatusInput":{ "type":"structure", "required":["StackDriftDetectionId"], @@ -3164,6 +3501,91 @@ "max":100, "min":0 }, + "GeneratedTemplateDeletionPolicy":{ + "type":"string", + "enum":[ + "DELETE", + "RETAIN" + ] + }, + "GeneratedTemplateId":{ + "type":"string", + "max":256, + "min":1 + }, + "GeneratedTemplateName":{ + "type":"string", + "max":128, + "min":1 + }, + "GeneratedTemplateNotFoundException":{ + "type":"structure", + "members":{ + }, + "documentation":"

The generated template was not found.

", + "error":{ + "code":"GeneratedTemplateNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "GeneratedTemplateResourceStatus":{ + "type":"string", + "enum":[ + "PENDING", + "IN_PROGRESS", + "FAILED", + "COMPLETE" + ] + }, + "GeneratedTemplateStatus":{ + "type":"string", + "enum":[ + "CREATE_PENDING", + "UPDATE_PENDING", + "DELETE_PENDING", + "CREATE_IN_PROGRESS", + "UPDATE_IN_PROGRESS", + "DELETE_IN_PROGRESS", + "FAILED", + "COMPLETE" + ] + }, + "GeneratedTemplateUpdateReplacePolicy":{ + "type":"string", + "enum":[ + "DELETE", + "RETAIN" + ] + }, + "GetGeneratedTemplateInput":{ + "type":"structure", + "required":["GeneratedTemplateName"], + "members":{ + "Format":{ + "shape":"TemplateFormat", + "documentation":"

The language to use to retrieve the generated template. Supported values are:

" + }, + "GeneratedTemplateName":{ + "shape":"GeneratedTemplateName", + "documentation":"

The name or Amazon Resource Name (ARN) of the generated template. The format is arn:${Partition}:cloudformation:${Region}:${Account}:generatedtemplate/${Id}. For example, arn:aws:cloudformation:us-east-1:123456789012:generatedtemplate/2e8465c1-9a80-43ea-a3a3-4f2d692fe6dc .

" + } + } + }, + "GetGeneratedTemplateOutput":{ + "type":"structure", + "members":{ + "Status":{ + "shape":"GeneratedTemplateStatus", + "documentation":"

The status of the template generation. Supported values are:

" + }, + "TemplateBody":{ + "shape":"TemplateBody", + "documentation":"

The template body of the generated template, in the language specified by the Language parameter.

" + } + } + }, "GetStackPolicyInput":{ "type":"structure", "required":["StackName"], @@ -3496,6 +3918,19 @@ "IsActivated":{"type":"boolean"}, "IsDefaultConfiguration":{"type":"boolean"}, "IsDefaultVersion":{"type":"boolean"}, + "JazzLogicalResourceIds":{ + "type":"list", + "member":{"shape":"LogicalResourceId"}, + "max":500, + "min":1 + }, + "JazzResourceIdentifierProperties":{ + "type":"map", + "key":{"shape":"JazzResourceIdentifierPropertyKey"}, + "value":{"shape":"JazzResourceIdentifierPropertyValue"} + }, + "JazzResourceIdentifierPropertyKey":{"type":"string"}, + "JazzResourceIdentifierPropertyValue":{"type":"string"}, "Key":{"type":"string"}, "LastUpdatedTime":{"type":"timestamp"}, "LimitExceededException":{ @@ -3563,6 +3998,32 @@ } } }, + "ListGeneratedTemplatesInput":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"NextToken", + "documentation":"

A string that identifies the next page of resource scan results.

" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

If the number of available results exceeds this maximum, the response includes a NextToken value that you can use for the NextToken parameter to get the next set of results. By default the ListGeneratedTemplates API action will return at most 50 results in each response. The maximum value is 100.

" + } + } + }, + "ListGeneratedTemplatesOutput":{ + "type":"structure", + "members":{ + "Summaries":{ + "shape":"TemplateSummaries", + "documentation":"

A list of summaries of the generated templates.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

If the request doesn't return all the remaining results, NextToken is set to a token. To retrieve the next set of results, call ListGeneratedTemplates again and use that value for the NextToken parameter. If the request returns all results, NextToken is set to an empty string.

" + } + } + }, "ListImportsInput":{ "type":"structure", "required":["ExportName"], @@ -3590,6 +4051,117 @@ } } }, + "ListResourceScanRelatedResourcesInput":{ + "type":"structure", + "required":[ + "ResourceScanId", + "Resources" + ], + "members":{ + "ResourceScanId":{ + "shape":"ResourceScanId", + "documentation":"

The Amazon Resource Name (ARN) of the resource scan.

" + }, + "Resources":{ + "shape":"ScannedResourceIdentifiers", + "documentation":"

The list of resources for which you want to get the related resources. Up to 100 resources can be provided.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

A string that identifies the next page of resource scan results.

" + }, + "MaxResults":{ + "shape":"BoxedMaxResults", + "documentation":"

If the number of available results exceeds this maximum, the response includes a NextToken value that you can use for the NextToken parameter to get the next set of results. By default the ListResourceScanRelatedResources API action will return up to 100 results in each response. The maximum value is 100.

" + } + } + }, + "ListResourceScanRelatedResourcesOutput":{ + "type":"structure", + "members":{ + "RelatedResources":{ + "shape":"RelatedResources", + "documentation":"

List of up to MaxResults resources in the specified resource scan related to the specified resources.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

If the request doesn't return all the remaining results, NextToken is set to a token. To retrieve the next set of results, call ListResourceScanRelatedResources again and use that value for the NextToken parameter. If the request returns all results, NextToken is set to an empty string.

" + } + } + }, + "ListResourceScanResourcesInput":{ + "type":"structure", + "required":["ResourceScanId"], + "members":{ + "ResourceScanId":{ + "shape":"ResourceScanId", + "documentation":"

The Amazon Resource Name (ARN) of the resource scan.

" + }, + "ResourceIdentifier":{ + "shape":"ResourceIdentifier", + "documentation":"

If specified, the returned resources will have the specified resource identifier (or one of them in the case where the resource has multiple identifiers).

" + }, + "ResourceTypePrefix":{ + "shape":"ResourceTypePrefix", + "documentation":"

If specified, the returned resources will be of any of the resource types with the specified prefix.

" + }, + "TagKey":{ + "shape":"TagKey", + "documentation":"

If specified, the returned resources will have a matching tag key.

" + }, + "TagValue":{ + "shape":"TagValue", + "documentation":"

If specified, the returned resources will have a matching tag value.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

A string that identifies the next page of resource scan results.

" + }, + "MaxResults":{ + "shape":"ResourceScannerMaxResults", + "documentation":"

If the number of available results exceeds this maximum, the response includes a NextToken value that you can use for the NextToken parameter to get the next set of results. By default the ListResourceScanResources API action will return at most 100 results in each response. The maximum value is 100.

" + } + } + }, + "ListResourceScanResourcesOutput":{ + "type":"structure", + "members":{ + "Resources":{ + "shape":"ScannedResources", + "documentation":"

List of up to MaxResults resources in the specified resource scan that match all of the specified filters.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

If the request doesn't return all the remaining results, NextToken is set to a token. To retrieve the next set of results, call ListResourceScanResources again and use that value for the NextToken parameter. If the request returns all results, NextToken is set to an empty string.

" + } + } + }, + "ListResourceScansInput":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"NextToken", + "documentation":"

A string that identifies the next page of resource scan results.

" + }, + "MaxResults":{ + "shape":"ResourceScannerMaxResults", + "documentation":"

If the number of available results exceeds this maximum, the response includes a NextToken value that you can use for the NextToken parameter to get the next set of results. The default value is 10. The maximum value is 100.

" + } + } + }, + "ListResourceScansOutput":{ + "type":"structure", + "members":{ + "ResourceScanSummaries":{ + "shape":"ResourceScanSummaries", + "documentation":"

The list of scans returned.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

If the request doesn't return all the remaining results, NextToken is set to a token. To retrieve the next set of results, call ListResourceScans again and use that value for the NextToken parameter. If the request returns all results, NextToken is set to an empty string.

" + } + } + }, "ListStackInstanceResourceDriftsInput":{ "type":"structure", "required":[ @@ -4036,6 +4608,7 @@ "max":100000, "min":1 }, + "ManagedByStack":{"type":"boolean"}, "ManagedExecution":{ "type":"structure", "members":{ @@ -4105,6 +4678,10 @@ "member":{"shape":"NotificationARN"}, "max":5 }, + "NumberOfResources":{ + "type":"integer", + "min":0 + }, "OnFailure":{ "type":"string", "enum":[ @@ -4328,6 +4905,7 @@ "type":"list", "member":{"shape":"Parameter"} }, + "PercentageCompleted":{"type":"double"}, "PermissionModels":{ "type":"string", "enum":[ @@ -4365,6 +4943,7 @@ "pattern":"arn:aws[A-Za-z0-9-]{0,64}:cloudformation:[A-Za-z0-9-]{1,64}:[0-9]{12}:type/.+" }, "Properties":{"type":"string"}, + "PropertyDescription":{"type":"string"}, "PropertyDifference":{ "type":"structure", "required":[ @@ -4510,6 +5089,7 @@ "members":{ } }, + "RefreshAllResources":{"type":"boolean"}, "Region":{ "type":"string", "pattern":"^[a-zA-Z0-9-]{1,128}$" @@ -4615,6 +5195,10 @@ "HOOK" ] }, + "RelatedResources":{ + "type":"list", + "member":{"shape":"ScannedResource"} + }, "Replacement":{ "type":"string", "enum":[ @@ -4655,6 +5239,7 @@ "type":"list", "member":{"shape":"RequiredActivatedType"} }, + "RequiredProperty":{"type":"boolean"}, "RequiresRecreation":{ "type":"string", "enum":[ @@ -4743,6 +5328,71 @@ "type":"list", "member":{"shape":"ResourceChangeDetail"} }, + "ResourceDefinition":{ + "type":"structure", + "required":[ + "ResourceType", + "ResourceIdentifier" + ], + "members":{ + "ResourceType":{ + "shape":"ResourceType", + "documentation":"

The type of the resource, such as AWS::DynamoDB::Table. For the list of supported resources, see IaC generator supported resource types in the CloudFormation User Guide

" + }, + "LogicalResourceId":{ + "shape":"LogicalResourceId", + "documentation":"

The logical resource id for this resource in the generated template.

" + }, + "ResourceIdentifier":{ + "shape":"ResourceIdentifierProperties", + "documentation":"

A list of up to 256 key-value pairs that identifies the scanned resource. The key is the name of one of the primary identifiers for the resource. (Primary identifiers are specified in the primaryIdentifier list in the resource schema.) The value is the value of that primary identifier. For example, for a AWS::DynamoDB::Table resource, the primary identifiers is TableName so the key-value pair could be \"TableName\": \"MyDDBTable\". For more information, see primaryIdentifier in the CloudFormation Command Line Interface User guide for extension development.

" + } + }, + "documentation":"

A resource included in a generated template. This data type is used with the CreateGeneratedTemplate and UpdateGeneratedTemplate API actions.

" + }, + "ResourceDefinitions":{ + "type":"list", + "member":{"shape":"ResourceDefinition"}, + "max":500, + "min":1 + }, + "ResourceDetail":{ + "type":"structure", + "members":{ + "ResourceType":{ + "shape":"ResourceType", + "documentation":"

The type of the resource, such as AWS::DynamoDB::Table. For the list of supported resources, see IaC generator supported resource types in the CloudFormation User Guide

" + }, + "LogicalResourceId":{ + "shape":"LogicalResourceId", + "documentation":"

The logical id for this resource in the final generated template.

" + }, + "ResourceIdentifier":{ + "shape":"ResourceIdentifierProperties", + "documentation":"

A list of up to 256 key-value pairs that identifies the resource in the generated template. The key is the name of one of the primary identifiers for the resource. (Primary identifiers are specified in the primaryIdentifier list in the resource schema.) The value is the value of that primary identifier. For example, for a AWS::DynamoDB::Table resource, the primary identifiers is TableName so the key-value pair could be \"TableName\": \"MyDDBTable\". For more information, see primaryIdentifier in the CloudFormation Command Line Interface User guide for extension development.

" + }, + "ResourceStatus":{ + "shape":"GeneratedTemplateResourceStatus", + "documentation":"

Status of the processing of a resource in a generated template.

InProgress

The resource processing is still in progress.

Complete

The resource processing is complete.

Pending

The resource processing is pending.

Failed

The resource processing has failed.

" + }, + "ResourceStatusReason":{ + "shape":"ResourceStatusReason", + "documentation":"

The reason for the resource detail, providing more information if a failure happened.

" + }, + "Warnings":{ + "shape":"WarningDetails", + "documentation":"

The warnings generated for this resource.

" + } + }, + "documentation":"

Details about a resource in a generated template

" + }, + "ResourceDetails":{ + "type":"list", + "member":{"shape":"ResourceDetail"}, + "max":500, + "min":1 + }, + "ResourceIdentifier":{"type":"string"}, "ResourceIdentifierProperties":{ "type":"map", "key":{"shape":"ResourceIdentifierPropertyKey"}, @@ -4792,6 +5442,91 @@ "min":1 }, "ResourceProperties":{"type":"string"}, + "ResourceScanId":{"type":"string"}, + "ResourceScanInProgressException":{ + "type":"structure", + "members":{ + }, + "documentation":"

A resource scan is currently in progress. Only one can be run at a time for an account in a Region.

", + "error":{ + "code":"ResourceScanInProgress", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "ResourceScanLimitExceededException":{ + "type":"structure", + "members":{ + }, + "documentation":"

The limit on resource scans has been exceeded. Reasons include:

", + "error":{ + "code":"ResourceScanLimitExceeded", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "ResourceScanNotFoundException":{ + "type":"structure", + "members":{ + }, + "documentation":"

The resource scan was not found.

", + "error":{ + "code":"ResourceScanNotFound", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "ResourceScanStatus":{ + "type":"string", + "enum":[ + "IN_PROGRESS", + "FAILED", + "COMPLETE", + "EXPIRED" + ] + }, + "ResourceScanStatusReason":{"type":"string"}, + "ResourceScanSummaries":{ + "type":"list", + "member":{"shape":"ResourceScanSummary"} + }, + "ResourceScanSummary":{ + "type":"structure", + "members":{ + "ResourceScanId":{ + "shape":"ResourceScanId", + "documentation":"

The Amazon Resource Name (ARN) of the resource scan.

" + }, + "Status":{ + "shape":"ResourceScanStatus", + "documentation":"

Status of the resource scan.

IN_PROGRESS

The resource scan is still in progress.

COMPLETE

The resource scan is complete.

EXPIRED

The resource scan has expired.

FAILED

The resource scan has failed.

" + }, + "StatusReason":{ + "shape":"ResourceScanStatusReason", + "documentation":"

The reason for the resource scan status, providing more information if a failure happened.

" + }, + "StartTime":{ + "shape":"Timestamp", + "documentation":"

The time that the resource scan was started.

" + }, + "EndTime":{ + "shape":"Timestamp", + "documentation":"

The time that the resource scan was finished.

" + }, + "PercentageCompleted":{ + "shape":"PercentageCompleted", + "documentation":"

The percentage of the resource scan that has been completed.

" + } + }, + "documentation":"

A summary of the resource scan. This is returned by the ListResourceScan API action.

" + }, + "ResourceScannerMaxResults":{ + "type":"integer", + "box":true + }, "ResourceSignalStatus":{ "type":"string", "enum":[ @@ -4882,10 +5617,29 @@ "max":256, "min":1 }, + "ResourceTypePrefix":{"type":"string"}, "ResourceTypes":{ "type":"list", "member":{"shape":"ResourceType"} }, + "ResourcesFailed":{ + "type":"integer", + "min":0 + }, + "ResourcesPending":{ + "type":"integer", + "min":0 + }, + "ResourcesProcessing":{ + "type":"integer", + "min":0 + }, + "ResourcesRead":{"type":"integer"}, + "ResourcesScanned":{"type":"integer"}, + "ResourcesSucceeded":{ + "type":"integer", + "min":0 + }, "ResourcesToImport":{ "type":"list", "member":{"shape":"ResourceToImport"}, @@ -4993,6 +5747,50 @@ "max":4096, "min":1 }, + "ScannedResource":{ + "type":"structure", + "members":{ + "ResourceType":{ + "shape":"ResourceType", + "documentation":"

The type of the resource, such as AWS::DynamoDB::Table. For the list of supported resources, see IaC generator supported resource types in the CloudFormation User Guide.

" + }, + "ResourceIdentifier":{ + "shape":"JazzResourceIdentifierProperties", + "documentation":"

A list of up to 256 key-value pairs that identifies the scanned resource. The key is the name of one of the primary identifiers for the resource. (Primary identifiers are specified in the primaryIdentifier list in the resource schema.) The value is the value of that primary identifier. For example, for an AWS::DynamoDB::Table resource, the primary identifier is TableName so the key-value pair could be \"TableName\": \"MyDDBTable\". For more information, see primaryIdentifier in the CloudFormation Command Line Interface User guide for extension development.

" + }, + "ManagedByStack":{ + "shape":"ManagedByStack", + "documentation":"

If true, the resource is managed by a CloudFormation stack.

" + } + }, + "documentation":"

A scanned resource returned by ListResourceScanResources or ListResourceScanRelatedResources.

" + }, + "ScannedResourceIdentifier":{ + "type":"structure", + "required":[ + "ResourceType", + "ResourceIdentifier" + ], + "members":{ + "ResourceType":{ + "shape":"ResourceType", + "documentation":"

The type of the resource, such as AWS::DynamoDB::Table. For the list of supported resources, see IaC generator supported resource types in the CloudFormation User Guide.

" + }, + "ResourceIdentifier":{ + "shape":"JazzResourceIdentifierProperties", + "documentation":"

A list of up to 256 key-value pairs that identifies the scanned resource. The key is the name of one of the primary identifiers for the resource. (Primary identifiers are specified in the primaryIdentifier list in the resource schema.) The value is the value of that primary identifier. For example, for an AWS::DynamoDB::Table resource, the primary identifier is TableName so the key-value pair could be \"TableName\": \"MyDDBTable\". For more information, see primaryIdentifier in the CloudFormation Command Line Interface User guide for extension development.

" + } + }, + "documentation":"

Identifies a scanned resource. This is used with the ListResourceScanRelatedResources API action.

" + }, + "ScannedResourceIdentifiers":{ + "type":"list", + "member":{"shape":"ScannedResourceIdentifier"} + }, + "ScannedResources":{ + "type":"list", + "member":{"shape":"ScannedResource"} + }, "Scope":{ "type":"list", "member":{"shape":"ResourceAttribute"} @@ -6137,7 +6935,7 @@ }, "RegionOrder":{ "shape":"RegionList", - "documentation":"

The order of the Regions where you want to perform the stack operation.

" + "documentation":"

The order of the Regions where you want to perform the stack operation.

RegionOrder isn't followed if AutoDeployment is enabled.

" }, "FailureToleranceCount":{ "shape":"FailureToleranceCount", @@ -6435,6 +7233,24 @@ }, "exception":true }, + "StartResourceScanInput":{ + "type":"structure", + "members":{ + "ClientRequestToken":{ + "shape":"ClientRequestToken", + "documentation":"

A unique identifier for this StartResourceScan request. Specify this token if you plan to retry requests so that CloudFormation knows that you're not attempting to start a new resource scan.

" + } + } + }, + "StartResourceScanOutput":{ + "type":"structure", + "members":{ + "ResourceScanId":{ + "shape":"ResourceScanId", + "documentation":"

The Amazon Resource Name (ARN) of the resource scan. The format is arn:${Partition}:cloudformation:${Region}:${Account}:resourceScan/${Id}. An example is arn:aws:cloudformation:us-east-1:123456789012:resourceScan/f5b490f7-7ed4-428a-aa06-31ff25db0772 .

" + } + } + }, "StatusMessage":{ "type":"string", "max":1024 @@ -6511,7 +7327,28 @@ "type":"string", "min":1 }, + "TemplateConfiguration":{ + "type":"structure", + "members":{ + "DeletionPolicy":{ + "shape":"GeneratedTemplateDeletionPolicy", + "documentation":"

The DeletionPolicy assigned to resources in the generated template. Supported values are:

For more information, see DeletionPolicy attribute in the CloudFormation User Guide.

" + }, + "UpdateReplacePolicy":{ + "shape":"GeneratedTemplateUpdateReplacePolicy", + "documentation":"

The UpdateReplacePolicy assigned to resources in the generated template. Supported values are:

For more information, see UpdateReplacePolicy attribute in the CloudFormation User Guide.

" + } + }, + "documentation":"

The configuration details of a generated template.

" + }, "TemplateDescription":{"type":"string"}, + "TemplateFormat":{ + "type":"string", + "enum":[ + "JSON", + "YAML" + ] + }, "TemplateParameter":{ "type":"structure", "members":{ @@ -6538,6 +7375,28 @@ "type":"list", "member":{"shape":"TemplateParameter"} }, + "TemplateProgress":{ + "type":"structure", + "members":{ + "ResourcesSucceeded":{ + "shape":"ResourcesSucceeded", + "documentation":"

The number of resources that succeeded the template generation.

" + }, + "ResourcesFailed":{ + "shape":"ResourcesFailed", + "documentation":"

The number of resources that failed the template generation.

" + }, + "ResourcesProcessing":{ + "shape":"ResourcesProcessing", + "documentation":"

The number of resources that are in-process for the template generation.

" + }, + "ResourcesPending":{ + "shape":"ResourcesPending", + "documentation":"

The number of resources that are still pending the template generation.

" + } + }, + "documentation":"

A summary of the progress of the template generation.

" + }, "TemplateStage":{ "type":"string", "enum":[ @@ -6545,6 +7404,49 @@ "Processed" ] }, + "TemplateStatusReason":{ + "type":"string", + "max":256, + "min":1 + }, + "TemplateSummaries":{ + "type":"list", + "member":{"shape":"TemplateSummary"} + }, + "TemplateSummary":{ + "type":"structure", + "members":{ + "GeneratedTemplateId":{ + "shape":"GeneratedTemplateId", + "documentation":"

The Amazon Resource Name (ARN) of the generated template. The format is arn:${Partition}:cloudformation:${Region}:${Account}:generatedtemplate/${Id}. For example, arn:aws:cloudformation:us-east-1:123456789012:generatedtemplate/2e8465c1-9a80-43ea-a3a3-4f2d692fe6dc .

" + }, + "GeneratedTemplateName":{ + "shape":"GeneratedTemplateName", + "documentation":"

The name of the generated template.

" + }, + "Status":{ + "shape":"GeneratedTemplateStatus", + "documentation":"

The status of the template generation. Supported values are:

" + }, + "StatusReason":{ + "shape":"TemplateStatusReason", + "documentation":"

The reason for the current template generation status. This will provide more details if a failure happened.

" + }, + "CreationTime":{ + "shape":"CreationTime", + "documentation":"

The time the generated template was created.

" + }, + "LastUpdatedTime":{ + "shape":"LastUpdatedTime", + "documentation":"

The time the generated template was last updated.

" + }, + "NumberOfResources":{ + "shape":"NumberOfResources", + "documentation":"

The number of resources in the generated template. This is a total of resources in pending, in-progress, completed, and failed states.

" + } + }, + "documentation":"

The summary of a generated template.

" + }, "TemplateSummaryConfig":{ "type":"structure", "members":{ @@ -6628,6 +7530,10 @@ "type":"integer", "min":0 }, + "TotalWarnings":{ + "type":"integer", + "min":0 + }, "TransformName":{"type":"string"}, "TransformsList":{ "type":"list", @@ -6915,6 +7821,45 @@ "type":"list", "member":{"shape":"TypeConfigurationIdentifier"} }, + "UpdateGeneratedTemplateInput":{ + "type":"structure", + "required":["GeneratedTemplateName"], + "members":{ + "GeneratedTemplateName":{ + "shape":"GeneratedTemplateName", + "documentation":"

The name or Amazon Resource Name (ARN) of a generated template.

" + }, + "NewGeneratedTemplateName":{ + "shape":"GeneratedTemplateName", + "documentation":"

An optional new name to assign to the generated template.

" + }, + "AddResources":{ + "shape":"ResourceDefinitions", + "documentation":"

An optional list of resources to be added to the generated template.

" + }, + "RemoveResources":{ + "shape":"JazzLogicalResourceIds", + "documentation":"

A list of logical ids for resources to remove from the generated template.

" + }, + "RefreshAllResources":{ + "shape":"RefreshAllResources", + "documentation":"

If true, update the resource properties in the generated template with their current live state. This feature is useful when the resource properties in your generated template do not reflect the live state of the resource properties. This happens when a user updates the resource properties after generating a template.

" + }, + "TemplateConfiguration":{ + "shape":"TemplateConfiguration", + "documentation":"

The configuration details of the generated template, including the DeletionPolicy and UpdateReplacePolicy.

" + } + } + }, + "UpdateGeneratedTemplateOutput":{ + "type":"structure", + "members":{ + "GeneratedTemplateId":{ + "shape":"GeneratedTemplateId", + "documentation":"

The Amazon Resource Name (ARN) of the generated template. The format is arn:${Partition}:cloudformation:${Region}:${Account}:generatedtemplate/${Id}. For example, arn:aws:cloudformation:us-east-1:123456789012:generatedtemplate/2e8465c1-9a80-43ea-a3a3-4f2d692fe6dc .

" + } + } + }, "UpdateStackInput":{ "type":"structure", "required":["StackName"], @@ -7232,6 +8177,54 @@ "PRIVATE" ] }, + "WarningDetail":{ + "type":"structure", + "members":{ + "Type":{ + "shape":"WarningType", + "documentation":"

The type of this warning. For more information, see IaC generator and write-only properties in the CloudFormation User Guide.

Currently the resource and property reference documentation does not indicate if a property uses a type of oneOf or anyOf. You need to look at the resource provider schema.

" + }, + "Properties":{ + "shape":"WarningProperties", + "documentation":"

The properties of the resource that are impacted by this warning.

" + } + }, + "documentation":"

The warnings generated for a specific resource for this generated template.

" + }, + "WarningDetails":{ + "type":"list", + "member":{"shape":"WarningDetail"} + }, + "WarningProperties":{ + "type":"list", + "member":{"shape":"WarningProperty"} + }, + "WarningProperty":{ + "type":"structure", + "members":{ + "PropertyPath":{ + "shape":"PropertyPath", + "documentation":"

The path of the property. For example, if this is for the S3Bucket member of the Code property, the property path would be Code/S3Bucket.

" + }, + "Required":{ + "shape":"RequiredProperty", + "documentation":"

If true, the specified property is required.

" + }, + "Description":{ + "shape":"PropertyDescription", + "documentation":"

The description of the property from the resource provider schema.

" + } + }, + "documentation":"

A specific property that is impacted by a warning.

" + }, + "WarningType":{ + "type":"string", + "enum":[ + "MUTUALLY_EXCLUSIVE_PROPERTIES", + "UNSUPPORTED_PROPERTIES", + "MUTUALLY_EXCLUSIVE_TYPES" + ] + }, "Warnings":{ "type":"structure", "members":{ diff -Nru awscli-2.15.9/awscli/botocore/data/cloudfront-keyvaluestore/2022-07-26/service-2.json awscli-2.15.22/awscli/botocore/data/cloudfront-keyvaluestore/2022-07-26/service-2.json --- awscli-2.15.9/awscli/botocore/data/cloudfront-keyvaluestore/2022-07-26/service-2.json 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/awscli/botocore/data/cloudfront-keyvaluestore/2022-07-26/service-2.json 2024-02-21 17:34:54.000000000 +0000 @@ -267,6 +267,14 @@ "LastModified":{ "shape":"Timestamp", "documentation":"

Date and time when the key value pairs in the Key Value Store was last modified.

" + }, + "Status":{ + "shape":"String", + "documentation":"

The current status of the Key Value Store.

" + }, + "FailureReason":{ + "shape":"String", + "documentation":"

The reason for Key Value Store creation failure.

" } }, "documentation":"

Metadata information about a Key Value Store.

" diff -Nru awscli-2.15.9/awscli/botocore/data/cloudtrail/2013-11-01/service-2.json awscli-2.15.22/awscli/botocore/data/cloudtrail/2013-11-01/service-2.json --- awscli-2.15.9/awscli/botocore/data/cloudtrail/2013-11-01/service-2.json 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/awscli/botocore/data/cloudtrail/2013-11-01/service-2.json 2024-02-21 17:34:54.000000000 +0000 @@ -335,7 +335,7 @@ {"shape":"ConcurrentModificationException"}, {"shape":"AccessDeniedException"} ], - "documentation":"

Disables Lake query federation on the specified event data store. When you disable federation, CloudTrail removes the metadata associated with the federated event data store in the Glue Data Catalog and removes registration for the federation role ARN and event data store in Lake Formation. No CloudTrail Lake data is deleted when you disable federation.

" + "documentation":"

Disables Lake query federation on the specified event data store. When you disable federation, CloudTrail disables the integration with Glue, Lake Formation, and Amazon Athena. After disabling Lake query federation, you can no longer query your event data in Amazon Athena.

No CloudTrail Lake data is deleted when you disable federation and you can continue to run queries in CloudTrail Lake.

" }, "EnableFederation":{ "name":"EnableFederation", @@ -362,7 +362,7 @@ {"shape":"AccessDeniedException"}, {"shape":"EventDataStoreFederationEnabledException"} ], - "documentation":"

Enables Lake query federation on the specified event data store. Federating an event data store lets you view the metadata associated with the event data store in the Glue Data Catalog and run SQL queries against your event data using Amazon Athena. The table metadata stored in the Glue Data Catalog lets the Athena query engine know how to find, read, and process the data that you want to query.

When you enable Lake query federation, CloudTrail creates a federated database named aws:cloudtrail (if the database doesn't already exist) and a federated table in the Glue Data Catalog. The event data store ID is used for the table name. CloudTrail registers the role ARN and event data store in Lake Formation, the service responsible for revoking or granting permissions to the federated resources in the Glue Data Catalog.

For more information about Lake query federation, see Federate an event data store.

" + "documentation":"

Enables Lake query federation on the specified event data store. Federating an event data store lets you view the metadata associated with the event data store in the Glue Data Catalog and run SQL queries against your event data using Amazon Athena. The table metadata stored in the Glue Data Catalog lets the Athena query engine know how to find, read, and process the data that you want to query.

When you enable Lake query federation, CloudTrail creates a managed database named aws:cloudtrail (if the database doesn't already exist) and a managed federated table in the Glue Data Catalog. The event data store ID is used for the table name. CloudTrail registers the role ARN and event data store in Lake Formation, the service responsible for allowing fine-grained access control of the federated resources in the Glue Data Catalog.

For more information about Lake query federation, see Federate an event data store.

" }, "GetChannel":{ "name":"GetChannel", @@ -604,6 +604,22 @@ "documentation":"

Returns information on all imports, or a select set of imports by ImportStatus or Destination.

", "idempotent":true }, + "ListInsightsMetricData":{ + "name":"ListInsightsMetricData", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListInsightsMetricDataRequest"}, + "output":{"shape":"ListInsightsMetricDataResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"OperationNotPermittedException"}, + {"shape":"UnsupportedOperationException"} + ], + "documentation":"

Returns Insights metrics data for trails that have enabled Insights. The request must include the EventSource, EventName, and InsightType parameters.

If the InsightType is set to ApiErrorRateInsight, the request must also include the ErrorCode parameter.

The following are the available time periods for ListInsightsMetricData. Each cutoff is inclusive.

Access to the ListInsightsMetricData API operation is linked to the cloudtrail:LookupEvents action. To use this operation, you must have permissions to perform the cloudtrail:LookupEvents action.

", + "idempotent":true + }, "ListPublicKeys":{ "name":"ListPublicKeys", "http":{ @@ -1070,7 +1086,7 @@ {"shape":"NoManagementAccountSLRExistsException"}, {"shape":"OrganizationNotInAllFeaturesModeException"} ], - "documentation":"

Updates an event data store. The required EventDataStore value is an ARN or the ID portion of the ARN. Other parameters are optional, but at least one optional parameter must be specified, or CloudTrail throws an error. RetentionPeriod is in days, and valid values are integers between 7 and 3653 if the BillingMode is set to EXTENDABLE_RETENTION_PRICING, or between 7 and 2557 if BillingMode is set to FIXED_RETENTION_PRICING. By default, TerminationProtection is enabled.

For event data stores for CloudTrail events, AdvancedEventSelectors includes or excludes management, data, or Insights events in your event data store. For more information about AdvancedEventSelectors, see AdvancedEventSelectors.

For event data stores for Config configuration items, Audit Manager evidence, or non-Amazon Web Services events, AdvancedEventSelectors includes events of that type in your event data store.

", + "documentation":"

Updates an event data store. The required EventDataStore value is an ARN or the ID portion of the ARN. Other parameters are optional, but at least one optional parameter must be specified, or CloudTrail throws an error. RetentionPeriod is in days, and valid values are integers between 7 and 3653 if the BillingMode is set to EXTENDABLE_RETENTION_PRICING, or between 7 and 2557 if BillingMode is set to FIXED_RETENTION_PRICING. By default, TerminationProtection is enabled.

For event data stores for CloudTrail events, AdvancedEventSelectors includes or excludes management or data events in your event data store. For more information about AdvancedEventSelectors, see AdvancedEventSelectors.

For event data stores for CloudTrail Insights events, Config configuration items, Audit Manager evidence, or non-Amazon Web Services events, AdvancedEventSelectors includes events of that type in your event data store.

", "idempotent":true }, "UpdateTrail":{ @@ -1211,7 +1227,7 @@ "members":{ "Field":{ "shape":"SelectorField", - "documentation":"

A field in a CloudTrail event record on which to filter events to be logged. For event data stores for Config configuration items, Audit Manager evidence, or non-Amazon Web Services events, the field is used only for selecting events as filtering is not supported.

For CloudTrail event records, supported fields include readOnly, eventCategory, eventSource (for management events), eventName, resources.type, and resources.ARN.

For event data stores for Config configuration items, Audit Manager evidence, or non-Amazon Web Services events, the only supported field is eventCategory.

" + "documentation":"

A field in a CloudTrail event record on which to filter events to be logged. For event data stores for CloudTrail Insights events, Config configuration items, Audit Manager evidence, or events outside of Amazon Web Services, the field is used only for selecting events as filtering is not supported.

For CloudTrail management events, supported fields include readOnly, eventCategory, and eventSource.

For CloudTrail data events, supported fields include readOnly, eventCategory, eventName, resources.type, and resources.ARN.

For event data stores for CloudTrail Insights events, Config configuration items, Audit Manager evidence, or events outside of Amazon Web Services, the only supported field is eventCategory.

" }, "Equals":{ "shape":"Operator", @@ -1580,7 +1596,7 @@ }, "CloudWatchLogsLogGroupArn":{ "shape":"String", - "documentation":"

Specifies a log group name using an Amazon Resource Name (ARN), a unique identifier that represents the log group to which CloudTrail logs will be delivered. You must use a log group that exists in your account.

Not required unless you specify CloudWatchLogsRoleArn.

Only the management account can configure a CloudWatch Logs log group for an organization trail.

" + "documentation":"

Specifies a log group name using an Amazon Resource Name (ARN), a unique identifier that represents the log group to which CloudTrail logs will be delivered. You must use a log group that exists in your account.

Not required unless you specify CloudWatchLogsRoleArn.

" }, "CloudWatchLogsRoleArn":{ "shape":"String", @@ -1662,7 +1678,7 @@ "members":{ "Type":{ "shape":"String", - "documentation":"

The resource type in which you want to log data events. You can specify the following basic event selector resource types:

The following resource types are also available through advanced event selectors. Basic event selector resource types are valid in advanced event selectors, but advanced event selector resource types are not valid in basic event selectors. For more information, see AdvancedFieldSelector.

" + "documentation":"

The resource type in which you want to log data events. You can specify the following basic event selector resource types:

Additional resource types are available through advanced event selectors. For more information about these additional resource types, see AdvancedFieldSelector.

" }, "Values":{ "shape":"DataResourceValues", @@ -1870,7 +1886,7 @@ "members":{ "Type":{ "shape":"DestinationType", - "documentation":"

The type of destination for events arriving from a channel. For channels used for a CloudTrail Lake integration, the value is EventDataStore. For service-linked channels, the value is AWS_SERVICE.

" + "documentation":"

The type of destination for events arriving from a channel. For channels used for a CloudTrail Lake integration, the value is EVENT_DATA_STORE. For service-linked channels, the value is AWS_SERVICE.

" }, "Location":{ "shape":"Location", @@ -1915,6 +1931,7 @@ } } }, + "Double":{"type":"double"}, "EnableFederationRequest":{ "type":"structure", "required":[ @@ -1949,6 +1966,11 @@ } } }, + "ErrorCode":{ + "type":"string", + "max":128, + "pattern":"^[\\w\\d\\s_.,\\-:\\[\\]]+$" + }, "ErrorMessage":{ "type":"string", "max":1000, @@ -2145,6 +2167,11 @@ "type":"list", "member":{"shape":"EventDataStore"} }, + "EventName":{ + "type":"string", + "max":128, + "pattern":"^[A-Za-z0-9_]+$" + }, "EventSelector":{ "type":"structure", "members":{ @@ -2171,6 +2198,11 @@ "type":"list", "member":{"shape":"EventSelector"} }, + "EventSource":{ + "type":"string", + "max":256, + "pattern":"^[a-z0-9_-]+\\.amazonaws\\.com$" + }, "EventsList":{ "type":"list", "member":{"shape":"Event"} @@ -2785,6 +2817,32 @@ "ApiErrorRateInsight" ] }, + "InsightsMetricDataType":{ + "type":"string", + "enum":[ + "FillWithZeros", + "NonZeroData" + ] + }, + "InsightsMetricMaxResults":{ + "type":"integer", + "max":21600, + "min":1 + }, + "InsightsMetricNextToken":{ + "type":"string", + "max":5000, + "min":1 + }, + "InsightsMetricPeriod":{ + "type":"integer", + "max":3600, + "min":60 + }, + "InsightsMetricValues":{ + "type":"list", + "member":{"shape":"Double"} + }, "InsufficientDependencyServiceAccessPermissionException":{ "type":"structure", "members":{ @@ -3155,6 +3213,89 @@ } } }, + "ListInsightsMetricDataRequest":{ + "type":"structure", + "required":[ + "EventSource", + "EventName", + "InsightType" + ], + "members":{ + "EventSource":{ + "shape":"EventSource", + "documentation":"

The Amazon Web Services service to which the request was made, such as iam.amazonaws.com or s3.amazonaws.com.

" + }, + "EventName":{ + "shape":"EventName", + "documentation":"

The name of the event, typically the Amazon Web Services API on which unusual levels of activity were recorded.

" + }, + "InsightType":{ + "shape":"InsightType", + "documentation":"

The type of CloudTrail Insights event, which is either ApiCallRateInsight or ApiErrorRateInsight. The ApiCallRateInsight Insights type analyzes write-only management API calls that are aggregated per minute against a baseline API call volume. The ApiErrorRateInsight Insights type analyzes management API calls that result in error codes.

" + }, + "ErrorCode":{ + "shape":"ErrorCode", + "documentation":"

Conditionally required if the InsightType parameter is set to ApiErrorRateInsight.

If returning metrics for the ApiErrorRateInsight Insights type, this is the error to retrieve data for. For example, AccessDenied.

" + }, + "StartTime":{ + "shape":"Date", + "documentation":"

Specifies, in UTC, the start time for time-series data. The value specified is inclusive; results include data points with the specified time stamp.

The default is 90 days before the time of request.

" + }, + "EndTime":{ + "shape":"Date", + "documentation":"

Specifies, in UTC, the end time for time-series data. The value specified is exclusive; results include data points up to the specified time stamp.

The default is the time of request.

" + }, + "Period":{ + "shape":"InsightsMetricPeriod", + "documentation":"

Granularity of data to retrieve, in seconds. Valid values are 60, 300, and 3600. If you specify any other value, you will get an error. The default is 3600 seconds.

" + }, + "DataType":{ + "shape":"InsightsMetricDataType", + "documentation":"

Type of datapoints to return. Valid values are NonZeroData and FillWithZeros. The default is NonZeroData.

" + }, + "MaxResults":{ + "shape":"InsightsMetricMaxResults", + "documentation":"

The maximum number of datapoints to return. Valid values are integers from 1 to 21600. The default value is 21600.

" + }, + "NextToken":{ + "shape":"InsightsMetricNextToken", + "documentation":"

Returned if all datapoints can't be returned in a single call. For example, due to reaching MaxResults.

Add this parameter to the request to continue retrieving results starting from the last evaluated point.

" + } + } + }, + "ListInsightsMetricDataResponse":{ + "type":"structure", + "members":{ + "EventSource":{ + "shape":"EventSource", + "documentation":"

The Amazon Web Services service to which the request was made, such as iam.amazonaws.com or s3.amazonaws.com.

" + }, + "EventName":{ + "shape":"EventName", + "documentation":"

The name of the event, typically the Amazon Web Services API on which unusual levels of activity were recorded.

" + }, + "InsightType":{ + "shape":"InsightType", + "documentation":"

The type of CloudTrail Insights event, which is either ApiCallRateInsight or ApiErrorRateInsight. The ApiCallRateInsight Insights type analyzes write-only management API calls that are aggregated per minute against a baseline API call volume. The ApiErrorRateInsight Insights type analyzes management API calls that result in error codes.

" + }, + "ErrorCode":{ + "shape":"ErrorCode", + "documentation":"

Only returned if InsightType parameter was set to ApiErrorRateInsight.

If returning metrics for the ApiErrorRateInsight Insights type, this is the error to retrieve data for. For example, AccessDenied.

" + }, + "Timestamps":{ + "shape":"Timestamps", + "documentation":"

List of timestamps at intervals corresponding to the specified time period.

" + }, + "Values":{ + "shape":"InsightsMetricValues", + "documentation":"

List of values representing the API call rate or error rate at each timestamp. The number of values is equal to the number of timestamps.

" + }, + "NextToken":{ + "shape":"InsightsMetricNextToken", + "documentation":"

Only returned if the full results could not be returned in a single query. You can set the NextToken parameter in the next request to this value to continue retrieval.

" + } + } + }, "ListPublicKeysRequest":{ "type":"structure", "members":{ @@ -4238,6 +4379,10 @@ "documentation":"

This exception is thrown when the request rate exceeds the limit.

", "exception":true }, + "Timestamps":{ + "type":"list", + "member":{"shape":"Date"} + }, "Trail":{ "type":"structure", "members":{ @@ -4549,7 +4694,7 @@ }, "CloudWatchLogsLogGroupArn":{ "shape":"String", - "documentation":"

Specifies a log group name using an Amazon Resource Name (ARN), a unique identifier that represents the log group to which CloudTrail logs are delivered. You must use a log group that exists in your account.

Not required unless you specify CloudWatchLogsRoleArn.

Only the management account can configure a CloudWatch Logs log group for an organization trail.

" + "documentation":"

Specifies a log group name using an Amazon Resource Name (ARN), a unique identifier that represents the log group to which CloudTrail logs are delivered. You must use a log group that exists in your account.

Not required unless you specify CloudWatchLogsRoleArn.

" }, "CloudWatchLogsRoleArn":{ "shape":"String", diff -Nru awscli-2.15.9/awscli/botocore/data/cloudwatch/2010-08-01/service-2.json awscli-2.15.22/awscli/botocore/data/cloudwatch/2010-08-01/service-2.json --- awscli-2.15.9/awscli/botocore/data/cloudwatch/2010-08-01/service-2.json 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/awscli/botocore/data/cloudwatch/2010-08-01/service-2.json 2024-02-21 17:34:54.000000000 +0000 @@ -532,7 +532,8 @@ {"shape":"InvalidParameterCombinationException"}, {"shape":"InternalServiceFault"} ], - "documentation":"

Publishes metric data points to Amazon CloudWatch. CloudWatch associates the data points with the specified metric. If the specified metric does not exist, CloudWatch creates the metric. When CloudWatch creates a metric, it can take up to fifteen minutes for the metric to appear in calls to ListMetrics.

You can publish either individual data points in the Value field, or arrays of values and the number of times each value occurred during the period by using the Values and Counts fields in the MetricDatum structure. Using the Values and Counts method enables you to publish up to 150 values per metric with one PutMetricData request, and supports retrieving percentile statistics on this data.

Each PutMetricData request is limited to 1 MB in size for HTTP POST requests. You can send a payload compressed by gzip. Each request is also limited to no more than 1000 different metrics.

Although the Value parameter accepts numbers of type Double, CloudWatch rejects values that are either too small or too large. Values must be in the range of -2^360 to 2^360. In addition, special values (for example, NaN, +Infinity, -Infinity) are not supported.

You can use up to 30 dimensions per metric to further clarify what data the metric collects. Each dimension consists of a Name and Value pair. For more information about specifying dimensions, see Publishing Metrics in the Amazon CloudWatch User Guide.

You specify the time stamp to be associated with each data point. You can specify time stamps that are as much as two weeks before the current date, and as much as 2 hours after the current day and time.

Data points with time stamps from 24 hours ago or longer can take at least 48 hours to become available for GetMetricData or GetMetricStatistics from the time they are submitted. Data points with time stamps between 3 and 24 hours ago can take as much as 2 hours to become available for for GetMetricData or GetMetricStatistics.

CloudWatch needs raw data points to calculate percentile statistics. If you publish data using a statistic set instead, you can only retrieve percentile statistics for this data if one of the following conditions is true:

" + "documentation":"

Publishes metric data points to Amazon CloudWatch. CloudWatch associates the data points with the specified metric. If the specified metric does not exist, CloudWatch creates the metric. When CloudWatch creates a metric, it can take up to fifteen minutes for the metric to appear in calls to ListMetrics.

You can publish either individual data points in the Value field, or arrays of values and the number of times each value occurred during the period by using the Values and Counts fields in the MetricDatum structure. Using the Values and Counts method enables you to publish up to 150 values per metric with one PutMetricData request, and supports retrieving percentile statistics on this data.

Each PutMetricData request is limited to 1 MB in size for HTTP POST requests. You can send a payload compressed by gzip. Each request is also limited to no more than 1000 different metrics.

Although the Value parameter accepts numbers of type Double, CloudWatch rejects values that are either too small or too large. Values must be in the range of -2^360 to 2^360. In addition, special values (for example, NaN, +Infinity, -Infinity) are not supported.

You can use up to 30 dimensions per metric to further clarify what data the metric collects. Each dimension consists of a Name and Value pair. For more information about specifying dimensions, see Publishing Metrics in the Amazon CloudWatch User Guide.

You specify the time stamp to be associated with each data point. You can specify time stamps that are as much as two weeks before the current date, and as much as 2 hours after the current day and time.

Data points with time stamps from 24 hours ago or longer can take at least 48 hours to become available for GetMetricData or GetMetricStatistics from the time they are submitted. Data points with time stamps between 3 and 24 hours ago can take as much as 2 hours to become available for GetMetricData or GetMetricStatistics.

CloudWatch needs raw data points to calculate percentile statistics. If you publish data using a statistic set instead, you can only retrieve percentile statistics for this data if one of the following conditions is true:

", + "requestcompression":{"encodings":["gzip"]} }, "PutMetricStream":{ "name":"PutMetricStream", diff -Nru awscli-2.15.9/awscli/botocore/data/codebuild/2016-10-06/service-2.json awscli-2.15.22/awscli/botocore/data/codebuild/2016-10-06/service-2.json --- awscli-2.15.9/awscli/botocore/data/codebuild/2016-10-06/service-2.json 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/awscli/botocore/data/codebuild/2016-10-06/service-2.json 2024-02-21 17:34:54.000000000 +0000 @@ -51,6 +51,19 @@ ], "documentation":"

Gets information about one or more builds.

" }, + "BatchGetFleets":{ + "name":"BatchGetFleets", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"BatchGetFleetsInput"}, + "output":{"shape":"BatchGetFleetsOutput"}, + "errors":[ + {"shape":"InvalidInputException"} + ], + "documentation":"

Gets information about one or more compute fleets.

" + }, "BatchGetProjects":{ "name":"BatchGetProjects", "http":{ @@ -90,6 +103,21 @@ ], "documentation":"

Returns an array of reports.

" }, + "CreateFleet":{ + "name":"CreateFleet", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateFleetInput"}, + "output":{"shape":"CreateFleetOutput"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"ResourceAlreadyExistsException"}, + {"shape":"AccountLimitExceededException"} + ], + "documentation":"

Creates a compute fleet.

" + }, "CreateProject":{ "name":"CreateProject", "http":{ @@ -149,6 +177,19 @@ ], "documentation":"

Deletes a batch build.

" }, + "DeleteFleet":{ + "name":"DeleteFleet", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteFleetInput"}, + "output":{"shape":"DeleteFleetOutput"}, + "errors":[ + {"shape":"InvalidInputException"} + ], + "documentation":"

Deletes a compute fleet. When you delete a compute fleet, its builds are not deleted.

" + }, "DeleteProject":{ "name":"DeleteProject", "http":{ @@ -378,6 +419,19 @@ "output":{"shape":"ListCuratedEnvironmentImagesOutput"}, "documentation":"

Gets information about Docker images that are managed by CodeBuild.

" }, + "ListFleets":{ + "name":"ListFleets", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListFleetsInput"}, + "output":{"shape":"ListFleetsOutput"}, + "errors":[ + {"shape":"InvalidInputException"} + ], + "documentation":"

Gets a list of compute fleet names with each compute fleet name representing a single compute fleet.

" + }, "ListProjects":{ "name":"ListProjects", "http":{ @@ -570,6 +624,21 @@ ], "documentation":"

Stops a running batch build.

" }, + "UpdateFleet":{ + "name":"UpdateFleet", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateFleetInput"}, + "output":{"shape":"UpdateFleetOutput"}, + "errors":[ + {"shape":"InvalidInputException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccountLimitExceededException"} + ], + "documentation":"

Updates a compute fleet.

" + }, "UpdateProject":{ "name":"UpdateProject", "http":{ @@ -735,6 +804,29 @@ } } }, + "BatchGetFleetsInput":{ + "type":"structure", + "required":["names"], + "members":{ + "names":{ + "shape":"FleetNames", + "documentation":"

The names or ARNs of the compute fleets.

" + } + } + }, + "BatchGetFleetsOutput":{ + "type":"structure", + "members":{ + "fleets":{ + "shape":"Fleets", + "documentation":"

Information about the requested compute fleets.

" + }, + "fleetsNotFound":{ + "shape":"FleetNames", + "documentation":"

The names of compute fleets for which information could not be found.

" + } + } + }, "BatchGetProjectsInput":{ "type":"structure", "required":["names"], @@ -1472,6 +1564,50 @@ "type":"list", "member":{"shape":"NonEmptyString"} }, + "CreateFleetInput":{ + "type":"structure", + "required":[ + "name", + "baseCapacity", + "environmentType", + "computeType" + ], + "members":{ + "name":{ + "shape":"FleetName", + "documentation":"

The name of the compute fleet.

" + }, + "baseCapacity":{ + "shape":"FleetCapacity", + "documentation":"

The initial number of machines allocated to the fleet, which defines the number of builds that can run in parallel.

" + }, + "environmentType":{ + "shape":"EnvironmentType", + "documentation":"

The environment type of the compute fleet.

For more information, see Build environment compute types in the CodeBuild user guide.

" + }, + "computeType":{ + "shape":"ComputeType", + "documentation":"

Information about the compute resources the compute fleet uses. Available values include:

If you use BUILD_GENERAL1_SMALL:

If you use BUILD_GENERAL1_LARGE:

For more information, see Build environment compute types in the CodeBuild User Guide.

" + }, + "scalingConfiguration":{ + "shape":"ScalingConfigurationInput", + "documentation":"

The scaling configuration of the compute fleet.

" + }, + "tags":{ + "shape":"TagList", + "documentation":"

A list of tag key and value pairs associated with this compute fleet.

These tags are available for use by Amazon Web Services services that support CodeBuild build project tags.

" + } + } + }, + "CreateFleetOutput":{ + "type":"structure", + "members":{ + "fleet":{ + "shape":"Fleet", + "documentation":"

Information about the compute fleet

" + } + } + }, "CreateProjectInput":{ "type":"structure", "required":[ @@ -1688,6 +1824,21 @@ } } }, + "DeleteFleetInput":{ + "type":"structure", + "required":["arn"], + "members":{ + "arn":{ + "shape":"NonEmptyString", + "documentation":"

The ARN of the compute fleet.

" + } + } + }, + "DeleteFleetOutput":{ + "type":"structure", + "members":{ + } + }, "DeleteProjectInput":{ "type":"structure", "required":["name"], @@ -2002,6 +2153,137 @@ "type":"list", "member":{"shape":"FilterGroup"} }, + "Fleet":{ + "type":"structure", + "members":{ + "arn":{ + "shape":"NonEmptyString", + "documentation":"

The ARN of the compute fleet.

" + }, + "name":{ + "shape":"FleetName", + "documentation":"

The name of the compute fleet.

" + }, + "id":{ + "shape":"NonEmptyString", + "documentation":"

The ID of the compute fleet.

" + }, + "created":{ + "shape":"Timestamp", + "documentation":"

The time at which the compute fleet was created.

" + }, + "lastModified":{ + "shape":"Timestamp", + "documentation":"

The time at which the compute fleet was last modified.

" + }, + "status":{ + "shape":"FleetStatus", + "documentation":"

The status of the compute fleet.

" + }, + "baseCapacity":{ + "shape":"FleetCapacity", + "documentation":"

The initial number of machines allocated to the compute fleet, which defines the number of builds that can run in parallel.

" + }, + "environmentType":{ + "shape":"EnvironmentType", + "documentation":"

The environment type of the compute fleet.

For more information, see Build environment compute types in the CodeBuild user guide.

" + }, + "computeType":{ + "shape":"ComputeType", + "documentation":"

Information about the compute resources the compute fleet uses. Available values include:

If you use BUILD_GENERAL1_SMALL:

If you use BUILD_GENERAL1_LARGE:

For more information, see Build environment compute types in the CodeBuild User Guide.

" + }, + "scalingConfiguration":{ + "shape":"ScalingConfigurationOutput", + "documentation":"

The scaling configuration of the compute fleet.

" + }, + "tags":{ + "shape":"TagList", + "documentation":"

A list of tag key and value pairs associated with this compute fleet.

These tags are available for use by Amazon Web Services services that support CodeBuild build project tags.

" + } + }, + "documentation":"

A set of dedicated instances for your build environment.

" + }, + "FleetArns":{ + "type":"list", + "member":{"shape":"NonEmptyString"}, + "max":100, + "min":1 + }, + "FleetCapacity":{ + "type":"integer", + "min":1 + }, + "FleetContextCode":{ + "type":"string", + "enum":[ + "CREATE_FAILED", + "UPDATE_FAILED" + ] + }, + "FleetName":{ + "type":"string", + "max":128, + "min":2, + "pattern":"[A-Za-z0-9][A-Za-z0-9\\-_]{1,127}" + }, + "FleetNames":{ + "type":"list", + "member":{"shape":"NonEmptyString"}, + "max":100, + "min":1 + }, + "FleetScalingMetricType":{ + "type":"string", + "enum":["FLEET_UTILIZATION_RATE"] + }, + "FleetScalingType":{ + "type":"string", + "enum":["TARGET_TRACKING_SCALING"] + }, + "FleetSortByType":{ + "type":"string", + "enum":[ + "NAME", + "CREATED_TIME", + "LAST_MODIFIED_TIME" + ] + }, + "FleetStatus":{ + "type":"structure", + "members":{ + "statusCode":{ + "shape":"FleetStatusCode", + "documentation":"

The status code of the compute fleet. Valid values include:

" + }, + "context":{ + "shape":"FleetContextCode", + "documentation":"

Additional information about a compute fleet. Valid values include:

" + }, + "message":{ + "shape":"String", + "documentation":"

A message associated with the status of a compute fleet.

" + } + }, + "documentation":"

The status of the compute fleet.

" + }, + "FleetStatusCode":{ + "type":"string", + "enum":[ + "CREATING", + "UPDATING", + "ROTATING", + "DELETING", + "CREATE_FAILED", + "UPDATE_ROLLBACK_FAILED", + "ACTIVE" + ] + }, + "Fleets":{ + "type":"list", + "member":{"shape":"Fleet"}, + "max":100, + "min":1 + }, "GetReportGroupTrendInput":{ "type":"structure", "required":[ @@ -2310,6 +2592,40 @@ } } }, + "ListFleetsInput":{ + "type":"structure", + "members":{ + "nextToken":{ + "shape":"SensitiveString", + "documentation":"

During a previous call, if there are more than 100 items in the list, only the first 100 items are returned, along with a unique string called a nextToken. To get the next batch of items in the list, call this operation again, adding the next token to the call. To get all of the items in the list, keep calling this operation with each subsequent next token that is returned, until no more next tokens are returned.

" + }, + "maxResults":{ + "shape":"PageSize", + "documentation":"

The maximum number of paginated compute fleets returned per response. Use nextToken to iterate pages in the list of returned compute fleets.

" + }, + "sortOrder":{ + "shape":"SortOrderType", + "documentation":"

The order in which to list compute fleets. Valid values include:

Use sortBy to specify the criterion to be used to list compute fleet names.

" + }, + "sortBy":{ + "shape":"FleetSortByType", + "documentation":"

The criterion to be used to list compute fleet names. Valid values include:

Use sortOrder to specify in what order to list the compute fleet names based on the preceding criteria.

" + } + } + }, + "ListFleetsOutput":{ + "type":"structure", + "members":{ + "nextToken":{ + "shape":"String", + "documentation":"

If there are more than 100 items in the list, only the first 100 items are returned, along with a unique string called a nextToken. To get the next batch of items in the list, call this operation again, adding the next token to the call.

" + }, + "fleets":{ + "shape":"FleetArns", + "documentation":"

The list of compute fleet names.

" + } + } + }, "ListProjectsInput":{ "type":"structure", "members":{ @@ -2903,7 +3219,7 @@ "members":{ "type":{ "shape":"EnvironmentType", - "documentation":"

The type of build environment to use for related builds.

For more information, see Build environment compute types in the CodeBuild user guide.

" + "documentation":"

The type of build environment to use for related builds.

If you're using compute fleets during project creation, type will be ignored.

For more information, see Build environment compute types in the CodeBuild user guide.

" }, "image":{ "shape":"NonEmptyString", @@ -2911,7 +3227,11 @@ }, "computeType":{ "shape":"ComputeType", - "documentation":"

Information about the compute resources the build project uses. Available values include:

If you use BUILD_GENERAL1_SMALL:

If you use BUILD_GENERAL1_LARGE:

For more information, see Build Environment Compute Types in the CodeBuild User Guide.

" + "documentation":"

Information about the compute resources the build project uses. Available values include:

If you use BUILD_GENERAL1_SMALL:

If you use BUILD_GENERAL1_LARGE:

If you're using compute fleets during project creation, computeType will be ignored.

For more information, see Build Environment Compute Types in the CodeBuild User Guide.

" + }, + "fleet":{ + "shape":"ProjectFleet", + "documentation":"

A ProjectFleet object to use for this build project.

" }, "environmentVariables":{ "shape":"EnvironmentVariables", @@ -2966,6 +3286,16 @@ "type":"list", "member":{"shape":"ProjectFileSystemLocation"} }, + "ProjectFleet":{ + "type":"structure", + "members":{ + "fleetArn":{ + "shape":"String", + "documentation":"

Specifies the compute fleet ARN for the build project.

" + } + }, + "documentation":"

Information about the compute fleet of the build project. For more information, see Working with reserved capacity in CodeBuild.

" + }, "ProjectName":{ "type":"string", "max":255, @@ -3507,6 +3837,46 @@ }, "documentation":"

Information about the S3 bucket where the raw data of a report are exported.

" }, + "ScalingConfigurationInput":{ + "type":"structure", + "members":{ + "scalingType":{ + "shape":"FleetScalingType", + "documentation":"

The scaling type for a compute fleet.

" + }, + "targetTrackingScalingConfigs":{ + "shape":"TargetTrackingScalingConfigurations", + "documentation":"

A list of TargetTrackingScalingConfiguration objects.

" + }, + "maxCapacity":{ + "shape":"FleetCapacity", + "documentation":"

The maximum number of instances in the fleet when auto-scaling.

" + } + }, + "documentation":"

The scaling configuration input of a compute fleet.

" + }, + "ScalingConfigurationOutput":{ + "type":"structure", + "members":{ + "scalingType":{ + "shape":"FleetScalingType", + "documentation":"

The scaling type for a compute fleet.

" + }, + "targetTrackingScalingConfigs":{ + "shape":"TargetTrackingScalingConfigurations", + "documentation":"

A list of TargetTrackingScalingConfiguration objects.

" + }, + "maxCapacity":{ + "shape":"FleetCapacity", + "documentation":"

The maximum number of instances in the fleet when auto-scaling.

" + }, + "desiredCapacity":{ + "shape":"FleetCapacity", + "documentation":"

The desired number of instances in the fleet when auto-scaling.

" + } + }, + "documentation":"

The scaling configuration output of a compute fleet.

" + }, "SecurityGroupIds":{ "type":"list", "member":{"shape":"NonEmptyString"}, @@ -3517,6 +3887,10 @@ "min":1, "sensitive":true }, + "SensitiveString":{ + "type":"string", + "sensitive":true + }, "ServerType":{ "type":"string", "enum":[ @@ -3858,6 +4232,10 @@ "debugSessionEnabled":{ "shape":"WrapperBoolean", "documentation":"

Specifies if session debugging is enabled for this build. For more information, see Viewing a running build in Session Manager.

" + }, + "fleetOverride":{ + "shape":"ProjectFleet", + "documentation":"

A ProjectFleet object specified for this build that overrides the one defined in the build project.

" } } }, @@ -3942,6 +4320,24 @@ "max":50, "min":0 }, + "TargetTrackingScalingConfiguration":{ + "type":"structure", + "members":{ + "metricType":{ + "shape":"FleetScalingMetricType", + "documentation":"

The metric type to determine auto-scaling.

" + }, + "targetValue":{ + "shape":"WrapperDouble", + "documentation":"

The value of metricType at which to start scaling.

" + } + }, + "documentation":"

Defines when a new instance is auto-scaled into the compute fleet.

" + }, + "TargetTrackingScalingConfigurations":{ + "type":"list", + "member":{"shape":"TargetTrackingScalingConfiguration"} + }, "TestCase":{ "type":"structure", "members":{ @@ -4027,6 +4423,45 @@ "min":5 }, "Timestamp":{"type":"timestamp"}, + "UpdateFleetInput":{ + "type":"structure", + "required":["arn"], + "members":{ + "arn":{ + "shape":"NonEmptyString", + "documentation":"

The ARN of the compute fleet.

" + }, + "baseCapacity":{ + "shape":"FleetCapacity", + "documentation":"

The initial number of machines allocated to the compute fleet, which defines the number of builds that can run in parallel.

" + }, + "environmentType":{ + "shape":"EnvironmentType", + "documentation":"

The environment type of the compute fleet.

For more information, see Build environment compute types in the CodeBuild user guide.

" + }, + "computeType":{ + "shape":"ComputeType", + "documentation":"

Information about the compute resources the compute fleet uses. Available values include:

If you use BUILD_GENERAL1_SMALL:

If you use BUILD_GENERAL1_LARGE:

For more information, see Build environment compute types in the CodeBuild User Guide.

" + }, + "scalingConfiguration":{ + "shape":"ScalingConfigurationInput", + "documentation":"

The scaling configuration of the compute fleet.

" + }, + "tags":{ + "shape":"TagList", + "documentation":"

A list of tag key and value pairs associated with this compute fleet.

These tags are available for use by Amazon Web Services services that support CodeBuild build project tags.

" + } + } + }, + "UpdateFleetOutput":{ + "type":"structure", + "members":{ + "fleet":{ + "shape":"Fleet", + "documentation":"

A Fleet object.

" + } + } + }, "UpdateProjectInput":{ "type":"structure", "required":["name"], @@ -4316,6 +4751,7 @@ ] }, "WrapperBoolean":{"type":"boolean"}, + "WrapperDouble":{"type":"double"}, "WrapperInt":{"type":"integer"}, "WrapperLong":{"type":"long"} }, diff -Nru awscli-2.15.9/awscli/botocore/data/codepipeline/2015-07-09/service-2.json awscli-2.15.22/awscli/botocore/data/codepipeline/2015-07-09/service-2.json --- awscli-2.15.9/awscli/botocore/data/codepipeline/2015-07-09/service-2.json 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/awscli/botocore/data/codepipeline/2015-07-09/service-2.json 2024-02-21 17:34:54.000000000 +0000 @@ -526,7 +526,8 @@ "errors":[ {"shape":"ValidationException"}, {"shape":"ConflictException"}, - {"shape":"PipelineNotFoundException"} + {"shape":"PipelineNotFoundException"}, + {"shape":"ConcurrentPipelineExecutionsLimitExceededException"} ], "documentation":"

Starts the specified pipeline. Specifically, it begins processing the latest commit to the source location specified as part of the pipeline.

" }, @@ -865,6 +866,10 @@ "namespace":{ "shape":"ActionNamespace", "documentation":"

The variable namespace associated with the action. All variables produced as output by this action fall under this namespace.

" + }, + "timeoutInMinutes":{ + "shape":"ActionTimeout", + "documentation":"

A timeout duration in minutes that can be applied against the ActionType’s default timeout value specified in Quotas for CodePipeline . This attribute is available only to the manual approval ActionType.

" } }, "documentation":"

Represents information about an action declaration.

" @@ -946,6 +951,10 @@ "shape":"Timestamp", "documentation":"

The last update time of the action execution.

" }, + "updatedBy":{ + "shape":"LastUpdatedBy", + "documentation":"

The ARN of the user who changed the pipeline execution details.

" + }, "status":{ "shape":"ActionExecutionStatus", "documentation":"

The status of the action execution. Status categories are InProgress, Succeeded, and Failed.

" @@ -971,6 +980,10 @@ "pipelineExecutionId":{ "shape":"PipelineExecutionId", "documentation":"

The pipeline execution ID used to filter action execution history.

" + }, + "latestInPipelineExecution":{ + "shape":"LatestInPipelineExecutionFilter", + "documentation":"

The latest execution in the pipeline.

Filtering on the latest execution is available for executions run on or after February 08, 2024.

" } }, "documentation":"

Filter values for the action execution.

" @@ -1039,7 +1052,8 @@ "externalExecutionUrl":{ "shape":"Url", "documentation":"

The deepest external link to the external resource (for example, a repository URL or deployment endpoint) that is used when running the action.

" - } + }, + "errorDetails":{"shape":"ErrorDetails"} }, "documentation":"

Execution result information, such as the external execution ID.

" }, @@ -1144,6 +1158,12 @@ "type":"list", "member":{"shape":"ActionState"} }, + "ActionTimeout":{ + "type":"integer", + "box":true, + "max":86400, + "min":5 + }, "ActionType":{ "type":"structure", "required":[ @@ -1698,6 +1718,14 @@ "documentation":"

Unable to modify the tag due to a simultaneous update request.

", "exception":true }, + "ConcurrentPipelineExecutionsLimitExceededException":{ + "type":"structure", + "members":{ + "message":{"shape":"Message"} + }, + "documentation":"

The pipeline has reached the limit for concurrent pipeline executions.

", + "exception":true + }, "ConflictException":{ "type":"structure", "members":{ @@ -2024,6 +2052,14 @@ "max":1500, "min":1 }, + "ExecutionMode":{ + "type":"string", + "enum":[ + "QUEUED", + "SUPERSEDED", + "PARALLEL" + ] + }, "ExecutionSummary":{ "type":"string", "max":2048, @@ -2278,6 +2314,32 @@ }, "documentation":"

Represents the output of a GetThirdPartyJobDetails action.

" }, + "GitBranchFilterCriteria":{ + "type":"structure", + "members":{ + "includes":{ + "shape":"GitBranchPatternList", + "documentation":"

The list of patterns of Git branches that, when a commit is pushed, are to be included as criteria that starts the pipeline.

" + }, + "excludes":{ + "shape":"GitBranchPatternList", + "documentation":"

The list of patterns of Git branches that, when a commit is pushed, are to be excluded from starting the pipeline.

" + } + }, + "documentation":"

The Git repository branches specified as filter criteria to start the pipeline.

" + }, + "GitBranchNamePattern":{ + "type":"string", + "max":255, + "min":1, + "pattern":".*" + }, + "GitBranchPatternList":{ + "type":"list", + "member":{"shape":"GitBranchNamePattern"}, + "max":8, + "min":1 + }, "GitConfiguration":{ "type":"structure", "required":["sourceActionName"], @@ -2288,10 +2350,78 @@ }, "push":{ "shape":"GitPushFilterList", - "documentation":"

The field where the repository event that will start the pipeline, such as pushing Git tags, is specified with details.

Git tags is the only supported event type.

" + "documentation":"

The field where the repository event that will start the pipeline, such as pushing Git tags, is specified with details.

" + }, + "pullRequest":{ + "shape":"GitPullRequestFilterList", + "documentation":"

The field where the repository event that will start the pipeline is specified as pull requests.

" } }, - "documentation":"

A type of trigger configuration for Git-based source actions.

You can specify the Git configuration trigger type for all third-party Git-based source actions that are supported by the CodeStarSourceConnection action type.

V2 type pipelines, along with triggers on Git tags and pipeline-level variables, are not currently supported for CloudFormation and CDK resources in CodePipeline. For more information about V2 type pipelines, see Pipeline types in the CodePipeline User Guide.

" + "documentation":"

A type of trigger configuration for Git-based source actions.

You can specify the Git configuration trigger type for all third-party Git-based source actions that are supported by the CodeStarSourceConnection action type.

" + }, + "GitFilePathFilterCriteria":{ + "type":"structure", + "members":{ + "includes":{ + "shape":"GitFilePathPatternList", + "documentation":"

The list of patterns of Git repository file paths that, when a commit is pushed, are to be included as criteria that starts the pipeline.

" + }, + "excludes":{ + "shape":"GitFilePathPatternList", + "documentation":"

The list of patterns of Git repository file paths that, when a commit is pushed, are to be excluded from starting the pipeline.

" + } + }, + "documentation":"

The Git repository file paths specified as filter criteria to start the pipeline.

" + }, + "GitFilePathPattern":{ + "type":"string", + "max":255, + "min":1, + "pattern":".*" + }, + "GitFilePathPatternList":{ + "type":"list", + "member":{"shape":"GitFilePathPattern"}, + "max":8, + "min":1 + }, + "GitPullRequestEventType":{ + "type":"string", + "enum":[ + "OPEN", + "UPDATED", + "CLOSED" + ] + }, + "GitPullRequestEventTypeList":{ + "type":"list", + "member":{"shape":"GitPullRequestEventType"}, + "max":3, + "min":1 + }, + "GitPullRequestFilter":{ + "type":"structure", + "members":{ + "events":{ + "shape":"GitPullRequestEventTypeList", + "documentation":"

The field that specifies which pull request events to filter on (opened, updated, closed) for the trigger configuration.

" + }, + "branches":{ + "shape":"GitBranchFilterCriteria", + "documentation":"

The field that specifies to filter on branches for the pull request trigger configuration.

" + }, + "filePaths":{ + "shape":"GitFilePathFilterCriteria", + "documentation":"

The field that specifies to filter on file paths for the pull request trigger configuration.

" + } + }, + "documentation":"

The event criteria for the pull request trigger configuration, such as the lists of branches or file paths to include and exclude.

" + }, + "GitPullRequestFilterList":{ + "type":"list", + "member":{"shape":"GitPullRequestFilter"}, + "max":3, + "min":1 }, "GitPushFilter":{ "type":"structure", @@ -2299,6 +2429,14 @@ "tags":{ "shape":"GitTagFilterCriteria", "documentation":"

The field that contains the details for the Git tags trigger configuration.

" + }, + "branches":{ + "shape":"GitBranchFilterCriteria", + "documentation":"

The field that specifies to filter on branches for the push trigger configuration.

" + }, + "filePaths":{ + "shape":"GitFilePathFilterCriteria", + "documentation":"

The field that specifies to filter on file paths for the push trigger configuration.

" } }, "documentation":"

The event criteria that specify when a specified repository event will start the pipeline for the specified trigger configuration, such as the lists of Git tags to include and exclude.

" @@ -2306,7 +2444,7 @@ "GitPushFilterList":{ "type":"list", "member":{"shape":"GitPushFilter"}, - "max":1, + "max":3, "min":1 }, "GitTagFilterCriteria":{ @@ -2598,6 +2736,24 @@ "LastChangedAt":{"type":"timestamp"}, "LastChangedBy":{"type":"string"}, "LastUpdatedBy":{"type":"string"}, + "LatestInPipelineExecutionFilter":{ + "type":"structure", + "required":[ + "pipelineExecutionId", + "startTimeRange" + ], + "members":{ + "pipelineExecutionId":{ + "shape":"PipelineExecutionId", + "documentation":"

The execution ID for the latest execution in the pipeline.

" + }, + "startTimeRange":{ + "shape":"StartTimeRange", + "documentation":"

The start time to filter on for the latest execution in the pipeline. Valid options:

" + } + }, + "documentation":"

The field that specifies to filter on the latest execution in the pipeline.

Filtering on the latest execution is available for executions run on or after February 08, 2024.

" + }, "LimitExceededException":{ "type":"structure", "members":{ @@ -2990,17 +3146,21 @@ "shape":"PipelineVersion", "documentation":"

The version number of the pipeline. A new pipeline always has a version number of 1. This number is incremented when a pipeline is updated.

" }, + "executionMode":{ + "shape":"ExecutionMode", + "documentation":"

The method that the pipeline will use to handle multiple executions. The default mode is SUPERSEDED.

" + }, "pipelineType":{ "shape":"PipelineType", - "documentation":"

CodePipeline provides the following pipeline types, which differ in characteristics and price, so that you can tailor your pipeline features and cost to the needs of your applications.

Including V2 parameters, such as triggers on Git tags, in the pipeline JSON when creating or updating a pipeline will result in the pipeline having the V2 type of pipeline and the associated costs.

For information about pricing for CodePipeline, see Pricing.

For information about which type of pipeline to choose, see What type of pipeline is right for me?.

V2 type pipelines, along with triggers on Git tags and pipeline-level variables, are not currently supported for CloudFormation and CDK resources in CodePipeline. For more information about V2 type pipelines, see Pipeline types in the CodePipeline User Guide.

" - }, - "triggers":{ - "shape":"PipelineTriggerDeclarationList", - "documentation":"

The trigger configuration specifying a type of event, such as Git tags, that starts the pipeline.

When a trigger configuration is specified, default change detection for repository and branch commits is disabled.

" + "documentation":"

CodePipeline provides the following pipeline types, which differ in characteristics and price, so that you can tailor your pipeline features and cost to the needs of your applications.

Including V2 parameters, such as triggers on Git tags, in the pipeline JSON when creating or updating a pipeline will result in the pipeline having the V2 type of pipeline and the associated costs.

For information about pricing for CodePipeline, see Pricing.

For information about which type of pipeline to choose, see What type of pipeline is right for me?.

" }, "variables":{ "shape":"PipelineVariableDeclarationList", "documentation":"

A list that defines the pipeline variables for a pipeline resource. Variable names can have alphanumeric and underscore characters, and the values must match [A-Za-z0-9@\\-_]+.

" + }, + "triggers":{ + "shape":"PipelineTriggerDeclarationList", + "documentation":"

The trigger configuration specifying a type of event, such as Git tags, that starts the pipeline.

When a trigger configuration is specified, default change detection for repository and branch commits is disabled.

" } }, "documentation":"

Represents the structure of actions and stages to be performed in the pipeline.

" @@ -3032,10 +3192,14 @@ "shape":"ArtifactRevisionList", "documentation":"

A list of ArtifactRevision objects included in a pipeline execution.

" }, - "trigger":{"shape":"ExecutionTrigger"}, "variables":{ "shape":"ResolvedPipelineVariableList", "documentation":"

A list of pipeline variables used for the pipeline execution.

" + }, + "trigger":{"shape":"ExecutionTrigger"}, + "executionMode":{ + "shape":"ExecutionMode", + "documentation":"

The method that the pipeline will use to handle multiple executions. The default mode is SUPERSEDED.

" } }, "documentation":"

Represents information about an execution of a pipeline.

" @@ -3102,6 +3266,10 @@ "stopTrigger":{ "shape":"StopExecutionTrigger", "documentation":"

The interaction that stopped a pipeline execution.

" + }, + "executionMode":{ + "shape":"ExecutionMode", + "documentation":"

The method that the pipeline will use to handle multiple executions. The default mode is SUPERSEDED.

" } }, "documentation":"

Summary information about a pipeline execution.

" @@ -3173,7 +3341,11 @@ }, "pipelineType":{ "shape":"PipelineType", - "documentation":"

CodePipeline provides the following pipeline types, which differ in characteristics and price, so that you can tailor your pipeline features and cost to the needs of your applications.

Including V2 parameters, such as triggers on Git tags, in the pipeline JSON when creating or updating a pipeline will result in the pipeline having the V2 type of pipeline and the associated costs.

For information about pricing for CodePipeline, see Pricing.

For information about which type of pipeline to choose, see What type of pipeline is right for me?.

V2 type pipelines, along with triggers on Git tags and pipeline-level variables, are not currently supported for CloudFormation and CDK resources in CodePipeline. For more information about V2 type pipelines, see Pipeline types in the CodePipeline User Guide.

" + "documentation":"

CodePipeline provides the following pipeline types, which differ in characteristics and price, so that you can tailor your pipeline features and cost to the needs of your applications.

Including V2 parameters, such as triggers on Git tags, in the pipeline JSON when creating or updating a pipeline will result in the pipeline having the V2 type of pipeline and the associated costs.

For information about pricing for CodePipeline, see Pricing.

For information about which type of pipeline to choose, see What type of pipeline is right for me?.

" + }, + "executionMode":{ + "shape":"ExecutionMode", + "documentation":"

The method that the pipeline will use to handle multiple executions. The default mode is SUPERSEDED.

" }, "created":{ "shape":"Timestamp", @@ -3202,12 +3374,12 @@ "documentation":"

Provides the filter criteria and the source stage for the repository event that starts the pipeline, such as Git tags.

" } }, - "documentation":"

Represents information about the specified trigger configuration, such as the filter criteria and the source stage for the action that contains the trigger.

This is only supported for the CodeStarSourceConnection action type.

When a trigger configuration is specified, default change detection for repository and branch commits is disabled.

V2 type pipelines, along with triggers on Git tags and pipeline-level variables, are not currently supported for CloudFormation and CDK resources in CodePipeline. For more information about V2 type pipelines, see Pipeline types in the CodePipeline User Guide.

" + "documentation":"

Represents information about the specified trigger configuration, such as the filter criteria and the source stage for the action that contains the trigger.

This is only supported for the CodeStarSourceConnection action type.

When a trigger configuration is specified, default change detection for repository and branch commits is disabled.

" }, "PipelineTriggerDeclarationList":{ "type":"list", "member":{"shape":"PipelineTriggerDeclaration"}, - "max":20 + "max":50 }, "PipelineTriggerProviderType":{ "type":"string", @@ -3236,7 +3408,7 @@ "documentation":"

The value of a pipeline-level variable.

" } }, - "documentation":"

A pipeline-level variable used for a pipeline execution.

V2 type pipelines, along with triggers on Git tags and pipeline-level variables, are not currently supported for CloudFormation and CDK resources in CodePipeline. For more information about V2 type pipelines, see Pipeline types in the CodePipeline User Guide.

" + "documentation":"

A pipeline-level variable used for a pipeline execution.

" }, "PipelineVariableDeclaration":{ "type":"structure", @@ -3255,7 +3427,7 @@ "documentation":"

The description of a pipeline-level variable. It's used to add additional context about the variable, and not being used at time when pipeline executes.

" } }, - "documentation":"

A variable declared at the pipeline level.

V2 type pipelines, along with triggers on Git tags and pipeline-level variables, are not currently supported for CloudFormation and CDK resources in CodePipeline. For more information about V2 type pipelines, see Pipeline types in the CodePipeline User Guide.

" + "documentation":"

A variable declared at the pipeline level.

" }, "PipelineVariableDeclarationList":{ "type":"list", @@ -3879,6 +4051,10 @@ }, "documentation":"

Represents information about the run of a stage.

" }, + "StageExecutionList":{ + "type":"list", + "member":{"shape":"StageExecution"} + }, "StageExecutionStatus":{ "type":"string", "enum":[ @@ -3925,6 +4101,10 @@ "documentation":"

The name of the stage.

" }, "inboundExecution":{"shape":"StageExecution"}, + "inboundExecutions":{ + "shape":"StageExecutionList", + "documentation":"

The inbound executions for a stage.

" + }, "inboundTransitionState":{ "shape":"TransitionState", "documentation":"

The state of the inbound transition, which is either enabled or disabled.

" @@ -3985,6 +4165,13 @@ }, "documentation":"

Represents the output of a StartPipelineExecution action.

" }, + "StartTimeRange":{ + "type":"string", + "enum":[ + "Latest", + "All" + ] + }, "StopExecutionTrigger":{ "type":"structure", "members":{ diff -Nru awscli-2.15.9/awscli/botocore/data/cognito-idp/2016-04-18/service-2.json awscli-2.15.22/awscli/botocore/data/cognito-idp/2016-04-18/service-2.json --- awscli-2.15.9/awscli/botocore/data/cognito-idp/2016-04-18/service-2.json 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/awscli/botocore/data/cognito-idp/2016-04-18/service-2.json 2024-02-21 17:34:54.000000000 +0000 @@ -571,7 +571,7 @@ {"shape":"SoftwareTokenMFANotFoundException"}, {"shape":"ForbiddenException"} ], - "documentation":"

Begins setup of time-based one-time password (TOTP) multi-factor authentication (MFA) for a user, with a unique private key that Amazon Cognito generates and returns in the API response. You can authorize an AssociateSoftwareToken request with either the user's access token, or a session string from a challenge response that you received from Amazon Cognito.

Amazon Cognito disassociates an existing software token when you verify the new token in a VerifySoftwareToken API request. If you don't verify the software token and your user pool doesn't require MFA, the user can then authenticate with user name and password credentials alone. If your user pool requires TOTP MFA, Amazon Cognito generates an MFA_SETUP or SOFTWARE_TOKEN_SETUP challenge each time your user signs. Complete setup with AssociateSoftwareToken and VerifySoftwareToken.

After you set up software token MFA for your user, Amazon Cognito generates a SOFTWARE_TOKEN_MFA challenge when they authenticate. Respond to this challenge with your user's TOTP.

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.

", + "documentation":"

Begins setup of time-based one-time password (TOTP) multi-factor authentication (MFA) for a user, with a unique private key that Amazon Cognito generates and returns in the API response. You can authorize an AssociateSoftwareToken request with either the user's access token, or a session string from a challenge response that you received from Amazon Cognito.

Amazon Cognito disassociates an existing software token when you verify the new token in a VerifySoftwareToken API request. If you don't verify the software token and your user pool doesn't require MFA, the user can then authenticate with user name and password credentials alone. If your user pool requires TOTP MFA, Amazon Cognito generates an MFA_SETUP or SOFTWARE_TOKEN_SETUP challenge each time your user signs. Complete setup with AssociateSoftwareToken and VerifySoftwareToken.

After you set up software token MFA for your user, Amazon Cognito generates a SOFTWARE_TOKEN_MFA challenge when they authenticate. Respond to this challenge with your user's TOTP.

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.

", "authtype":"none" }, "ChangePassword":{ @@ -595,7 +595,7 @@ {"shape":"InternalErrorException"}, {"shape":"ForbiddenException"} ], - "documentation":"

Changes the password for a specified user in a user pool.

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.

", + "documentation":"

Changes the password for a specified user in a user pool.

Authorize this action with a signed-in user's access token. It must include the scope aws.cognito.signin.user.admin.

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.

", "authtype":"none" }, "ConfirmDevice":{ @@ -621,7 +621,7 @@ {"shape":"InternalErrorException"}, {"shape":"ForbiddenException"} ], - "documentation":"

Confirms tracking of the device. This API call is the call that begins device tracking.

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.

", + "documentation":"

Confirms tracking of the device. This API call is the call that begins device tracking. For more information about device authentication, see Working with user devices in your user pool.

Authorize this action with a signed-in user's access token. It must include the scope aws.cognito.signin.user.admin.

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.

", "authtype":"none" }, "ConfirmForgotPassword":{ @@ -650,7 +650,7 @@ {"shape":"InternalErrorException"}, {"shape":"ForbiddenException"} ], - "documentation":"

Allows a user to enter a confirmation code to reset a forgotten password.

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.

", + "documentation":"

Allows a user to enter a confirmation code to reset a forgotten password.

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.

", "authtype":"none" }, "ConfirmSignUp":{ @@ -678,7 +678,7 @@ {"shape":"InternalErrorException"}, {"shape":"ForbiddenException"} ], - "documentation":"

This public API operation provides a code that Amazon Cognito sent to your user when they signed up in your user pool via the SignUp API operation. After your user enters their code, they confirm ownership of the email address or phone number that they provided, and their user account becomes active. Depending on your user pool configuration, your users will receive their confirmation code in an email or SMS message.

Local users who signed up in your user pool are the only type of user who can confirm sign-up with a code. Users who federate through an external identity provider (IdP) have already been confirmed by their IdP. Administrator-created users, users created with the AdminCreateUser API operation, confirm their accounts when they respond to their invitation email message and choose a password. They do not receive a confirmation code. Instead, they receive a temporary password.

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.

", + "documentation":"

This public API operation provides a code that Amazon Cognito sent to your user when they signed up in your user pool via the SignUp API operation. After your user enters their code, they confirm ownership of the email address or phone number that they provided, and their user account becomes active. Depending on your user pool configuration, your users will receive their confirmation code in an email or SMS message.

Local users who signed up in your user pool are the only type of user who can confirm sign-up with a code. Users who federate through an external identity provider (IdP) have already been confirmed by their IdP. Administrator-created users, users created with the AdminCreateUser API operation, confirm their accounts when they respond to their invitation email message and choose a password. They do not receive a confirmation code. Instead, they receive a temporary password.

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.

", "authtype":"none" }, "CreateGroup":{ @@ -717,7 +717,7 @@ {"shape":"LimitExceededException"}, {"shape":"InternalErrorException"} ], - "documentation":"

Creates an IdP for a user pool.

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.

Learn more

" + "documentation":"

Adds a configuration and trust relationship between a third-party identity provider (IdP) and a user pool.

Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy.

Learn more

" }, "CreateResourceServer":{ "name":"CreateResourceServer", @@ -882,7 +882,7 @@ {"shape":"InternalErrorException"}, {"shape":"ForbiddenException"} ], - "documentation":"

Allows a user to delete their own user profile.

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.

", + "documentation":"

Allows a user to delete their own user profile.

Authorize this action with a signed-in user's access token. It must include the scope aws.cognito.signin.user.admin.

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.

", "authtype":"none" }, "DeleteUserAttributes":{ @@ -904,7 +904,7 @@ {"shape":"InternalErrorException"}, {"shape":"ForbiddenException"} ], - "documentation":"

Deletes the attributes for a user.

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.

", + "documentation":"

Deletes the attributes for a user.

Authorize this action with a signed-in user's access token. It must include the scope aws.cognito.signin.user.admin.

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.

", "authtype":"none" }, "DeleteUserPool":{ @@ -1096,7 +1096,7 @@ {"shape":"InternalErrorException"}, {"shape":"ForbiddenException"} ], - "documentation":"

Forgets the specified device.

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.

", + "documentation":"

Forgets the specified device. For more information about device authentication, see Working with user devices in your user pool.

Authorize this action with a signed-in user's access token. It must include the scope aws.cognito.signin.user.admin.

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.

", "authtype":"none" }, "ForgotPassword":{ @@ -1124,7 +1124,7 @@ {"shape":"InternalErrorException"}, {"shape":"ForbiddenException"} ], - "documentation":"

Calling this API causes a message to be sent to the end user with a confirmation code that is required to change the user's password. For the Username parameter, you can use the username or user alias. The method used to send the confirmation code is sent according to the specified AccountRecoverySetting. For more information, see Recovering User Accounts in the Amazon Cognito Developer Guide. To use the confirmation code for resetting the password, call ConfirmForgotPassword.

If neither a verified phone number nor a verified email exists, this API returns InvalidParameterException. If your app client has a client secret and you don't provide a SECRET_HASH parameter, this API returns NotAuthorizedException.

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.

This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in.

If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide.

", + "documentation":"

Calling this API causes a message to be sent to the end user with a confirmation code that is required to change the user's password. For the Username parameter, you can use the username or user alias. The method used to send the confirmation code is sent according to the specified AccountRecoverySetting. For more information, see Recovering User Accounts in the Amazon Cognito Developer Guide. To use the confirmation code for resetting the password, call ConfirmForgotPassword.

If neither a verified phone number nor a verified email exists, this API returns InvalidParameterException. If your app client has a client secret and you don't provide a SECRET_HASH parameter, this API returns NotAuthorizedException.

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.

This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in.

If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide.

", "authtype":"none" }, "GetCSVHeader":{ @@ -1164,7 +1164,7 @@ {"shape":"InternalErrorException"}, {"shape":"ForbiddenException"} ], - "documentation":"

Gets the device.

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.

", + "documentation":"

Gets the device. For more information about device authentication, see Working with user devices in your user pool.

Authorize this action with a signed-in user's access token. It must include the scope aws.cognito.signin.user.admin.

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.

", "authtype":"none" }, "GetGroup":{ @@ -1269,7 +1269,7 @@ {"shape":"InternalErrorException"}, {"shape":"ForbiddenException"} ], - "documentation":"

Gets the user attributes and metadata for a user.

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.

", + "documentation":"

Gets the user attributes and metadata for a user.

Authorize this action with a signed-in user's access token. It must include the scope aws.cognito.signin.user.admin.

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.

", "authtype":"none" }, "GetUserAttributeVerificationCode":{ @@ -1299,7 +1299,7 @@ {"shape":"InternalErrorException"}, {"shape":"ForbiddenException"} ], - "documentation":"

Generates a user attribute verification code for the specified attribute name. Sends a message to a user with a code that they must return in a VerifyUserAttribute request.

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.

This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in.

If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide.

", + "documentation":"

Generates a user attribute verification code for the specified attribute name. Sends a message to a user with a code that they must return in a VerifyUserAttribute request.

Authorize this action with a signed-in user's access token. It must include the scope aws.cognito.signin.user.admin.

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.

This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in.

If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide.

", "authtype":"none" }, "GetUserPoolMfaConfig":{ @@ -1337,7 +1337,7 @@ {"shape":"InternalErrorException"}, {"shape":"ForbiddenException"} ], - "documentation":"

Invalidates the identity, access, and refresh tokens that Amazon Cognito issued to a user. Call this operation when your user signs out of your app. This results in the following behavior.

Other requests might be valid until your user's token expires.

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.

", + "documentation":"

Invalidates the identity, access, and refresh tokens that Amazon Cognito issued to a user. Call this operation when your user signs out of your app. This results in the following behavior.

Other requests might be valid until your user's token expires.

Authorize this action with a signed-in user's access token. It must include the scope aws.cognito.signin.user.admin.

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.

", "authtype":"none" }, "InitiateAuth":{ @@ -1365,7 +1365,7 @@ {"shape":"InvalidSmsRoleTrustRelationshipException"}, {"shape":"ForbiddenException"} ], - "documentation":"

Initiates sign-in for a user in the Amazon Cognito user directory. You can't sign in a user with a federated IdP with InitiateAuth. For more information, see Adding user pool sign-in through a third party.

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.

This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in.

If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide.

", + "documentation":"

Initiates sign-in for a user in the Amazon Cognito user directory. You can't sign in a user with a federated IdP with InitiateAuth. For more information, see Adding user pool sign-in through a third party.

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.

This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in.

If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode, you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide.

", "authtype":"none" }, "ListDevices":{ @@ -1388,7 +1388,7 @@ {"shape":"InternalErrorException"}, {"shape":"ForbiddenException"} ], - "documentation":"

Lists the sign-in devices that Amazon Cognito has registered to the current user.

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.

", + "documentation":"

Lists the sign-in devices that Amazon Cognito has registered to the current user. For more information about device authentication, see Working with user devices in your user pool.

Authorize this action with a signed-in user's access token. It must include the scope aws.cognito.signin.user.admin.

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.

", "authtype":"none" }, "ListGroups":{ @@ -1568,7 +1568,7 @@ {"shape":"InternalErrorException"}, {"shape":"ForbiddenException"} ], - "documentation":"

Resends the confirmation (for confirmation of registration) to a specific user in the user pool.

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.

This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in.

If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide.

", + "documentation":"

Resends the confirmation (for confirmation of registration) to a specific user in the user pool.

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.

This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in.

If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode, you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide.

", "authtype":"none" }, "RespondToAuthChallenge":{ @@ -1602,7 +1602,7 @@ {"shape":"SoftwareTokenMFANotFoundException"}, {"shape":"ForbiddenException"} ], - "documentation":"

Some API operations in a user pool generate a challenge, like a prompt for an MFA code, for device authentication that bypasses MFA, or for a custom authentication challenge. A RespondToAuthChallenge API request provides the answer to that challenge, like a code or a secure remote password (SRP). The parameters of a response to an authentication challenge vary with the type of challenge.

For more information about custom authentication challenges, see Custom authentication challenge Lambda triggers.

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.

This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in.

If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide.

", + "documentation":"

Some API operations in a user pool generate a challenge, like a prompt for an MFA code, for device authentication that bypasses MFA, or for a custom authentication challenge. A RespondToAuthChallenge API request provides the answer to that challenge, like a code or a secure remote password (SRP). The parameters of a response to an authentication challenge vary with the type of challenge.

For more information about custom authentication challenges, see Custom authentication challenge Lambda triggers.

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.

This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in.

If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode, you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide.

", "authtype":"none" }, "RevokeToken":{ @@ -1622,7 +1622,7 @@ {"shape":"UnsupportedTokenTypeException"}, {"shape":"ForbiddenException"} ], - "documentation":"

Revokes all of the access tokens generated by, and at the same time as, the specified refresh token. After a token is revoked, you can't use the revoked token to access Amazon Cognito user APIs, or to authorize access to your resource server.

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.

", + "documentation":"

Revokes all of the access tokens generated by, and at the same time as, the specified refresh token. After a token is revoked, you can't use the revoked token to access Amazon Cognito user APIs, or to authorize access to your resource server.

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.

", "authtype":"none" }, "SetLogDeliveryConfiguration":{ @@ -1697,7 +1697,7 @@ {"shape":"InternalErrorException"}, {"shape":"ForbiddenException"} ], - "documentation":"

Set the user's multi-factor authentication (MFA) method preference, including which MFA factors are activated and if any are preferred. Only one factor can be set as preferred. The preferred MFA factor will be used to authenticate a user if multiple factors are activated. If multiple options are activated and no preference is set, a challenge to choose an MFA option will be returned during sign-in. If an MFA type is activated for a user, the user will be prompted for MFA during all sign-in attempts unless device tracking is turned on and the device has been trusted. If you want MFA to be applied selectively based on the assessed risk level of sign-in attempts, deactivate MFA for users and turn on Adaptive Authentication for the user pool.

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.

", + "documentation":"

Set the user's multi-factor authentication (MFA) method preference, including which MFA factors are activated and if any are preferred. Only one factor can be set as preferred. The preferred MFA factor will be used to authenticate a user if multiple factors are activated. If multiple options are activated and no preference is set, a challenge to choose an MFA option will be returned during sign-in. If an MFA type is activated for a user, the user will be prompted for MFA during all sign-in attempts unless device tracking is turned on and the device has been trusted. If you want MFA to be applied selectively based on the assessed risk level of sign-in attempts, deactivate MFA for users and turn on Adaptive Authentication for the user pool.

Authorize this action with a signed-in user's access token. It must include the scope aws.cognito.signin.user.admin.

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.

", "authtype":"none" }, "SetUserPoolMfaConfig":{ @@ -1737,7 +1737,7 @@ {"shape":"InternalErrorException"}, {"shape":"ForbiddenException"} ], - "documentation":"

This action is no longer supported. You can use it to configure only SMS MFA. You can't use it to configure time-based one-time password (TOTP) software token MFA. To configure either type of MFA, use SetUserMFAPreference instead.

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.

", + "documentation":"

This action is no longer supported. You can use it to configure only SMS MFA. You can't use it to configure time-based one-time password (TOTP) software token MFA. To configure either type of MFA, use SetUserMFAPreference instead.

Authorize this action with a signed-in user's access token. It must include the scope aws.cognito.signin.user.admin.

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.

", "authtype":"none" }, "SignUp":{ @@ -1765,7 +1765,7 @@ {"shape":"CodeDeliveryFailureException"}, {"shape":"ForbiddenException"} ], - "documentation":"

Registers the user in the specified user pool and creates a user name, password, and user attributes.

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.

This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in.

If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide.

", + "documentation":"

Registers the user in the specified user pool and creates a user name, password, and user attributes.

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.

This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in.

If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode, you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide.

", "authtype":"none" }, "StartUserImportJob":{ @@ -1855,7 +1855,7 @@ {"shape":"UserPoolAddOnNotEnabledException"}, {"shape":"InternalErrorException"} ], - "documentation":"

Provides the feedback for an authentication event, whether it was from a valid user or not. This feedback is used for improving the risk evaluation decision for the user pool as part of Amazon Cognito advanced security.

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.

", + "documentation":"

Provides the feedback for an authentication event, whether it was from a valid user or not. This feedback is used for improving the risk evaluation decision for the user pool as part of Amazon Cognito advanced security.

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.

", "authtype":"none" }, "UpdateDeviceStatus":{ @@ -1878,7 +1878,7 @@ {"shape":"InternalErrorException"}, {"shape":"ForbiddenException"} ], - "documentation":"

Updates the device status.

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.

", + "documentation":"

Updates the device status. For more information about device authentication, see Working with user devices in your user pool.

Authorize this action with a signed-in user's access token. It must include the scope aws.cognito.signin.user.admin.

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.

", "authtype":"none" }, "UpdateGroup":{ @@ -1963,7 +1963,7 @@ {"shape":"InternalErrorException"}, {"shape":"ForbiddenException"} ], - "documentation":"

With this operation, your users can update one or more of their attributes with their own credentials. You authorize this API request with the user's access token. To delete an attribute from your user, submit the attribute in your API request with a blank value. Custom attribute values in this request must include the custom: prefix.

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.

This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in.

If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide.

", + "documentation":"

With this operation, your users can update one or more of their attributes with their own credentials. You authorize this API request with the user's access token. To delete an attribute from your user, submit the attribute in your API request with a blank value. Custom attribute values in this request must include the custom: prefix.

Authorize this action with a signed-in user's access token. It must include the scope aws.cognito.signin.user.admin.

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.

This action might generate an SMS text message. Starting June 1, 2021, US telecom carriers require you to register an origination phone number before you can send SMS messages to US phone numbers. If you use SMS text messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in.

If you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode, you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide.

", "authtype":"none" }, "UpdateUserPool":{ @@ -2050,7 +2050,7 @@ {"shape":"CodeMismatchException"}, {"shape":"ForbiddenException"} ], - "documentation":"

Use this API to register a user's entered time-based one-time password (TOTP) code and mark the user's software token MFA status as \"verified\" if successful. The request takes an access token or a session string, but not both.

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.

", + "documentation":"

Use this API to register a user's entered time-based one-time password (TOTP) code and mark the user's software token MFA status as \"verified\" if successful. The request takes an access token or a session string, but not both.

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.

", "authtype":"none" }, "VerifyUserAttribute":{ @@ -2076,7 +2076,7 @@ {"shape":"AliasExistsException"}, {"shape":"ForbiddenException"} ], - "documentation":"

Verifies the specified user attributes in the user pool.

If your user pool requires verification before Amazon Cognito updates the attribute value, VerifyUserAttribute updates the affected attribute to its pending value. For more information, see UserAttributeUpdateSettingsType.

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito native and OIDC APIs.

", + "documentation":"

Verifies the specified user attributes in the user pool.

If your user pool requires verification before Amazon Cognito updates the attribute value, VerifyUserAttribute updates the affected attribute to its pending value. For more information, see UserAttributeUpdateSettingsType.

Authorize this action with a signed-in user's access token. It must include the scope aws.cognito.signin.user.admin.

Amazon Cognito doesn't evaluate Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you can't use IAM credentials to authorize requests, and you can't grant IAM permissions in policies. For more information about authorization models in Amazon Cognito, see Using the Amazon Cognito user pools API and user pool endpoints.

", "authtype":"none" } }, @@ -3809,7 +3809,7 @@ }, "ProviderDetails":{ "shape":"ProviderDetailsType", - "documentation":"

The IdP details. The following list describes the provider detail keys for each IdP type.

" + "documentation":"

The scopes, URLs, and identifiers for your external identity provider. The following examples describe the provider detail keys for each IdP type. These values and their schema are subject to change. Social IdP authorize_scopes values must match the values listed here.

OpenID Connect (OIDC)

Amazon Cognito accepts the following elements when it can't discover endpoint URLs from oidc_issuer: attributes_url, authorize_url, jwks_uri, token_url.

Create or update request: \"ProviderDetails\": { \"attributes_request_method\": \"GET\", \"attributes_url\": \"https://auth.example.com/userInfo\", \"authorize_scopes\": \"openid profile email\", \"authorize_url\": \"https://auth.example.com/authorize\", \"client_id\": \"1example23456789\", \"client_secret\": \"provider-app-client-secret\", \"jwks_uri\": \"https://auth.example.com/.well-known/jwks.json\", \"oidc_issuer\": \"https://auth.example.com\", \"token_url\": \"https://example.com/token\" }

Describe response: \"ProviderDetails\": { \"attributes_request_method\": \"GET\", \"attributes_url\": \"https://auth.example.com/userInfo\", \"attributes_url_add_attributes\": \"false\", \"authorize_scopes\": \"openid profile email\", \"authorize_url\": \"https://auth.example.com/authorize\", \"client_id\": \"1example23456789\", \"client_secret\": \"provider-app-client-secret\", \"jwks_uri\": \"https://auth.example.com/.well-known/jwks.json\", \"oidc_issuer\": \"https://auth.example.com\", \"token_url\": \"https://example.com/token\" }

SAML

Create or update request with Metadata URL: \"ProviderDetails\": { \"IDPInit\": \"true\", \"IDPSignout\": \"true\", \"EncryptedResponses\" : \"true\", \"MetadataURL\": \"https://auth.example.com/sso/saml/metadata\", \"RequestSigningAlgorithm\": \"rsa-sha256\" }

Create or update request with Metadata file: \"ProviderDetails\": { \"IDPInit\": \"true\", \"IDPSignout\": \"true\", \"EncryptedResponses\" : \"true\", \"MetadataFile\": \"[metadata XML]\", \"RequestSigningAlgorithm\": \"rsa-sha256\" }

The value of MetadataFile must be the plaintext metadata document with all quote (\") characters escaped by backslashes.

Describe response: \"ProviderDetails\": { \"IDPInit\": \"true\", \"IDPSignout\": \"true\", \"EncryptedResponses\" : \"true\", \"ActiveEncryptionCertificate\": \"[certificate]\", \"MetadataURL\": \"https://auth.example.com/sso/saml/metadata\", \"RequestSigningAlgorithm\": \"rsa-sha256\", \"SLORedirectBindingURI\": \"https://auth.example.com/slo/saml\", \"SSORedirectBindingURI\": \"https://auth.example.com/sso/saml\" }

LoginWithAmazon

Create or update request: \"ProviderDetails\": { \"authorize_scopes\": \"profile postal_code\", \"client_id\": \"amzn1.application-oa2-client.1example23456789\", \"client_secret\": \"provider-app-client-secret\" }

Describe response: \"ProviderDetails\": { \"attributes_url\": \"https://api.amazon.com/user/profile\", \"attributes_url_add_attributes\": \"false\", \"authorize_scopes\": \"profile postal_code\", \"authorize_url\": \"https://www.amazon.com/ap/oa\", \"client_id\": \"amzn1.application-oa2-client.1example23456789\", \"client_secret\": \"provider-app-client-secret\", \"token_request_method\": \"POST\", \"token_url\": \"https://api.amazon.com/auth/o2/token\" }

Google

Create or update request: \"ProviderDetails\": { \"authorize_scopes\": \"email profile openid\", \"client_id\": \"1example23456789.apps.googleusercontent.com\", \"client_secret\": \"provider-app-client-secret\" }

Describe response: \"ProviderDetails\": { \"attributes_url\": \"https://people.googleapis.com/v1/people/me?personFields=\", \"attributes_url_add_attributes\": \"true\", \"authorize_scopes\": \"email profile openid\", \"authorize_url\": \"https://accounts.google.com/o/oauth2/v2/auth\", \"client_id\": \"1example23456789.apps.googleusercontent.com\", \"client_secret\": \"provider-app-client-secret\", \"oidc_issuer\": \"https://accounts.google.com\", \"token_request_method\": \"POST\", \"token_url\": \"https://www.googleapis.com/oauth2/v4/token\" }

SignInWithApple

Create or update request: \"ProviderDetails\": { \"authorize_scopes\": \"email name\", \"client_id\": \"com.example.cognito\", \"private_key\": \"1EXAMPLE\", \"key_id\": \"2EXAMPLE\", \"team_id\": \"3EXAMPLE\" }

Describe response: \"ProviderDetails\": { \"attributes_url_add_attributes\": \"false\", \"authorize_scopes\": \"email name\", \"authorize_url\": \"https://appleid.apple.com/auth/authorize\", \"client_id\": \"com.example.cognito\", \"key_id\": \"1EXAMPLE\", \"oidc_issuer\": \"https://appleid.apple.com\", \"team_id\": \"2EXAMPLE\", \"token_request_method\": \"POST\", \"token_url\": \"https://appleid.apple.com/auth/token\" }

Facebook

Create or update request: \"ProviderDetails\": { \"api_version\": \"v17.0\", \"authorize_scopes\": \"public_profile, email\", \"client_id\": \"1example23456789\", \"client_secret\": \"provider-app-client-secret\" }

Describe response: \"ProviderDetails\": { \"api_version\": \"v17.0\", \"attributes_url\": \"https://graph.facebook.com/v17.0/me?fields=\", \"attributes_url_add_attributes\": \"true\", \"authorize_scopes\": \"public_profile, email\", \"authorize_url\": \"https://www.facebook.com/v17.0/dialog/oauth\", \"client_id\": \"1example23456789\", \"client_secret\": \"provider-app-client-secret\", \"token_request_method\": \"GET\", \"token_url\": \"https://graph.facebook.com/v17.0/oauth/access_token\" }

" }, "AttributeMapping":{ "shape":"AttributeMappingType", @@ -3965,7 +3965,7 @@ }, "AllowedOAuthFlows":{ "shape":"OAuthFlowsType", - "documentation":"

The allowed OAuth flows.

code

Use a code grant flow, which provides an authorization code as the response. This code can be exchanged for access tokens with the /oauth2/token endpoint.

implicit

Issue the access token (and, optionally, ID token, based on scopes) directly to your user.

client_credentials

Issue the access token from the /oauth2/token endpoint directly to a non-person user using a combination of the client ID and client secret.

" + "documentation":"

The OAuth grant types that you want your app client to generate. To create an app client that generates client credentials grants, you must add client_credentials as the only allowed OAuth flow.

code

Use a code grant flow, which provides an authorization code as the response. This code can be exchanged for access tokens with the /oauth2/token endpoint.

implicit

Issue the access token (and, optionally, ID token, based on scopes) directly to your user.

client_credentials

Issue the access token from the /oauth2/token endpoint directly to a non-person user using a combination of the client ID and client secret.

" }, "AllowedOAuthScopes":{ "shape":"ScopeListType", @@ -4034,7 +4034,7 @@ "members":{ "CloudFrontDomain":{ "shape":"DomainType", - "documentation":"

The Amazon CloudFront endpoint that you use as the target of the alias that you set up with your Domain Name Service (DNS) provider.

" + "documentation":"

The Amazon CloudFront endpoint that you use as the target of the alias that you set up with your Domain Name Service (DNS) provider. Amazon Cognito returns this value if you set a custom domain with CustomDomainConfig. If you set an Amazon Cognito prefix domain, this operation returns a blank response.

" } } }, @@ -5373,7 +5373,7 @@ }, "ProviderDetails":{ "shape":"ProviderDetailsType", - "documentation":"

The IdP details. The following list describes the provider detail keys for each IdP type.

" + "documentation":"

The scopes, URLs, and identifiers for your external identity provider. The following examples describe the provider detail keys for each IdP type. These values and their schema are subject to change. Social IdP authorize_scopes values must match the values listed here.

OpenID Connect (OIDC)

Amazon Cognito accepts the following elements when it can't discover endpoint URLs from oidc_issuer: attributes_url, authorize_url, jwks_uri, token_url.

Create or update request: \"ProviderDetails\": { \"attributes_request_method\": \"GET\", \"attributes_url\": \"https://auth.example.com/userInfo\", \"authorize_scopes\": \"openid profile email\", \"authorize_url\": \"https://auth.example.com/authorize\", \"client_id\": \"1example23456789\", \"client_secret\": \"provider-app-client-secret\", \"jwks_uri\": \"https://auth.example.com/.well-known/jwks.json\", \"oidc_issuer\": \"https://auth.example.com\", \"token_url\": \"https://example.com/token\" }

Describe response: \"ProviderDetails\": { \"attributes_request_method\": \"GET\", \"attributes_url\": \"https://auth.example.com/userInfo\", \"attributes_url_add_attributes\": \"false\", \"authorize_scopes\": \"openid profile email\", \"authorize_url\": \"https://auth.example.com/authorize\", \"client_id\": \"1example23456789\", \"client_secret\": \"provider-app-client-secret\", \"jwks_uri\": \"https://auth.example.com/.well-known/jwks.json\", \"oidc_issuer\": \"https://auth.example.com\", \"token_url\": \"https://example.com/token\" }

SAML

Create or update request with Metadata URL: \"ProviderDetails\": { \"IDPInit\": \"true\", \"IDPSignout\": \"true\", \"EncryptedResponses\" : \"true\", \"MetadataURL\": \"https://auth.example.com/sso/saml/metadata\", \"RequestSigningAlgorithm\": \"rsa-sha256\" }

Create or update request with Metadata file: \"ProviderDetails\": { \"IDPInit\": \"true\", \"IDPSignout\": \"true\", \"EncryptedResponses\" : \"true\", \"MetadataFile\": \"[metadata XML]\", \"RequestSigningAlgorithm\": \"rsa-sha256\" }

The value of MetadataFile must be the plaintext metadata document with all quote (\") characters escaped by backslashes.

Describe response: \"ProviderDetails\": { \"IDPInit\": \"true\", \"IDPSignout\": \"true\", \"EncryptedResponses\" : \"true\", \"ActiveEncryptionCertificate\": \"[certificate]\", \"MetadataURL\": \"https://auth.example.com/sso/saml/metadata\", \"RequestSigningAlgorithm\": \"rsa-sha256\", \"SLORedirectBindingURI\": \"https://auth.example.com/slo/saml\", \"SSORedirectBindingURI\": \"https://auth.example.com/sso/saml\" }

LoginWithAmazon

Create or update request: \"ProviderDetails\": { \"authorize_scopes\": \"profile postal_code\", \"client_id\": \"amzn1.application-oa2-client.1example23456789\", \"client_secret\": \"provider-app-client-secret\" }

Describe response: \"ProviderDetails\": { \"attributes_url\": \"https://api.amazon.com/user/profile\", \"attributes_url_add_attributes\": \"false\", \"authorize_scopes\": \"profile postal_code\", \"authorize_url\": \"https://www.amazon.com/ap/oa\", \"client_id\": \"amzn1.application-oa2-client.1example23456789\", \"client_secret\": \"provider-app-client-secret\", \"token_request_method\": \"POST\", \"token_url\": \"https://api.amazon.com/auth/o2/token\" }

Google

Create or update request: \"ProviderDetails\": { \"authorize_scopes\": \"email profile openid\", \"client_id\": \"1example23456789.apps.googleusercontent.com\", \"client_secret\": \"provider-app-client-secret\" }

Describe response: \"ProviderDetails\": { \"attributes_url\": \"https://people.googleapis.com/v1/people/me?personFields=\", \"attributes_url_add_attributes\": \"true\", \"authorize_scopes\": \"email profile openid\", \"authorize_url\": \"https://accounts.google.com/o/oauth2/v2/auth\", \"client_id\": \"1example23456789.apps.googleusercontent.com\", \"client_secret\": \"provider-app-client-secret\", \"oidc_issuer\": \"https://accounts.google.com\", \"token_request_method\": \"POST\", \"token_url\": \"https://www.googleapis.com/oauth2/v4/token\" }

SignInWithApple

Create or update request: \"ProviderDetails\": { \"authorize_scopes\": \"email name\", \"client_id\": \"com.example.cognito\", \"private_key\": \"1EXAMPLE\", \"key_id\": \"2EXAMPLE\", \"team_id\": \"3EXAMPLE\" }

Describe response: \"ProviderDetails\": { \"attributes_url_add_attributes\": \"false\", \"authorize_scopes\": \"email name\", \"authorize_url\": \"https://appleid.apple.com/auth/authorize\", \"client_id\": \"com.example.cognito\", \"key_id\": \"1EXAMPLE\", \"oidc_issuer\": \"https://appleid.apple.com\", \"team_id\": \"2EXAMPLE\", \"token_request_method\": \"POST\", \"token_url\": \"https://appleid.apple.com/auth/token\" }

Facebook

Create or update request: \"ProviderDetails\": { \"api_version\": \"v17.0\", \"authorize_scopes\": \"public_profile, email\", \"client_id\": \"1example23456789\", \"client_secret\": \"provider-app-client-secret\" }

Describe response: \"ProviderDetails\": { \"api_version\": \"v17.0\", \"attributes_url\": \"https://graph.facebook.com/v17.0/me?fields=\", \"attributes_url_add_attributes\": \"true\", \"authorize_scopes\": \"public_profile, email\", \"authorize_url\": \"https://www.facebook.com/v17.0/dialog/oauth\", \"client_id\": \"1example23456789\", \"client_secret\": \"provider-app-client-secret\", \"token_request_method\": \"GET\", \"token_url\": \"https://graph.facebook.com/v17.0/oauth/access_token\" }

" }, "AttributeMapping":{ "shape":"AttributeMappingType", @@ -5616,14 +5616,14 @@ "shape":"ArnType", "documentation":"

The Amazon Resource Name (ARN) of the function that you want to assign to your Lambda trigger.

Set this parameter for legacy purposes. If you also set an ARN in PreTokenGenerationConfig, its value must be identical to PreTokenGeneration. For new instances of pre token generation triggers, set the LambdaArn of PreTokenGenerationConfig.

You can set

" }, - "PreTokenGenerationConfig":{ - "shape":"PreTokenGenerationVersionConfigType", - "documentation":"

The detailed configuration of a pre token generation trigger. If you also set an ARN in PreTokenGeneration, its value must be identical to PreTokenGenerationConfig.

" - }, "UserMigration":{ "shape":"ArnType", "documentation":"

The user migration Lambda config type.

" }, + "PreTokenGenerationConfig":{ + "shape":"PreTokenGenerationVersionConfigType", + "documentation":"

The detailed configuration of a pre token generation trigger. If you also set an ARN in PreTokenGeneration, its value must be identical to PreTokenGenerationConfig.

" + }, "CustomSMSSender":{ "shape":"CustomSMSLambdaVersionConfigType", "documentation":"

A custom SMS sender Lambda trigger.

" @@ -7577,7 +7577,7 @@ }, "ProviderDetails":{ "shape":"ProviderDetailsType", - "documentation":"

The IdP details to be updated, such as MetadataURL and MetadataFile.

" + "documentation":"

The scopes, URLs, and identifiers for your external identity provider. The following examples describe the provider detail keys for each IdP type. These values and their schema are subject to change. Social IdP authorize_scopes values must match the values listed here.

OpenID Connect (OIDC)

Amazon Cognito accepts the following elements when it can't discover endpoint URLs from oidc_issuer: attributes_url, authorize_url, jwks_uri, token_url.

Create or update request: \"ProviderDetails\": { \"attributes_request_method\": \"GET\", \"attributes_url\": \"https://auth.example.com/userInfo\", \"authorize_scopes\": \"openid profile email\", \"authorize_url\": \"https://auth.example.com/authorize\", \"client_id\": \"1example23456789\", \"client_secret\": \"provider-app-client-secret\", \"jwks_uri\": \"https://auth.example.com/.well-known/jwks.json\", \"oidc_issuer\": \"https://auth.example.com\", \"token_url\": \"https://example.com/token\" }

Describe response: \"ProviderDetails\": { \"attributes_request_method\": \"GET\", \"attributes_url\": \"https://auth.example.com/userInfo\", \"attributes_url_add_attributes\": \"false\", \"authorize_scopes\": \"openid profile email\", \"authorize_url\": \"https://auth.example.com/authorize\", \"client_id\": \"1example23456789\", \"client_secret\": \"provider-app-client-secret\", \"jwks_uri\": \"https://auth.example.com/.well-known/jwks.json\", \"oidc_issuer\": \"https://auth.example.com\", \"token_url\": \"https://example.com/token\" }

SAML

Create or update request with Metadata URL: \"ProviderDetails\": { \"IDPInit\": \"true\", \"IDPSignout\": \"true\", \"EncryptedResponses\" : \"true\", \"MetadataURL\": \"https://auth.example.com/sso/saml/metadata\", \"RequestSigningAlgorithm\": \"rsa-sha256\" }

Create or update request with Metadata file: \"ProviderDetails\": { \"IDPInit\": \"true\", \"IDPSignout\": \"true\", \"EncryptedResponses\" : \"true\", \"MetadataFile\": \"[metadata XML]\", \"RequestSigningAlgorithm\": \"rsa-sha256\" }

The value of MetadataFile must be the plaintext metadata document with all quote (\") characters escaped by backslashes.

Describe response: \"ProviderDetails\": { \"IDPInit\": \"true\", \"IDPSignout\": \"true\", \"EncryptedResponses\" : \"true\", \"ActiveEncryptionCertificate\": \"[certificate]\", \"MetadataURL\": \"https://auth.example.com/sso/saml/metadata\", \"RequestSigningAlgorithm\": \"rsa-sha256\", \"SLORedirectBindingURI\": \"https://auth.example.com/slo/saml\", \"SSORedirectBindingURI\": \"https://auth.example.com/sso/saml\" }

LoginWithAmazon

Create or update request: \"ProviderDetails\": { \"authorize_scopes\": \"profile postal_code\", \"client_id\": \"amzn1.application-oa2-client.1example23456789\", \"client_secret\": \"provider-app-client-secret\" }

Describe response: \"ProviderDetails\": { \"attributes_url\": \"https://api.amazon.com/user/profile\", \"attributes_url_add_attributes\": \"false\", \"authorize_scopes\": \"profile postal_code\", \"authorize_url\": \"https://www.amazon.com/ap/oa\", \"client_id\": \"amzn1.application-oa2-client.1example23456789\", \"client_secret\": \"provider-app-client-secret\", \"token_request_method\": \"POST\", \"token_url\": \"https://api.amazon.com/auth/o2/token\" }

Google

Create or update request: \"ProviderDetails\": { \"authorize_scopes\": \"email profile openid\", \"client_id\": \"1example23456789.apps.googleusercontent.com\", \"client_secret\": \"provider-app-client-secret\" }

Describe response: \"ProviderDetails\": { \"attributes_url\": \"https://people.googleapis.com/v1/people/me?personFields=\", \"attributes_url_add_attributes\": \"true\", \"authorize_scopes\": \"email profile openid\", \"authorize_url\": \"https://accounts.google.com/o/oauth2/v2/auth\", \"client_id\": \"1example23456789.apps.googleusercontent.com\", \"client_secret\": \"provider-app-client-secret\", \"oidc_issuer\": \"https://accounts.google.com\", \"token_request_method\": \"POST\", \"token_url\": \"https://www.googleapis.com/oauth2/v4/token\" }

SignInWithApple

Create or update request: \"ProviderDetails\": { \"authorize_scopes\": \"email name\", \"client_id\": \"com.example.cognito\", \"private_key\": \"1EXAMPLE\", \"key_id\": \"2EXAMPLE\", \"team_id\": \"3EXAMPLE\" }

Describe response: \"ProviderDetails\": { \"attributes_url_add_attributes\": \"false\", \"authorize_scopes\": \"email name\", \"authorize_url\": \"https://appleid.apple.com/auth/authorize\", \"client_id\": \"com.example.cognito\", \"key_id\": \"1EXAMPLE\", \"oidc_issuer\": \"https://appleid.apple.com\", \"team_id\": \"2EXAMPLE\", \"token_request_method\": \"POST\", \"token_url\": \"https://appleid.apple.com/auth/token\" }

Facebook

Create or update request: \"ProviderDetails\": { \"api_version\": \"v17.0\", \"authorize_scopes\": \"public_profile, email\", \"client_id\": \"1example23456789\", \"client_secret\": \"provider-app-client-secret\" }

Describe response: \"ProviderDetails\": { \"api_version\": \"v17.0\", \"attributes_url\": \"https://graph.facebook.com/v17.0/me?fields=\", \"attributes_url_add_attributes\": \"true\", \"authorize_scopes\": \"public_profile, email\", \"authorize_url\": \"https://www.facebook.com/v17.0/dialog/oauth\", \"client_id\": \"1example23456789\", \"client_secret\": \"provider-app-client-secret\", \"token_request_method\": \"GET\", \"token_url\": \"https://graph.facebook.com/v17.0/oauth/access_token\" }

" }, "AttributeMapping":{ "shape":"AttributeMappingType", diff -Nru awscli-2.15.9/awscli/botocore/data/comprehend/2017-11-27/service-2.json awscli-2.15.22/awscli/botocore/data/comprehend/2017-11-27/service-2.json --- awscli-2.15.9/awscli/botocore/data/comprehend/2017-11-27/service-2.json 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/awscli/botocore/data/comprehend/2017-11-27/service-2.json 2024-02-21 17:34:54.000000000 +0000 @@ -2077,7 +2077,7 @@ }, "LanguageCode":{ "shape":"LanguageCode", - "documentation":"

The language of the input documents. Currently, English is the only valid language.

" + "documentation":"

The language of the input documents.

" } } }, @@ -3153,7 +3153,7 @@ }, "LanguageCode":{ "shape":"LanguageCode", - "documentation":"

The language of the input documents. Currently, English is the only valid language.

" + "documentation":"

The language of the input text. Enter the language code for English (en) or Spanish (es).

" } } }, @@ -5946,7 +5946,7 @@ }, "LanguageCode":{ "shape":"LanguageCode", - "documentation":"

The language code of the input documents

" + "documentation":"

The language code of the input documents.

" }, "DataAccessRoleArn":{ "shape":"IamRoleArn", @@ -6725,7 +6725,7 @@ }, "LanguageCode":{ "shape":"LanguageCode", - "documentation":"

The language of the input documents. Currently, English is the only valid language.

" + "documentation":"

The language of the input documents. Enter the language code for English (en) or Spanish (es).

" }, "ClientRequestToken":{ "shape":"ClientRequestTokenString", diff -Nru awscli-2.15.9/awscli/botocore/data/config/2014-11-12/service-2.json awscli-2.15.22/awscli/botocore/data/config/2014-11-12/service-2.json --- awscli-2.15.9/awscli/botocore/data/config/2014-11-12/service-2.json 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/awscli/botocore/data/config/2014-11-12/service-2.json 2024-02-21 17:34:54.000000000 +0000 @@ -522,7 +522,7 @@ {"shape":"InvalidLimitException"}, {"shape":"OrganizationAccessDeniedException"} ], - "documentation":"

Returns a list of organization Config rules.

When you specify the limit and the next token, you receive a paginated response.

Limit and next token are not applicable if you specify organization Config rule names. It is only applicable, when you request all the organization Config rules.

For accounts within an organzation

If you deploy an organizational rule or conformance pack in an organization administrator account, and then establish a delegated administrator and deploy an organizational rule or conformance pack in the delegated administrator account, you won't be able to see the organizational rule or conformance pack in the organization administrator account from the delegated administrator account or see the organizational rule or conformance pack in the delegated administrator account from organization administrator account. The DescribeOrganizationConfigRules and DescribeOrganizationConformancePacks APIs can only see and interact with the organization-related resource that were deployed from within the account calling those APIs.

" + "documentation":"

Returns a list of organization Config rules.

When you specify the limit and the next token, you receive a paginated response.

Limit and next token are not applicable if you specify organization Config rule names. It is only applicable, when you request all the organization Config rules.

For accounts within an organization

If you deploy an organizational rule or conformance pack in an organization administrator account, and then establish a delegated administrator and deploy an organizational rule or conformance pack in the delegated administrator account, you won't be able to see the organizational rule or conformance pack in the organization administrator account from the delegated administrator account or see the organizational rule or conformance pack in the delegated administrator account from organization administrator account. The DescribeOrganizationConfigRules and DescribeOrganizationConformancePacks APIs can only see and interact with the organization-related resource that were deployed from within the account calling those APIs.

" }, "DescribeOrganizationConformancePackStatuses":{ "name":"DescribeOrganizationConformancePackStatuses", @@ -554,7 +554,7 @@ {"shape":"InvalidLimitException"}, {"shape":"OrganizationAccessDeniedException"} ], - "documentation":"

Returns a list of organization conformance packs.

When you specify the limit and the next token, you receive a paginated response.

Limit and next token are not applicable if you specify organization conformance packs names. They are only applicable, when you request all the organization conformance packs.

For accounts within an organzation

If you deploy an organizational rule or conformance pack in an organization administrator account, and then establish a delegated administrator and deploy an organizational rule or conformance pack in the delegated administrator account, you won't be able to see the organizational rule or conformance pack in the organization administrator account from the delegated administrator account or see the organizational rule or conformance pack in the delegated administrator account from organization administrator account. The DescribeOrganizationConfigRules and DescribeOrganizationConformancePacks APIs can only see and interact with the organization-related resource that were deployed from within the account calling those APIs.

" + "documentation":"

Returns a list of organization conformance packs.

When you specify the limit and the next token, you receive a paginated response.

Limit and next token are not applicable if you specify organization conformance packs names. They are only applicable, when you request all the organization conformance packs.

For accounts within an organization

If you deploy an organizational rule or conformance pack in an organization administrator account, and then establish a delegated administrator and deploy an organizational rule or conformance pack in the delegated administrator account, you won't be able to see the organizational rule or conformance pack in the organization administrator account from the delegated administrator account or see the organizational rule or conformance pack in the delegated administrator account from organization administrator account. The DescribeOrganizationConfigRules and DescribeOrganizationConformancePacks APIs can only see and interact with the organization-related resource that were deployed from within the account calling those APIs.

" }, "DescribePendingAggregationRequests":{ "name":"DescribePendingAggregationRequests", @@ -1178,7 +1178,7 @@ {"shape":"InsufficientPermissionsException"}, {"shape":"InvalidParameterValueException"} ], - "documentation":"

Adds or updates the remediation configuration with a specific Config rule with the selected target or action. The API creates the RemediationConfiguration object for the Config rule. The Config rule must already exist for you to add a remediation configuration. The target (SSM document) must exist and have permissions to use the target.

If you make backward incompatible changes to the SSM document, you must call this again to ensure the remediations can run.

This API does not support adding remediation configurations for service-linked Config Rules such as Organization Config rules, the rules deployed by conformance packs, and rules deployed by Amazon Web Services Security Hub.

For manual remediation configuration, you need to provide a value for automationAssumeRole or use a value in the assumeRolefield to remediate your resources. The SSM automation document can use either as long as it maps to a valid parameter.

However, for automatic remediation configuration, the only valid assumeRole field value is AutomationAssumeRole and you need to provide a value for AutomationAssumeRole to remediate your resources.

" + "documentation":"

Adds or updates the remediation configuration with a specific Config rule with the selected target or action. The API creates the RemediationConfiguration object for the Config rule. The Config rule must already exist for you to add a remediation configuration. The target (SSM document) must exist and have permissions to use the target.

Be aware of backward incompatible changes

If you make backward incompatible changes to the SSM document, you must call this again to ensure the remediations can run.

This API does not support adding remediation configurations for service-linked Config Rules such as Organization Config rules, the rules deployed by conformance packs, and rules deployed by Amazon Web Services Security Hub.

Required fields

For manual remediation configuration, you need to provide a value for automationAssumeRole or use a value in the assumeRolefield to remediate your resources. The SSM automation document can use either as long as it maps to a valid parameter.

However, for automatic remediation configuration, the only valid assumeRole field value is AutomationAssumeRole and you need to provide a value for AutomationAssumeRole to remediate your resources.

Auto remediation can be initiated even for compliant resources

If you enable auto remediation for a specific Config rule using the PutRemediationConfigurations API or the Config console, it initiates the remediation process for all non-compliant resources for that specific rule. The auto remediation process relies on the compliance data snapshot which is captured on a periodic basis. Any non-compliant resource that is updated between the snapshot schedule will continue to be remediated based on the last known compliance data snapshot.

This means that in some cases auto remediation can be initiated even for compliant resources, since the bootstrap processor uses a database that can have stale evaluation results based on the last known compliance data snapshot.

" }, "PutRemediationExceptions":{ "name":"PutRemediationExceptions", @@ -1192,7 +1192,7 @@ {"shape":"InvalidParameterValueException"}, {"shape":"InsufficientPermissionsException"} ], - "documentation":"

A remediation exception is when a specified resource is no longer considered for auto-remediation. This API adds a new exception or updates an existing exception for a specified resource with a specified Config rule.

Config generates a remediation exception when a problem occurs running a remediation action for a specified resource. Remediation exceptions blocks auto-remediation until the exception is cleared.

When placing an exception on an Amazon Web Services resource, it is recommended that remediation is set as manual remediation until the given Config rule for the specified resource evaluates the resource as NON_COMPLIANT. Once the resource has been evaluated as NON_COMPLIANT, you can add remediation exceptions and change the remediation type back from Manual to Auto if you want to use auto-remediation. Otherwise, using auto-remediation before a NON_COMPLIANT evaluation result can delete resources before the exception is applied.

Placing an exception can only be performed on resources that are NON_COMPLIANT. If you use this API for COMPLIANT resources or resources that are NOT_APPLICABLE, a remediation exception will not be generated. For more information on the conditions that initiate the possible Config evaluation results, see Concepts | Config Rules in the Config Developer Guide.

" + "documentation":"

A remediation exception is when a specified resource is no longer considered for auto-remediation. This API adds a new exception or updates an existing exception for a specified resource with a specified Config rule.

Exceptions block auto remediation

Config generates a remediation exception when a problem occurs running a remediation action for a specified resource. Remediation exceptions blocks auto-remediation until the exception is cleared.

Manual remediation is recommended when placing an exception

When placing an exception on an Amazon Web Services resource, it is recommended that remediation is set as manual remediation until the given Config rule for the specified resource evaluates the resource as NON_COMPLIANT. Once the resource has been evaluated as NON_COMPLIANT, you can add remediation exceptions and change the remediation type back from Manual to Auto if you want to use auto-remediation. Otherwise, using auto-remediation before a NON_COMPLIANT evaluation result can delete resources before the exception is applied.

Exceptions can only be performed on non-compliant resources

Placing an exception can only be performed on resources that are NON_COMPLIANT. If you use this API for COMPLIANT resources or resources that are NOT_APPLICABLE, a remediation exception will not be generated. For more information on the conditions that initiate the possible Config evaluation results, see Concepts | Config Rules in the Config Developer Guide.

Auto remediation can be initiated even for compliant resources

If you enable auto remediation for a specific Config rule using the PutRemediationConfigurations API or the Config console, it initiates the remediation process for all non-compliant resources for that specific rule. The auto remediation process relies on the compliance data snapshot which is captured on a periodic basis. Any non-compliant resource that is updated between the snapshot schedule will continue to be remediated based on the last known compliance data snapshot.

This means that in some cases auto remediation can be initiated even for compliant resources, since the bootstrap processor uses a database that can have stale evaluation results based on the last known compliance data snapshot.

" }, "PutResourceConfig":{ "name":"PutResourceConfig", @@ -4190,7 +4190,7 @@ "documentation":"

A comma-separated list of resource types to exclude from recording by the configuration recorder.

" } }, - "documentation":"

Specifies whether the configuration recorder excludes certain resource types from being recorded. Use the resourceTypes field to enter a comma-separated list of resource types you want to exclude from recording.

By default, when Config adds support for a new resource type in the Region where you set up the configuration recorder, including global resource types, Config starts recording resources of that type automatically.

How to use the exclusion recording strategy

To use this option, you must set the useOnly field of RecordingStrategy to EXCLUSION_BY_RESOURCE_TYPES.

Config will then record configuration changes for all supported resource types, except the resource types that you specify to exclude from being recorded.

Global resource types and the exclusion recording strategy

Unless specifically listed as exclusions, AWS::RDS::GlobalCluster will be recorded automatically in all supported Config Regions were the configuration recorder is enabled.

IAM users, groups, roles, and customer managed policies will be recorded in the Region where you set up the configuration recorder if that is a Region where Config was available before February 2022. You cannot be record the global IAM resouce types in Regions supported by Config after February 2022. This list where you cannot record the global IAM resource types includes the following Regions:

  • Asia Pacific (Hyderabad)

  • Asia Pacific (Melbourne)

  • Europe (Spain)

  • Europe (Zurich)

  • Israel (Tel Aviv)

  • Middle East (UAE)

" + "documentation":"

Specifies whether the configuration recorder excludes certain resource types from being recorded. Use the resourceTypes field to enter a comma-separated list of resource types you want to exclude from recording.

By default, when Config adds support for a new resource type in the Region where you set up the configuration recorder, including global resource types, Config starts recording resources of that type automatically.

How to use the exclusion recording strategy

To use this option, you must set the useOnly field of RecordingStrategy to EXCLUSION_BY_RESOURCE_TYPES.

Config will then record configuration changes for all supported resource types, except the resource types that you specify to exclude from being recorded.

Global resource types and the exclusion recording strategy

Unless specifically listed as exclusions, AWS::RDS::GlobalCluster will be recorded automatically in all supported Config Regions where the configuration recorder is enabled.

IAM users, groups, roles, and customer managed policies will be recorded in the Region where you set up the configuration recorder if that is a Region where Config was available before February 2022. You cannot record the global IAM resource types in Regions supported by Config after February 2022. This list where you cannot record the global IAM resource types includes the following Regions:

  • Asia Pacific (Hyderabad)

  • Asia Pacific (Melbourne)

  • Canada West (Calgary)

  • Europe (Spain)

  • Europe (Zurich)

  • Israel (Tel Aviv)

  • Middle East (UAE)

" }, "ExecutionControls":{ "type":"structure", @@ -6638,7 +6638,7 @@ }, "includeGlobalResourceTypes":{ "shape":"IncludeGlobalResourceTypes", - "documentation":"

This option is a bundle which only applies to the global IAM resource types: IAM users, groups, roles, and customer managed policies. These global IAM resource types can only be recorded by Config in Regions where Config was available before February 2022. You cannot be record the global IAM resouce types in Regions supported by Config after February 2022. This list where you cannot record the global IAM resource types includes the following Regions:

Aurora global clusters are recorded in all enabled Regions

The AWS::RDS::GlobalCluster resource type will be recorded in all supported Config Regions where the configuration recorder is enabled, even if includeGlobalResourceTypes is not set to true. The includeGlobalResourceTypes option is a bundle which only applies to IAM users, groups, roles, and customer managed policies.

If you do not want to record AWS::RDS::GlobalCluster in all enabled Regions, use one of the following recording strategies:

  1. Record all current and future resource types with exclusions (EXCLUSION_BY_RESOURCE_TYPES), or

  2. Record specific resource types (INCLUSION_BY_RESOURCE_TYPES).

For more information, see Selecting Which Resources are Recorded in the Config developer guide.

Before you set this field to true, set the allSupported field of RecordingGroup to true. Optionally, you can set the useOnly field of RecordingStrategy to ALL_SUPPORTED_RESOURCE_TYPES.

Overriding fields

If you set this field to false but list global IAM resource types in the resourceTypes field of RecordingGroup, Config will still record configuration changes for those specified resource types regardless of if you set the includeGlobalResourceTypes field to false.

If you do not want to record configuration changes to the global IAM resource types (IAM users, groups, roles, and customer managed policies), make sure to not list them in the resourceTypes field in addition to setting the includeGlobalResourceTypes field to false.

" + "documentation":"

This option is a bundle which only applies to the global IAM resource types: IAM users, groups, roles, and customer managed policies. These global IAM resource types can only be recorded by Config in Regions where Config was available before February 2022. You cannot record the global IAM resource types in Regions supported by Config after February 2022. This list where you cannot record the global IAM resource types includes the following Regions:

Aurora global clusters are recorded in all enabled Regions

The AWS::RDS::GlobalCluster resource type will be recorded in all supported Config Regions where the configuration recorder is enabled, even if includeGlobalResourceTypes is set to false. The includeGlobalResourceTypes option is a bundle which only applies to IAM users, groups, roles, and customer managed policies.

If you do not want to record AWS::RDS::GlobalCluster in all enabled Regions, use one of the following recording strategies:

  1. Record all current and future resource types with exclusions (EXCLUSION_BY_RESOURCE_TYPES), or

  2. Record specific resource types (INCLUSION_BY_RESOURCE_TYPES).

For more information, see Selecting Which Resources are Recorded in the Config developer guide.

includeGlobalResourceTypes and the exclusion recording strategy

The includeGlobalResourceTypes field has no impact on the EXCLUSION_BY_RESOURCE_TYPES recording strategy. This means that the global IAM resource types (IAM users, groups, roles, and customer managed policies) will not be automatically added as exclusions for exclusionByResourceTypes when includeGlobalResourceTypes is set to false.

The includeGlobalResourceTypes field should only be used to modify the AllSupported field, as the default for the AllSupported field is to record configuration changes for all supported resource types excluding the global IAM resource types. To include the global IAM resource types when AllSupported is set to true, make sure to set includeGlobalResourceTypes to true.

To exclude the global IAM resource types for the EXCLUSION_BY_RESOURCE_TYPES recording strategy, you need to manually add them to the resourceTypes field of exclusionByResourceTypes.

Required and optional fields

Before you set this field to true, set the allSupported field of RecordingGroup to true. Optionally, you can set the useOnly field of RecordingStrategy to ALL_SUPPORTED_RESOURCE_TYPES.

Overriding fields

If you set this field to false but list global IAM resource types in the resourceTypes field of RecordingGroup, Config will still record configuration changes for those specified resource types regardless of if you set the includeGlobalResourceTypes field to false.

If you do not want to record configuration changes to the global IAM resource types (IAM users, groups, roles, and customer managed policies), make sure to not list them in the resourceTypes field in addition to setting the includeGlobalResourceTypes field to false.

" }, "resourceTypes":{ "shape":"ResourceTypeList", @@ -6650,7 +6650,7 @@ }, "recordingStrategy":{ "shape":"RecordingStrategy", - "documentation":"

An object that specifies the recording strategy for the configuration recorder.

Required and optional fields

The recordingStrategy field is optional when you set the allSupported field of RecordingGroup to true.

The recordingStrategy field is optional when you list resource types in the resourceTypes field of RecordingGroup.

The recordingStrategy field is required if you list resource types to exclude from recording in the resourceTypes field of ExclusionByResourceTypes.

Overriding fields

If you choose EXCLUSION_BY_RESOURCE_TYPES for the recording strategy, the exclusionByResourceTypes field will override other properties in the request.

For example, even if you set includeGlobalResourceTypes to false, global IAM resource types will still be automatically recorded in this option unless those resource types are specifically listed as exclusions in the resourceTypes field of exclusionByResourceTypes.

Global resources types and the resource exclusion recording strategy

By default, if you choose the EXCLUSION_BY_RESOURCE_TYPES recording strategy, when Config adds support for a new resource type in the Region where you set up the configuration recorder, including global resource types, Config starts recording resources of that type automatically.

Unless specifically listed as exclusions, AWS::RDS::GlobalCluster will be recorded automatically in all supported Config Regions were the configuration recorder is enabled.

IAM users, groups, roles, and customer managed policies will be recorded in the Region where you set up the configuration recorder if that is a Region where Config was available before February 2022. You cannot be record the global IAM resouce types in Regions supported by Config after February 2022. This list where you cannot record the global IAM resource types includes the following Regions:

  • Asia Pacific (Hyderabad)

  • Asia Pacific (Melbourne)

  • Europe (Spain)

  • Europe (Zurich)

  • Israel (Tel Aviv)

  • Middle East (UAE)

" + "documentation":"

An object that specifies the recording strategy for the configuration recorder.

Required and optional fields

The recordingStrategy field is optional when you set the allSupported field of RecordingGroup to true.

The recordingStrategy field is optional when you list resource types in the resourceTypes field of RecordingGroup.

The recordingStrategy field is required if you list resource types to exclude from recording in the resourceTypes field of ExclusionByResourceTypes.

Overriding fields

If you choose EXCLUSION_BY_RESOURCE_TYPES for the recording strategy, the exclusionByResourceTypes field will override other properties in the request.

For example, even if you set includeGlobalResourceTypes to false, global IAM resource types will still be automatically recorded in this option unless those resource types are specifically listed as exclusions in the resourceTypes field of exclusionByResourceTypes.

Global resources types and the resource exclusion recording strategy

By default, if you choose the EXCLUSION_BY_RESOURCE_TYPES recording strategy, when Config adds support for a new resource type in the Region where you set up the configuration recorder, including global resource types, Config starts recording resources of that type automatically.

Unless specifically listed as exclusions, AWS::RDS::GlobalCluster will be recorded automatically in all supported Config Regions where the configuration recorder is enabled.

IAM users, groups, roles, and customer managed policies will be recorded in the Region where you set up the configuration recorder if that is a Region where Config was available before February 2022. You cannot record the global IAM resource types in Regions supported by Config after February 2022. This list where you cannot record the global IAM resource types includes the following Regions:

  • Asia Pacific (Hyderabad)

  • Asia Pacific (Melbourne)

  • Canada West (Calgary)

  • Europe (Spain)

  • Europe (Zurich)

  • Israel (Tel Aviv)

  • Middle East (UAE)

" } }, "documentation":"

Specifies which resource types Config records for configuration changes. By default, Config records configuration changes for all current and future supported resource types in the Amazon Web Services Region where you have enabled Config, excluding the global IAM resource types: IAM users, groups, roles, and customer managed policies.

In the recording group, you specify whether you want to record all supported current and future supported resource types or to include or exclude specific resources types. For a list of supported resource types, see Supported Resource Types in the Config developer guide.

If you don't want Config to record all current and future supported resource types (excluding the global IAM resource types), use one of the following recording strategies:

  1. Record all current and future resource types with exclusions (EXCLUSION_BY_RESOURCE_TYPES), or

  2. Record specific resource types (INCLUSION_BY_RESOURCE_TYPES).

If you use the recording strategy to Record all current and future resource types (ALL_SUPPORTED_RESOURCE_TYPES), you can use the flag includeGlobalResourceTypes to include the global IAM resource types in your recording.

Aurora global clusters are recorded in all enabled Regions

The AWS::RDS::GlobalCluster resource type will be recorded in all supported Config Regions where the configuration recorder is enabled.

If you do not want to record AWS::RDS::GlobalCluster in all enabled Regions, use the EXCLUSION_BY_RESOURCE_TYPES or INCLUSION_BY_RESOURCE_TYPES recording strategy.

" @@ -6707,7 +6707,7 @@ "members":{ "useOnly":{ "shape":"RecordingStrategyType", - "documentation":"

The recording strategy for the configuration recorder.

Required and optional fields

The recordingStrategy field is optional when you set the allSupported field of RecordingGroup to true.

The recordingStrategy field is optional when you list resource types in the resourceTypes field of RecordingGroup.

The recordingStrategy field is required if you list resource types to exclude from recording in the resourceTypes field of ExclusionByResourceTypes.

Overriding fields

If you choose EXCLUSION_BY_RESOURCE_TYPES for the recording strategy, the exclusionByResourceTypes field will override other properties in the request.

For example, even if you set includeGlobalResourceTypes to false, global IAM resource types will still be automatically recorded in this option unless those resource types are specifically listed as exclusions in the resourceTypes field of exclusionByResourceTypes.

Global resource types and the exclusion recording strategy

By default, if you choose the EXCLUSION_BY_RESOURCE_TYPES recording strategy, when Config adds support for a new resource type in the Region where you set up the configuration recorder, including global resource types, Config starts recording resources of that type automatically.

Unless specifically listed as exclusions, AWS::RDS::GlobalCluster will be recorded automatically in all supported Config Regions were the configuration recorder is enabled.

IAM users, groups, roles, and customer managed policies will be recorded in the Region where you set up the configuration recorder if that is a Region where Config was available before February 2022. You cannot be record the global IAM resouce types in Regions supported by Config after February 2022. This list where you cannot record the global IAM resource types includes the following Regions:

  • Asia Pacific (Hyderabad)

  • Asia Pacific (Melbourne)

  • Europe (Spain)

  • Europe (Zurich)

  • Israel (Tel Aviv)

  • Middle East (UAE)

" + "documentation":"

The recording strategy for the configuration recorder.

Required and optional fields

The recordingStrategy field is optional when you set the allSupported field of RecordingGroup to true.

The recordingStrategy field is optional when you list resource types in the resourceTypes field of RecordingGroup.

The recordingStrategy field is required if you list resource types to exclude from recording in the resourceTypes field of ExclusionByResourceTypes.

Overriding fields

If you choose EXCLUSION_BY_RESOURCE_TYPES for the recording strategy, the exclusionByResourceTypes field will override other properties in the request.

For example, even if you set includeGlobalResourceTypes to false, global IAM resource types will still be automatically recorded in this option unless those resource types are specifically listed as exclusions in the resourceTypes field of exclusionByResourceTypes.

Global resource types and the exclusion recording strategy

By default, if you choose the EXCLUSION_BY_RESOURCE_TYPES recording strategy, when Config adds support for a new resource type in the Region where you set up the configuration recorder, including global resource types, Config starts recording resources of that type automatically.

Unless specifically listed as exclusions, AWS::RDS::GlobalCluster will be recorded automatically in all supported Config Regions where the configuration recorder is enabled.

IAM users, groups, roles, and customer managed policies will be recorded in the Region where you set up the configuration recorder if that is a Region where Config was available before February 2022. You cannot record the global IAM resource types in Regions supported by Config after February 2022. This list where you cannot record the global IAM resource types includes the following Regions:

  • Asia Pacific (Hyderabad)

  • Asia Pacific (Melbourne)

  • Canada West (Calgary)

  • Europe (Spain)

  • Europe (Zurich)

  • Israel (Tel Aviv)

  • Middle East (UAE)

" } }, "documentation":"

Specifies the recording strategy of the configuration recorder.

" @@ -8248,7 +8248,7 @@ "members":{ "DocumentName":{ "shape":"SSMDocumentName", - "documentation":"

The name or Amazon Resource Name (ARN) of the SSM document to use to create a conformance pack. If you use the document name, Config checks only your account and Amazon Web Services Region for the SSM document. If you want to use an SSM document from another Region or account, you must provide the ARN.

" + "documentation":"

The name or Amazon Resource Name (ARN) of the SSM document to use to create a conformance pack. If you use the document name, Config checks only your account and Amazon Web Services Region for the SSM document.

" }, "DocumentVersion":{ "shape":"SSMDocumentVersion", diff -Nru awscli-2.15.9/awscli/botocore/data/connect/2017-08-08/service-2.json awscli-2.15.22/awscli/botocore/data/connect/2017-08-08/service-2.json --- awscli-2.15.9/awscli/botocore/data/connect/2017-08-08/service-2.json 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/awscli/botocore/data/connect/2017-08-08/service-2.json 2024-02-21 17:34:54.000000000 +0000 @@ -10130,14 +10130,14 @@ "members":{ "Value":{ "shape":"EvaluationNoteString", - "documentation":"

The note for an item (section or question) in a contact evaluation.

" + "documentation":"

The note for an item (section or question) in a contact evaluation.

Even though a note in an evaluation can have up to 3072 chars, there is also a limit on the total number of chars for all the notes in the evaluation combined. Assuming there are N questions in the evaluation being submitted, then the max char limit for all notes combined is N x 1024.

" } }, "documentation":"

Information about notes for a contact evaluation.

" }, "EvaluationNoteString":{ "type":"string", - "max":1024, + "max":3072, "min":0 }, "EvaluationNotesMap":{ @@ -10490,15 +10490,15 @@ }, "Filters":{ "shape":"Filters", - "documentation":"

The filters to apply to returned metrics. You can filter up to the following limits:

Metric data is retrieved only for the resources associated with the queues or routing profiles, and by any channels included in the filter. (You cannot filter by both queue AND routing profile.) You can include both resource IDs and resource ARNs in the same request.

Currently tagging is only supported on the resources that are passed in the filter.

" + "documentation":"

The filters to apply to returned metrics. You can filter up to the following limits:

Metric data is retrieved only for the resources associated with the queues or routing profiles, and by any channels included in the filter. (You cannot filter by both queue AND routing profile.) You can include both resource IDs and resource ARNs in the same request.

When using the RoutingStepExpression filter, you need to pass exactly one QueueId. The filter is also case sensitive so when using the RoutingStepExpression filter, grouping by ROUTING_STEP_EXPRESSION is required.

Currently tagging is only supported on the resources that are passed in the filter.

" }, "Groupings":{ "shape":"Groupings", - "documentation":"

The grouping applied to the metrics returned. For example, when grouped by QUEUE, the metrics returned apply to each queue rather than aggregated for all queues.

" + "documentation":"

The grouping applied to the metrics returned. For example, when grouped by QUEUE, the metrics returned apply to each queue rather than aggregated for all queues.

" }, "CurrentMetrics":{ "shape":"CurrentMetrics", - "documentation":"

The metrics to retrieve. Specify the name and unit for each metric. The following metrics are available. For a description of all the metrics, see Real-time Metrics Definitions in the Amazon Connect Administrator Guide.

AGENTS_AFTER_CONTACT_WORK

Unit: COUNT

Name in real-time metrics report: ACW

AGENTS_AVAILABLE

Unit: COUNT

Name in real-time metrics report: Available

AGENTS_ERROR

Unit: COUNT

Name in real-time metrics report: Error

AGENTS_NON_PRODUCTIVE

Unit: COUNT

Name in real-time metrics report: NPT (Non-Productive Time)

AGENTS_ON_CALL

Unit: COUNT

Name in real-time metrics report: On contact

AGENTS_ON_CONTACT

Unit: COUNT

Name in real-time metrics report: On contact

AGENTS_ONLINE

Unit: COUNT

Name in real-time metrics report: Online

AGENTS_STAFFED

Unit: COUNT

Name in real-time metrics report: Staffed

CONTACTS_IN_QUEUE

Unit: COUNT

Name in real-time metrics report: In queue

CONTACTS_SCHEDULED

Unit: COUNT

Name in real-time metrics report: Scheduled

OLDEST_CONTACT_AGE

Unit: SECONDS

When you use groupings, Unit says SECONDS and the Value is returned in SECONDS.

When you do not use groupings, Unit says SECONDS but the Value is returned in MILLISECONDS. For example, if you get a response like this:

{ \"Metric\": { \"Name\": \"OLDEST_CONTACT_AGE\", \"Unit\": \"SECONDS\" }, \"Value\": 24113.0 }

The actual OLDEST_CONTACT_AGE is 24 seconds.

Name in real-time metrics report: Oldest

SLOTS_ACTIVE

Unit: COUNT

Name in real-time metrics report: Active

SLOTS_AVAILABLE

Unit: COUNT

Name in real-time metrics report: Availability

" + "documentation":"

The metrics to retrieve. Specify the name and unit for each metric. The following metrics are available. For a description of all the metrics, see Real-time Metrics Definitions in the Amazon Connect Administrator Guide.

AGENTS_AFTER_CONTACT_WORK

Unit: COUNT

Name in real-time metrics report: ACW

AGENTS_AVAILABLE

Unit: COUNT

Name in real-time metrics report: Available

AGENTS_ERROR

Unit: COUNT

Name in real-time metrics report: Error

AGENTS_NON_PRODUCTIVE

Unit: COUNT

Name in real-time metrics report: NPT (Non-Productive Time)

AGENTS_ON_CALL

Unit: COUNT

Name in real-time metrics report: On contact

AGENTS_ON_CONTACT

Unit: COUNT

Name in real-time metrics report: On contact

AGENTS_ONLINE

Unit: COUNT

Name in real-time metrics report: Online

AGENTS_STAFFED

Unit: COUNT

Name in real-time metrics report: Staffed

CONTACTS_IN_QUEUE

Unit: COUNT

Name in real-time metrics report: In queue

CONTACTS_SCHEDULED

Unit: COUNT

Name in real-time metrics report: Scheduled

OLDEST_CONTACT_AGE

Unit: SECONDS

When you use groupings, Unit says SECONDS and the Value is returned in SECONDS.

When you do not use groupings, Unit says SECONDS but the Value is returned in MILLISECONDS. For example, if you get a response like this:

{ \"Metric\": { \"Name\": \"OLDEST_CONTACT_AGE\", \"Unit\": \"SECONDS\" }, \"Value\": 24113.0 }

The actual OLDEST_CONTACT_AGE is 24 seconds.

When the filter RoutingStepExpression is used, this metric is still calculated from enqueue time. For example, if a contact that has been queued under <Expression 1> for 10 seconds has expired and <Expression 2> becomes active, then OLDEST_CONTACT_AGE for this queue will be counted starting from 10, not 0.

Name in real-time metrics report: Oldest

SLOTS_ACTIVE

Unit: COUNT

Name in real-time metrics report: Active

SLOTS_AVAILABLE

Unit: COUNT

Name in real-time metrics report: Availability

" }, "NextToken":{ "shape":"NextToken", @@ -10685,11 +10685,11 @@ }, "Filters":{ "shape":"Filters", - "documentation":"

The queues, up to 100, or channels, to use to filter the metrics returned. Metric data is retrieved only for the resources associated with the queues or channels included in the filter. You can include both queue IDs and queue ARNs in the same request. VOICE, CHAT, and TASK channels are supported.

To filter by Queues, enter the queue ID/ARN, not the name of the queue.

" + "documentation":"

The queues, up to 100, or channels, to use to filter the metrics returned. Metric data is retrieved only for the resources associated with the queues or channels included in the filter. You can include both queue IDs and queue ARNs in the same request. VOICE, CHAT, and TASK channels are supported.

RoutingStepExpression is not a valid filter for GetMetricData and we recommend switching to GetMetricDataV2 for more up-to-date features.

To filter by Queues, enter the queue ID/ARN, not the name of the queue.

" }, "Groupings":{ "shape":"Groupings", - "documentation":"

The grouping applied to the metrics returned. For example, when results are grouped by queue, the metrics returned are grouped by queue. The values returned apply to the metrics for each queue rather than aggregated for all queues.

If no grouping is specified, a summary of metrics for all queues is returned.

" + "documentation":"

The grouping applied to the metrics returned. For example, when results are grouped by queue, the metrics returned are grouped by queue. The values returned apply to the metrics for each queue rather than aggregated for all queues.

If no grouping is specified, a summary of metrics for all queues is returned.

RoutingStepExpression is not a valid filter for GetMetricData and we recommend switching to GetMetricDataV2 for more up-to-date features.

" }, "HistoricalMetrics":{ "shape":"HistoricalMetrics", @@ -10747,15 +10747,15 @@ }, "Filters":{ "shape":"FiltersV2List", - "documentation":"

The filters to apply to returned metrics. You can filter on the following resources:

At least one filter must be passed from queues, routing profiles, agents, or user hierarchy groups.

To filter by phone number, see Create a historical metrics report in the Amazon Connect Administrator's Guide.

Note the following limits:

" + "documentation":"

The filters to apply to returned metrics. You can filter on the following resources:

At least one filter must be passed from queues, routing profiles, agents, or user hierarchy groups.

To filter by phone number, see Create a historical metrics report in the Amazon Connect Administrator's Guide.

Note the following limits:

" }, "Groupings":{ "shape":"GroupingsV2", - "documentation":"

The grouping applied to the metrics that are returned. For example, when results are grouped by queue, the metrics returned are grouped by queue. The values that are returned apply to the metrics for each queue. They are not aggregated for all queues.

If no grouping is specified, a summary of all metrics is returned.

Valid grouping keys: QUEUE | ROUTING_PROFILE | AGENT | CHANNEL | AGENT_HIERARCHY_LEVEL_ONE | AGENT_HIERARCHY_LEVEL_TWO | AGENT_HIERARCHY_LEVEL_THREE | AGENT_HIERARCHY_LEVEL_FOUR | AGENT_HIERARCHY_LEVEL_FIVE, contact/segmentAttributes/connect:Subtype

" + "documentation":"

The grouping applied to the metrics that are returned. For example, when results are grouped by queue, the metrics returned are grouped by queue. The values that are returned apply to the metrics for each queue. They are not aggregated for all queues.

If no grouping is specified, a summary of all metrics is returned.

Valid grouping keys: QUEUE | ROUTING_PROFILE | AGENT | CHANNEL | AGENT_HIERARCHY_LEVEL_ONE | AGENT_HIERARCHY_LEVEL_TWO | AGENT_HIERARCHY_LEVEL_THREE | AGENT_HIERARCHY_LEVEL_FOUR | AGENT_HIERARCHY_LEVEL_FIVE, contact/segmentAttributes/connect:Subtype | ROUTING_STEP_EXPRESSION

" }, "Metrics":{ "shape":"MetricsV2", - "documentation":"

The metrics to retrieve. Specify the name, groupings, and filters for each metric. The following historical metrics are available. For a description of each metric, see Historical metrics definitions in the Amazon Connect Administrator's Guide.

ABANDONMENT_RATE

Unit: Percent

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype

AGENT_ADHERENT_TIME

This metric is available only in Amazon Web Services Regions where Forecasting, capacity planning, and scheduling is available.

Unit: Seconds

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

AGENT_ANSWER_RATE

Unit: Percent

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

AGENT_NON_ADHERENT_TIME

Unit: Seconds

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

AGENT_NON_RESPONSE

Unit: Count

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

AGENT_NON_RESPONSE_WITHOUT_CUSTOMER_ABANDONS

Unit: Count

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

Data for this metric is available starting from October 1, 2023 0:00:00 GMT.

AGENT_OCCUPANCY

Unit: Percentage

Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy

AGENT_SCHEDULE_ADHERENCE

This metric is available only in Amazon Web Services Regions where Forecasting, capacity planning, and scheduling is available.

Unit: Percent

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

AGENT_SCHEDULED_TIME

This metric is available only in Amazon Web Services Regions where Forecasting, capacity planning, and scheduling is available.

Unit: Seconds

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

AVG_ABANDON_TIME

Unit: Seconds

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype

AVG_ACTIVE_TIME

Unit: Seconds

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

AVG_AFTER_CONTACT_WORK_TIME

Unit: Seconds

Valid metric filter key: INITIATION_METHOD

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype

Feature is a valid filter but not a valid grouping.

AVG_AGENT_CONNECTING_TIME

Unit: Seconds

Valid metric filter key: INITIATION_METHOD. For now, this metric only supports the following as INITIATION_METHOD: INBOUND | OUTBOUND | CALLBACK | API

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

The Negate key in Metric Level Filters is not applicable for this metric.

AVG_AGENT_PAUSE_TIME

Unit: Seconds

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

AVG_CONTACT_DURATION

Unit: Seconds

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype

Feature is a valid filter but not a valid grouping.

AVG_CONVERSATION_DURATION

Unit: Seconds

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype

AVG_GREETING_TIME_AGENT

This metric is available only for contacts analyzed by Contact Lens conversational analytics.

Unit: Seconds

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype

AVG_HANDLE_TIME

Unit: Seconds

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype

Feature is a valid filter but not a valid grouping.

AVG_HOLD_TIME

Unit: Seconds

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype

Feature is a valid filter but not a valid grouping.

AVG_HOLD_TIME_ALL_CONTACTS

Unit: Seconds

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype

AVG_HOLDS

Unit: Count

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype

Feature is a valid filter but not a valid grouping.

AVG_INTERACTION_AND_HOLD_TIME

Unit: Seconds

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype

AVG_INTERACTION_TIME

Unit: Seconds

Valid metric filter key: INITIATION_METHOD

Valid groupings and filters: Queue, Channel, Routing Profile, Feature, contact/segmentAttributes/connect:Subtype

Feature is a valid filter but not a valid grouping.

AVG_INTERRUPTIONS_AGENT

This metric is available only for contacts analyzed by Contact Lens conversational analytics.

Unit: Count

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype

AVG_INTERRUPTION_TIME_AGENT

This metric is available only for contacts analyzed by Contact Lens conversational analytics.

Unit: Seconds

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype

AVG_NON_TALK_TIME

This metric is available only for contacts analyzed by Contact Lens conversational analytics.

Unit: Seconds

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype

AVG_QUEUE_ANSWER_TIME

Unit: Seconds

Valid groupings and filters: Queue, Channel, Routing Profile, Feature, contact/segmentAttributes/connect:Subtype

Feature is a valid filter but not a valid grouping.

AVG_RESOLUTION_TIME

Unit: Seconds

Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype

AVG_TALK_TIME

This metric is available only for contacts analyzed by Contact Lens conversational analytics.

Unit: Seconds

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype

AVG_TALK_TIME_AGENT

This metric is available only for contacts analyzed by Contact Lens conversational analytics.

Unit: Seconds

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype

AVG_TALK_TIME_CUSTOMER

This metric is available only for contacts analyzed by Contact Lens conversational analytics.

Unit: Seconds

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype

CONTACTS_ABANDONED

Unit: Count

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype

CONTACTS_CREATED

Unit: Count

Valid metric filter key: INITIATION_METHOD

Valid groupings and filters: Queue, Channel, Routing Profile, Feature, contact/segmentAttributes/connect:Subtype

Feature is a valid filter but not a valid grouping.

CONTACTS_HANDLED

Unit: Count

Valid metric filter key: INITIATION_METHOD, DISCONNECT_REASON

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype

Feature is a valid filter but not a valid grouping.

CONTACTS_HOLD_ABANDONS

Unit: Count

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype

CONTACTS_ON_HOLD_AGENT_DISCONNECT

Unit: Count

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

CONTACTS_ON_HOLD_CUSTOMER_DISCONNECT

Unit: Count

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

CONTACTS_PUT_ON_HOLD

Unit: Count

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

CONTACTS_TRANSFERRED_OUT_EXTERNAL

Unit: Count

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

CONTACTS_TRANSFERRED_OUT_INTERNAL

Unit: Percent

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

CONTACTS_QUEUED

Unit: Count

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype

CONTACTS_RESOLVED_IN_X

Unit: Count

Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype

Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you must enter LT (for \"Less than\").

CONTACTS_TRANSFERRED_OUT

Unit: Count

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype

Feature is a valid filter but not a valid grouping.

CONTACTS_TRANSFERRED_OUT_BY_AGENT

Unit: Count

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype

CONTACTS_TRANSFERRED_OUT_FROM_QUEUE

Unit: Count

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype

MAX_QUEUED_TIME

Unit: Seconds

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype

PERCENT_NON_TALK_TIME

This metric is available only for contacts analyzed by Contact Lens conversational analytics.

Unit: Percentage

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype

PERCENT_TALK_TIME

This metric is available only for contacts analyzed by Contact Lens conversational analytics.

Unit: Percentage

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype

PERCENT_TALK_TIME_AGENT

This metric is available only for contacts analyzed by Contact Lens conversational analytics.

Unit: Percentage

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype

PERCENT_TALK_TIME_CUSTOMER

This metric is available only for contacts analyzed by Contact Lens conversational analytics.

Unit: Percentage

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype

SERVICE_LEVEL

You can include up to 20 SERVICE_LEVEL metrics in a request.

Unit: Percent

Valid groupings and filters: Queue, Channel, Routing Profile

Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you must enter LT (for \"Less than\").

SUM_AFTER_CONTACT_WORK_TIME

Unit: Seconds

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

SUM_CONNECTING_TIME_AGENT

Unit: Seconds

Valid metric filter key: INITIATION_METHOD. This metric only supports the following filter keys as INITIATION_METHOD: INBOUND | OUTBOUND | CALLBACK | API

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

The Negate key in Metric Level Filters is not applicable for this metric.

SUM_CONTACT_FLOW_TIME

Unit: Seconds

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

SUM_CONTACT_TIME_AGENT

Unit: Seconds

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

SUM_CONTACTS_ANSWERED_IN_X

Unit: Count

Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype

Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you must enter LT (for \"Less than\").

SUM_CONTACTS_ABANDONED_IN_X

Unit: Count

Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype

Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you must enter LT (for \"Less than\").

SUM_CONTACTS_DISCONNECTED

Valid metric filter key: DISCONNECT_REASON

Unit: Count

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype

SUM_ERROR_STATUS_TIME_AGENT

Unit: Seconds

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

SUM_HANDLE_TIME

Unit: Seconds

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

SUM_HOLD_TIME

Unit: Count

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

SUM_IDLE_TIME_AGENT

Unit: Seconds

Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy

SUM_INTERACTION_AND_HOLD_TIME

Unit: Seconds

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

SUM_INTERACTION_TIME

Unit: Seconds

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

SUM_NON_PRODUCTIVE_TIME_AGENT

Unit: Seconds

Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy

SUM_ONLINE_TIME_AGENT

Unit: Seconds

Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy

SUM_RETRY_CALLBACK_ATTEMPTS

Unit: Count

Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype

" + "documentation":"

The metrics to retrieve. Specify the name, groupings, and filters for each metric. The following historical metrics are available. For a description of each metric, see Historical metrics definitions in the Amazon Connect Administrator's Guide.

ABANDONMENT_RATE

Unit: Percent

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype

AGENT_ADHERENT_TIME

This metric is available only in Amazon Web Services Regions where Forecasting, capacity planning, and scheduling is available.

Unit: Seconds

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

AGENT_ANSWER_RATE

Unit: Percent

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

AGENT_NON_ADHERENT_TIME

Unit: Seconds

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

AGENT_NON_RESPONSE

Unit: Count

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

AGENT_NON_RESPONSE_WITHOUT_CUSTOMER_ABANDONS

Unit: Count

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

Data for this metric is available starting from October 1, 2023 0:00:00 GMT.

AGENT_OCCUPANCY

Unit: Percentage

Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy

AGENT_SCHEDULE_ADHERENCE

This metric is available only in Amazon Web Services Regions where Forecasting, capacity planning, and scheduling is available.

Unit: Percent

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

AGENT_SCHEDULED_TIME

This metric is available only in Amazon Web Services Regions where Forecasting, capacity planning, and scheduling is available.

Unit: Seconds

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

AVG_ABANDON_TIME

Unit: Seconds

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype

AVG_ACTIVE_TIME

Unit: Seconds

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

AVG_AFTER_CONTACT_WORK_TIME

Unit: Seconds

Valid metric filter key: INITIATION_METHOD

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype

Feature is a valid filter but not a valid grouping.

AVG_AGENT_CONNECTING_TIME

Unit: Seconds

Valid metric filter key: INITIATION_METHOD. For now, this metric only supports the following as INITIATION_METHOD: INBOUND | OUTBOUND | CALLBACK | API

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

The Negate key in Metric Level Filters is not applicable for this metric.

AVG_AGENT_PAUSE_TIME

Unit: Seconds

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

AVG_CONTACT_DURATION

Unit: Seconds

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype

Feature is a valid filter but not a valid grouping.

AVG_CONVERSATION_DURATION

Unit: Seconds

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype

AVG_GREETING_TIME_AGENT

This metric is available only for contacts analyzed by Contact Lens conversational analytics.

Unit: Seconds

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype

AVG_HANDLE_TIME

Unit: Seconds

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, RoutingStepExpression

Feature is a valid filter but not a valid grouping.

AVG_HOLD_TIME

Unit: Seconds

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype

Feature is a valid filter but not a valid grouping.

AVG_HOLD_TIME_ALL_CONTACTS

Unit: Seconds

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype

AVG_HOLDS

Unit: Count

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype

Feature is a valid filter but not a valid grouping.

AVG_INTERACTION_AND_HOLD_TIME

Unit: Seconds

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype

AVG_INTERACTION_TIME

Unit: Seconds

Valid metric filter key: INITIATION_METHOD

Valid groupings and filters: Queue, Channel, Routing Profile, Feature, contact/segmentAttributes/connect:Subtype

Feature is a valid filter but not a valid grouping.

AVG_INTERRUPTIONS_AGENT

This metric is available only for contacts analyzed by Contact Lens conversational analytics.

Unit: Count

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype

AVG_INTERRUPTION_TIME_AGENT

This metric is available only for contacts analyzed by Contact Lens conversational analytics.

Unit: Seconds

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype

AVG_NON_TALK_TIME

This metric is available only for contacts analyzed by Contact Lens conversational analytics.

Unit: Seconds

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype

AVG_QUEUE_ANSWER_TIME

Unit: Seconds

Valid groupings and filters: Queue, Channel, Routing Profile, Feature, contact/segmentAttributes/connect:Subtype

Feature is a valid filter but not a valid grouping.

AVG_RESOLUTION_TIME

Unit: Seconds

Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype

AVG_TALK_TIME

This metric is available only for contacts analyzed by Contact Lens conversational analytics.

Unit: Seconds

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype

AVG_TALK_TIME_AGENT

This metric is available only for contacts analyzed by Contact Lens conversational analytics.

Unit: Seconds

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype

AVG_TALK_TIME_CUSTOMER

This metric is available only for contacts analyzed by Contact Lens conversational analytics.

Unit: Seconds

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype

CONTACTS_ABANDONED

Unit: Count

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, RoutingStepExpression

CONTACTS_CREATED

Unit: Count

Valid metric filter key: INITIATION_METHOD

Valid groupings and filters: Queue, Channel, Routing Profile, Feature, contact/segmentAttributes/connect:Subtype

Feature is a valid filter but not a valid grouping.

CONTACTS_HANDLED

Unit: Count

Valid metric filter key: INITIATION_METHOD, DISCONNECT_REASON

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, RoutingStepExpression

Feature is a valid filter but not a valid grouping.

CONTACTS_HANDLED_BY_CONNECTED_TO_AGENT

Unit: Count

Valid metric filter key: INITIATION_METHOD

Valid groupings and filters: Queue, Channel, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype

CONTACTS_HOLD_ABANDONS

Unit: Count

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype

CONTACTS_ON_HOLD_AGENT_DISCONNECT

Unit: Count

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

CONTACTS_ON_HOLD_CUSTOMER_DISCONNECT

Unit: Count

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

CONTACTS_PUT_ON_HOLD

Unit: Count

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

CONTACTS_TRANSFERRED_OUT_EXTERNAL

Unit: Count

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

CONTACTS_TRANSFERRED_OUT_INTERNAL

Unit: Percent

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

CONTACTS_QUEUED

Unit: Count

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype

CONTACTS_QUEUED_BY_ENQUEUE

Unit: Count

Valid groupings and filters: Queue, Channel, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype

CONTACTS_RESOLVED_IN_X

Unit: Count

Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype

Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you must enter LT (for \"Less than\").

CONTACTS_TRANSFERRED_OUT

Unit: Count

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype

Feature is a valid filter but not a valid grouping.

CONTACTS_TRANSFERRED_OUT_BY_AGENT

Unit: Count

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype

CONTACTS_TRANSFERRED_OUT_FROM_QUEUE

Unit: Count

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype

MAX_QUEUED_TIME

Unit: Seconds

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype

PERCENT_CONTACTS_STEP_EXPIRED

Unit: Percent

Valid groupings and filters: Queue, RoutingStepExpression

PERCENT_CONTACTS_STEP_JOINED

Unit: Percent

Valid groupings and filters: Queue, RoutingStepExpression

PERCENT_NON_TALK_TIME

This metric is available only for contacts analyzed by Contact Lens conversational analytics.

Unit: Percentage

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype

PERCENT_TALK_TIME

This metric is available only for contacts analyzed by Contact Lens conversational analytics.

Unit: Percentage

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype

PERCENT_TALK_TIME_AGENT

This metric is available only for contacts analyzed by Contact Lens conversational analytics.

Unit: Percentage

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype

PERCENT_TALK_TIME_CUSTOMER

This metric is available only for contacts analyzed by Contact Lens conversational analytics.

Unit: Percentage

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype

SERVICE_LEVEL

You can include up to 20 SERVICE_LEVEL metrics in a request.

Unit: Percent

Valid groupings and filters: Queue, Channel, Routing Profile

Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you must enter LT (for \"Less than\").

STEP_CONTACTS_QUEUED

Unit: Count

Valid groupings and filters: Queue, RoutingStepExpression

SUM_AFTER_CONTACT_WORK_TIME

Unit: Seconds

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

SUM_CONNECTING_TIME_AGENT

Unit: Seconds

Valid metric filter key: INITIATION_METHOD. This metric only supports the following filter keys as INITIATION_METHOD: INBOUND | OUTBOUND | CALLBACK | API

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

The Negate key in Metric Level Filters is not applicable for this metric.

SUM_CONTACT_FLOW_TIME

Unit: Seconds

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

SUM_CONTACT_TIME_AGENT

Unit: Seconds

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

SUM_CONTACTS_ANSWERED_IN_X

Unit: Count

Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype

Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you must enter LT (for \"Less than\").

SUM_CONTACTS_ABANDONED_IN_X

Unit: Count

Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype

Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you must enter LT (for \"Less than\").

SUM_CONTACTS_DISCONNECTED

Valid metric filter key: DISCONNECT_REASON

Unit: Count

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype

SUM_ERROR_STATUS_TIME_AGENT

Unit: Seconds

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

SUM_HANDLE_TIME

Unit: Seconds

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

SUM_HOLD_TIME

Unit: Count

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

SUM_IDLE_TIME_AGENT

Unit: Seconds

Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy

SUM_INTERACTION_AND_HOLD_TIME

Unit: Seconds

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

SUM_INTERACTION_TIME

Unit: Seconds

Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy

SUM_NON_PRODUCTIVE_TIME_AGENT

Unit: Seconds

Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy

SUM_ONLINE_TIME_AGENT

Unit: Seconds

Valid groupings and filters: Routing Profile, Agent, Agent Hierarchy

SUM_RETRY_CALLBACK_ATTEMPTS

Unit: Count

Valid groupings and filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype

" }, "NextToken":{ "shape":"NextToken2500", @@ -10914,7 +10914,7 @@ "members":{ "Id":{ "shape":"TrafficDistributionGroupIdOrArn", - "documentation":"

The identifier of the traffic distribution group.

", + "documentation":"

The identifier of the traffic distribution group. This can be the ID or the ARN if the API is being called in the Region where the traffic distribution group was created. The ARN must be provided if the call is from the replicated Region.

", "location":"uri", "locationName":"Id" } @@ -10963,7 +10963,7 @@ "GroupingsV2":{ "type":"list", "member":{"shape":"GroupingV2"}, - "max":2 + "max":3 }, "HierarchyGroup":{ "type":"structure", @@ -11625,7 +11625,8 @@ "EARLY_MEDIA", "MULTI_PARTY_CONFERENCE", "HIGH_VOLUME_OUTBOUND", - "ENHANCED_CONTACT_MONITORING" + "ENHANCED_CONTACT_MONITORING", + "ENHANCED_CHAT_MONITORING" ] }, "InstanceAttributeValue":{ @@ -14310,7 +14311,7 @@ }, "AllowedMonitorCapabilities":{ "shape":"AllowedMonitorCapabilities", - "documentation":"

Specify which monitoring actions the user is allowed to take. For example, whether the user is allowed to escalate from silent monitoring to barge.

" + "documentation":"

Specify which monitoring actions the user is allowed to take. For example, whether the user is allowed to escalate from silent monitoring to barge. AllowedMonitorCapabilities is required if barge is enabled.

" }, "ClientToken":{ "shape":"ClientToken", @@ -14536,7 +14537,8 @@ "AGENT", "CUSTOMER", "SYSTEM", - "CUSTOM_BOT" + "CUSTOM_BOT", + "SUPERVISOR" ] }, "ParticipantTimerAction":{ @@ -15057,7 +15059,7 @@ }, "PredefinedAttributeName":{ "type":"string", - "max":128, + "max":64, "min":1 }, "PredefinedAttributeSearchConditionList":{ @@ -15085,13 +15087,13 @@ }, "PredefinedAttributeStringValue":{ "type":"string", - "max":128, + "max":64, "min":1 }, "PredefinedAttributeStringValuesList":{ "type":"list", "member":{"shape":"PredefinedAttributeStringValue"}, - "max":75, + "max":128, "min":1 }, "PredefinedAttributeSummary":{ @@ -20713,7 +20715,7 @@ "members":{ "Id":{ "shape":"TrafficDistributionGroupIdOrArn", - "documentation":"

The identifier of the traffic distribution group. This can be the ID or the ARN if the API is being called in the Region where the traffic distribution group was created. The ARN must be provided if the call is from the replicated Region.

", + "documentation":"

The identifier of the traffic distribution group. This can be the ID or the ARN if the API is being called in the Region where the traffic distribution group was created. The ARN must be provided if the call is from the replicated Region.

", "location":"uri", "locationName":"Id" }, diff -Nru awscli-2.15.9/awscli/botocore/data/connectcampaigns/2021-01-30/endpoint-rule-set-1.json awscli-2.15.22/awscli/botocore/data/connectcampaigns/2021-01-30/endpoint-rule-set-1.json --- awscli-2.15.9/awscli/botocore/data/connectcampaigns/2021-01-30/endpoint-rule-set-1.json 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/awscli/botocore/data/connectcampaigns/2021-01-30/endpoint-rule-set-1.json 2024-02-21 17:34:54.000000000 +0000 @@ -40,7 +40,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -83,7 +82,8 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -96,7 +96,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -110,7 +109,6 @@ "assign": "PartitionResult" } ], - "type": "tree", "rules": [ { "conditions": [ @@ -133,7 +131,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -168,7 +165,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -179,14 +175,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS and DualStack are enabled, but this partition does not support one or both", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -200,14 +198,12 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ - true, { "fn": "getAttr", "argv": [ @@ -216,11 +212,11 @@ }, "supportsFIPS" ] - } + }, + true ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -231,14 +227,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS is enabled but this partition does not support FIPS", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -252,7 +250,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -272,7 +269,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -283,14 +279,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "DualStack is enabled but this 
partition does not support DualStack", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [], @@ -301,9 +299,11 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" } - ] + ], + "type": "tree" }, { "conditions": [], diff -Nru awscli-2.15.9/awscli/botocore/data/connectcampaigns/2021-01-30/service-2.json awscli-2.15.22/awscli/botocore/data/connectcampaigns/2021-01-30/service-2.json --- awscli-2.15.9/awscli/botocore/data/connectcampaigns/2021-01-30/service-2.json 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/awscli/botocore/data/connectcampaigns/2021-01-30/service-2.json 2024-02-21 17:34:54.000000000 +0000 @@ -472,7 +472,8 @@ "type":"string", "documentation":"

The value of the attribute.

", "max":32767, - "min":0 + "min":0, + "pattern":".*" }, "Attributes":{ "type":"map", @@ -530,7 +531,8 @@ "type":"string", "documentation":"

Identifier representing a Campaign

", "max":256, - "min":0 + "min":0, + "pattern":"[\\S]*" }, "CampaignName":{ "type":"string", @@ -573,8 +575,9 @@ "ClientToken":{ "type":"string", "documentation":"

Client provided parameter used for idempotency. Its value must be unique for each request.

", - "max":64, - "min":0 + "max":200, + "min":0, + "pattern":"[a-zA-Z0-9_\\-.]*" }, "ConflictException":{ "type":"structure", @@ -686,6 +689,7 @@ "documentation":"

The phone number of the customer, in E.164 format.

", "max":20, "min":0, + "pattern":"[\\d\\-+]*", "sensitive":true }, "DialRequest":{ @@ -706,7 +710,10 @@ }, "DialRequestId":{ "type":"string", - "documentation":"

Identifier representing a Dial request

" + "documentation":"

Identifier representing a Dial request

", + "max":256, + "min":0, + "pattern":"[a-zA-Z0-9_\\-.]*" }, "DialRequestList":{ "type":"list", @@ -903,7 +910,8 @@ "type":"string", "documentation":"

Amazon Connect Instance Id

", "max":256, - "min":0 + "min":0, + "pattern":"[a-zA-Z0-9_\\-.]*" }, "InstanceIdFilter":{ "type":"structure", diff -Nru awscli-2.15.9/awscli/botocore/data/connectcases/2022-10-03/service-2.json awscli-2.15.22/awscli/botocore/data/connectcases/2022-10-03/service-2.json --- awscli-2.15.9/awscli/botocore/data/connectcases/2022-10-03/service-2.json 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/awscli/botocore/data/connectcases/2022-10-03/service-2.json 2024-02-21 17:34:54.000000000 +0000 @@ -68,7 +68,7 @@ {"shape":"AccessDeniedException"}, {"shape":"ConflictException"} ], - "documentation":"

Creates a case in the specified Cases domain. Case system and custom fields are taken as an array id/value pairs with a declared data types.

The following fields are required when creating a case:

 <ul> <li> <p> <code>customer_id</code> - You must provide the full customer profile ARN in this format: <code>arn:aws:profile:your_AWS_Region:your_AWS_account ID:domains/your_profiles_domain_name/profiles/profile_ID</code> </p> </li> <li> <p> <code>title</code> </p> </li> </ul> 
", + "documentation":"

If you provide a value for PerformedBy.UserArn you must also have connect:DescribeUser permission on the User ARN resource that you provide

 <p>Creates a case in the specified Cases domain. Case system and custom fields are taken as an array id/value pairs with a declared data types.</p> <p>The following fields are required when creating a case:</p> <ul> <li> <p> <code>customer_id</code> - You must provide the full customer profile ARN in this format: <code>arn:aws:profile:your_AWS_Region:your_AWS_account ID:domains/your_profiles_domain_name/profiles/profile_ID</code> </p> </li> <li> <p> <code>title</code> </p> </li> </ul> 
", "idempotent":true }, "CreateDomain":{ @@ -211,6 +211,24 @@ ], "documentation":"

Returns information about a specific case if it exists.

" }, + "GetCaseAuditEvents":{ + "name":"GetCaseAuditEvents", + "http":{ + "method":"POST", + "requestUri":"/domains/{domainId}/cases/{caseId}/audit-history", + "responseCode":200 + }, + "input":{"shape":"GetCaseAuditEventsRequest"}, + "output":{"shape":"GetCaseAuditEventsResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

Returns the audit history about a specific case if it exists.

" + }, "GetCaseEventConfiguration":{ "name":"GetCaseEventConfiguration", "http":{ @@ -515,7 +533,7 @@ {"shape":"ThrottlingException"}, {"shape":"AccessDeniedException"} ], - "documentation":"

Updates the values of fields on a case. Fields to be updated are received as an array of id/value pairs identical to the CreateCase input .

If the action is successful, the service sends back an HTTP 200 response with an empty HTTP body.

" + "documentation":"

If you provide a value for PerformedBy.UserArn you must also have connect:DescribeUser permission on the User ARN resource that you provide

 <p>Updates the values of fields on a case. Fields to be updated are received as an array of id/value pairs identical to the <code>CreateCase</code> input .</p> <p>If the action is successful, the service sends back an HTTP 200 response with an empty HTTP body.</p> 
" }, "UpdateField":{ "name":"UpdateField", @@ -602,6 +620,131 @@ "type":"timestamp", "timestampFormat":"iso8601" }, + "AuditEvent":{ + "type":"structure", + "required":[ + "eventId", + "fields", + "performedTime", + "type" + ], + "members":{ + "eventId":{ + "shape":"AuditEventId", + "documentation":"

Unique identifier of a case audit history event.

" + }, + "fields":{ + "shape":"AuditEventFieldList", + "documentation":"

A list of Case Audit History event fields.

" + }, + "performedBy":{ + "shape":"AuditEventPerformedBy", + "documentation":"

Information of the user which performed the audit.

" + }, + "performedTime":{ + "shape":"AuditEventDateTime", + "documentation":"

Time at which an Audit History event took place.

" + }, + "relatedItemType":{ + "shape":"RelatedItemType", + "documentation":"

The Type of the related item.

" + }, + "type":{ + "shape":"AuditEventType", + "documentation":"

The Type of an audit history event.

" + } + }, + "documentation":"

Represents the content of a particular audit event.

" + }, + "AuditEventDateTime":{ + "type":"timestamp", + "timestampFormat":"iso8601" + }, + "AuditEventField":{ + "type":"structure", + "required":[ + "eventFieldId", + "newValue" + ], + "members":{ + "eventFieldId":{ + "shape":"AuditEventFieldId", + "documentation":"

Unique identifier of field in an Audit History entry.

" + }, + "newValue":{ + "shape":"AuditEventFieldValueUnion", + "documentation":"

Union of potential field value types.

" + }, + "oldValue":{ + "shape":"AuditEventFieldValueUnion", + "documentation":"

Union of potential field value types.

" + } + }, + "documentation":"

Fields for audit event.

" + }, + "AuditEventFieldId":{ + "type":"string", + "max":500, + "min":1 + }, + "AuditEventFieldList":{ + "type":"list", + "member":{"shape":"AuditEventField"} + }, + "AuditEventFieldValueUnion":{ + "type":"structure", + "members":{ + "booleanValue":{ + "shape":"Boolean", + "documentation":"

Can be either null, or have a Boolean value type. Only one value can be provided.

" + }, + "doubleValue":{ + "shape":"Double", + "documentation":"

Can be either null, or have a Double value type. Only one value can be provided.

" + }, + "emptyValue":{"shape":"EmptyFieldValue"}, + "stringValue":{ + "shape":"AuditEventFieldValueUnionStringValueString", + "documentation":"

Can be either null, or have a String value type. Only one value can be provided.

" + }, + "userArnValue":{ + "shape":"String", + "documentation":"

Can be either null, or have a String value type formatted as an ARN. Only one value can be provided.

" + } + }, + "documentation":"

Object to store union of Field values.

This data type is a UNION, so only one of the following members can be specified when used or returned.

", + "union":true + }, + "AuditEventFieldValueUnionStringValueString":{ + "type":"string", + "max":500, + "min":0 + }, + "AuditEventId":{ + "type":"string", + "max":500, + "min":1 + }, + "AuditEventPerformedBy":{ + "type":"structure", + "required":["iamPrincipalArn"], + "members":{ + "iamPrincipalArn":{ + "shape":"IamPrincipalArn", + "documentation":"

Unique identifier of an IAM role.

" + }, + "user":{"shape":"UserUnion"} + }, + "documentation":"

Information of the user which performed the audit.

" + }, + "AuditEventType":{ + "type":"string", + "enum":[ + "Case.Created", + "Case.Updated", + "RelatedItem.Created" + ] + }, "BasicLayout":{ "type":"structure", "members":{ @@ -935,6 +1078,7 @@ "shape":"CreateCaseRequestFieldsList", "documentation":"

An array of objects with field ID (matching ListFields/DescribeField) and value union data.

" }, + "performedBy":{"shape":"UserUnion"}, "templateId":{ "shape":"TemplateId", "documentation":"

A unique identifier of a template.

" @@ -1534,7 +1678,8 @@ "Boolean", "DateTime", "SingleSelect", - "Url" + "Url", + "User" ] }, "FieldValue":{ @@ -1573,9 +1718,13 @@ "stringValue":{ "shape":"FieldValueUnionStringValueString", "documentation":"

String value type.

" + }, + "userArnValue":{ + "shape":"String", + "documentation":"

Represents the user that performed the audit.

" } }, - "documentation":"

Object to store union of Field values.

", + "documentation":"

Object to store union of Field values.

The Summary system field accepts 1500 characters while all other fields accept 500 characters.

", "union":true }, "FieldValueUnionStringValueString":{ @@ -1583,6 +1732,61 @@ "max":1500, "min":0 }, + "GetCaseAuditEventsRequest":{ + "type":"structure", + "required":[ + "caseId", + "domainId" + ], + "members":{ + "caseId":{ + "shape":"CaseId", + "documentation":"

A unique identifier of the case.

", + "location":"uri", + "locationName":"caseId" + }, + "domainId":{ + "shape":"DomainId", + "documentation":"

The unique identifier of the Cases domain.

", + "location":"uri", + "locationName":"domainId" + }, + "maxResults":{ + "shape":"GetCaseAuditEventsRequestMaxResultsInteger", + "documentation":"

The maximum number of audit events to return. The current maximum supported value is 25. This is also the default when no other value is provided.

" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results.

" + } + } + }, + "GetCaseAuditEventsRequestMaxResultsInteger":{ + "type":"integer", + "box":true, + "max":25, + "min":1 + }, + "GetCaseAuditEventsResponse":{ + "type":"structure", + "required":["auditEvents"], + "members":{ + "auditEvents":{ + "shape":"GetCaseAuditEventsResponseAuditEventsList", + "documentation":"

A list of case audits where each represents a particular edit of the case.

" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

The token for the next set of results. This is null if there are no more results to return.

" + } + } + }, + "GetCaseAuditEventsResponseAuditEventsList":{ + "type":"list", + "member":{"shape":"AuditEvent"}, + "max":25, + "min":0 + }, "GetCaseEventConfigurationRequest":{ "type":"structure", "required":["domainId"], @@ -1877,6 +2081,11 @@ } } }, + "IamPrincipalArn":{ + "type":"string", + "max":500, + "min":1 + }, "Integer":{ "type":"integer", "box":true @@ -2842,7 +3051,8 @@ "fields":{ "shape":"UpdateCaseRequestFieldsList", "documentation":"

An array of objects with fieldId (matching ListFields/DescribeField) and value union data, structured identical to CreateCase.

" - } + }, + "performedBy":{"shape":"UserUnion"} } }, "UpdateCaseRequestFieldsList":{ diff -Nru awscli-2.15.9/awscli/botocore/data/connectparticipant/2018-09-07/endpoint-rule-set-1.json awscli-2.15.22/awscli/botocore/data/connectparticipant/2018-09-07/endpoint-rule-set-1.json --- awscli-2.15.9/awscli/botocore/data/connectparticipant/2018-09-07/endpoint-rule-set-1.json 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/awscli/botocore/data/connectparticipant/2018-09-07/endpoint-rule-set-1.json 2024-02-21 17:34:54.000000000 +0000 @@ -40,7 +40,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -83,7 +82,8 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -96,7 +96,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -110,7 +109,6 @@ "assign": "PartitionResult" } ], - "type": "tree", "rules": [ { "conditions": [ @@ -133,7 +131,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -168,7 +165,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -179,14 +175,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS and DualStack are enabled, but this partition does not support one or both", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -200,14 +198,12 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ - true, { "fn": "getAttr", "argv": [ @@ -216,18 +212,17 @@ }, "supportsFIPS" ] - } + }, + true ] } ], - "type": "tree", "rules": [ { "conditions": [ { "fn": "stringEquals", "argv": [ - "aws-us-gov", { "fn": "getAttr", "argv": [ @@ -236,7 +231,8 @@ }, "name" ] - } + }, + "aws-us-gov" ] } ], @@ -256,14 +252,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS is enabled but this partition does not support FIPS", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -277,7 +275,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -297,7 +294,6 @@ ] 
} ], - "type": "tree", "rules": [ { "conditions": [], @@ -308,14 +304,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "DualStack is enabled but this partition does not support DualStack", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [], @@ -326,9 +324,11 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" } - ] + ], + "type": "tree" }, { "conditions": [], diff -Nru awscli-2.15.9/awscli/botocore/data/connectparticipant/2018-09-07/service-2.json awscli-2.15.22/awscli/botocore/data/connectparticipant/2018-09-07/service-2.json --- awscli-2.15.9/awscli/botocore/data/connectparticipant/2018-09-07/service-2.json 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/awscli/botocore/data/connectparticipant/2018-09-07/service-2.json 2024-02-21 17:34:54.000000000 +0000 @@ -29,7 +29,7 @@ {"shape":"ServiceQuotaExceededException"}, {"shape":"ConflictException"} ], - "documentation":"

Allows you to confirm that the attachment has been uploaded using the pre-signed URL provided in StartAttachmentUpload API.

ConnectionToken is used for invoking this API instead of ParticipantToken.

The Amazon Connect Participant Service APIs do not use Signature Version 4 authentication.

" + "documentation":"

Allows you to confirm that the attachment has been uploaded using the pre-signed URL provided in StartAttachmentUpload API. A conflict exception is thrown when an attachment with that identifier is already being uploaded.

ConnectionToken is used for invoking this API instead of ParticipantToken.

The Amazon Connect Participant Service APIs do not use Signature Version 4 authentication.

" }, "CreateParticipantConnection":{ "name":"CreateParticipantConnection", @@ -110,7 +110,7 @@ {"shape":"ThrottlingException"}, {"shape":"ValidationException"} ], - "documentation":"

Retrieves a transcript of the session, including details about any attachments. For information about accessing past chat contact transcripts for a persistent chat, see Enable persistent chat.

ConnectionToken is used for invoking this API instead of ParticipantToken.

The Amazon Connect Participant Service APIs do not use Signature Version 4 authentication.

" + "documentation":"

Retrieves a transcript of the session, including details about any attachments. For information about accessing past chat contact transcripts for a persistent chat, see Enable persistent chat.

If you have a process that consumes events in the transcript of an chat that has ended, note that chat transcripts contain the following event content types if the event has occurred during the chat session:

ConnectionToken is used for invoking this API instead of ParticipantToken.

The Amazon Connect Participant Service APIs do not use Signature Version 4 authentication.

" }, "SendEvent":{ "name":"SendEvent", @@ -124,9 +124,10 @@ {"shape":"AccessDeniedException"}, {"shape":"InternalServerException"}, {"shape":"ThrottlingException"}, - {"shape":"ValidationException"} + {"shape":"ValidationException"}, + {"shape":"ConflictException"} ], - "documentation":"

Sends an event.

ConnectionToken is used for invoking this API instead of ParticipantToken.

The Amazon Connect Participant Service APIs do not use Signature Version 4 authentication.

" + "documentation":"

The application/vnd.amazonaws.connect.event.connection.acknowledged ContentType will no longer be supported starting December 31, 2024. This event has been migrated to the CreateParticipantConnection API using the ConnectParticipant field.

Sends an event. Message receipts are not supported when there are more than two active participants in the chat. Using the SendEvent API for message receipts when a supervisor is barged-in will result in a conflict exception.

ConnectionToken is used for invoking this API instead of ParticipantToken.

The Amazon Connect Participant Service APIs do not use Signature Version 4 authentication.

" }, "SendMessage":{ "name":"SendMessage", @@ -301,7 +302,7 @@ "members":{ "Message":{"shape":"Reason"} }, - "documentation":"

An attachment with that identifier is already being uploaded.

", + "documentation":"

The requested operation conflicts with the current state of a service resource associated with the request.

", "error":{"httpStatusCode":409}, "exception":true }, @@ -634,7 +635,8 @@ "AGENT", "CUSTOMER", "SYSTEM", - "CUSTOM_BOT" + "CUSTOM_BOT", + "SUPERVISOR" ] }, "ParticipantToken":{ @@ -706,7 +708,8 @@ "PARTICIPANT", "HIERARCHY_LEVEL", "HIERARCHY_GROUP", - "USER" + "USER", + "PHONE_NUMBER" ] }, "ScanDirection":{ @@ -725,7 +728,7 @@ "members":{ "ContentType":{ "shape":"ChatContentType", - "documentation":"

The content type of the request. Supported types are:

" + "documentation":"

The content type of the request. Supported types are:

" }, "Content":{ "shape":"ChatContent", diff -Nru awscli-2.15.9/awscli/botocore/data/controltower/2018-05-10/paginators-1.json awscli-2.15.22/awscli/botocore/data/controltower/2018-05-10/paginators-1.json --- awscli-2.15.9/awscli/botocore/data/controltower/2018-05-10/paginators-1.json 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/awscli/botocore/data/controltower/2018-05-10/paginators-1.json 2024-02-21 17:34:54.000000000 +0000 @@ -11,6 +11,18 @@ "output_token": "nextToken", "limit_key": "maxResults", "result_key": "landingZones" + }, + "ListBaselines": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "baselines" + }, + "ListEnabledBaselines": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "enabledBaselines" } } } diff -Nru awscli-2.15.9/awscli/botocore/data/controltower/2018-05-10/service-2.json awscli-2.15.22/awscli/botocore/data/controltower/2018-05-10/service-2.json --- awscli-2.15.9/awscli/botocore/data/controltower/2018-05-10/service-2.json 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/awscli/botocore/data/controltower/2018-05-10/service-2.json 2024-02-21 17:34:54.000000000 +0000 @@ -50,6 +50,27 @@ "documentation":"

Decommissions a landing zone. This API call starts an asynchronous operation that deletes Amazon Web Services Control Tower resources deployed in accounts managed by Amazon Web Services Control Tower.

", "idempotent":true }, + "DisableBaseline":{ + "name":"DisableBaseline", + "http":{ + "method":"POST", + "requestUri":"/disable-baseline", + "responseCode":200 + }, + "input":{"shape":"DisableBaselineInput"}, + "output":{"shape":"DisableBaselineOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Disable an EnabledBaseline resource on the specified Target. This API starts an asynchronous operation to remove all resources deployed as part of the baseline enablement. The resource will vary depending on the enabled baseline.

", + "idempotent":true + }, "DisableControl":{ "name":"DisableControl", "http":{ @@ -68,7 +89,27 @@ {"shape":"ThrottlingException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

This API call turns off a control. It starts an asynchronous operation that deletes Amazon Web Services resources on the specified organizational unit and the accounts it contains. The resources will vary according to the control that you specify. For usage examples, see the Amazon Web Services Control Tower User Guide .

" + "documentation":"

This API call turns off a control. It starts an asynchronous operation that deletes AWS resources on the specified organizational unit and the accounts it contains. The resources will vary according to the control that you specify. For usage examples, see the Amazon Web Services Control Tower User Guide .

" + }, + "EnableBaseline":{ + "name":"EnableBaseline", + "http":{ + "method":"POST", + "requestUri":"/enable-baseline", + "responseCode":200 + }, + "input":{"shape":"EnableBaselineInput"}, + "output":{"shape":"EnableBaselineOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Enable (apply) a Baseline to a Target. This API starts an asynchronous operation to deploy resources specified by the Baseline to the specified Target.

" }, "EnableControl":{ "name":"EnableControl", @@ -90,6 +131,42 @@ ], "documentation":"

This API call activates a control. It starts an asynchronous operation that creates Amazon Web Services resources on the specified organizational unit and the accounts it contains. The resources created will vary according to the control that you specify. For usage examples, see the Amazon Web Services Control Tower User Guide .

" }, + "GetBaseline":{ + "name":"GetBaseline", + "http":{ + "method":"POST", + "requestUri":"/get-baseline", + "responseCode":200 + }, + "input":{"shape":"GetBaselineInput"}, + "output":{"shape":"GetBaselineOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Retrieve details about an existing Baseline resource by specifying its identifier.

" + }, + "GetBaselineOperation":{ + "name":"GetBaselineOperation", + "http":{ + "method":"POST", + "requestUri":"/get-baseline-operation", + "responseCode":200 + }, + "input":{"shape":"GetBaselineOperationInput"}, + "output":{"shape":"GetBaselineOperationOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Returns the details of an asynchronous baseline operation, as initiated by any of these APIs: EnableBaseline, DisableBaseline, UpdateEnabledBaseline, ResetEnabledBaseline. A status message is displayed in case of operation failure.

" + }, "GetControlOperation":{ "name":"GetControlOperation", "http":{ @@ -108,6 +185,24 @@ ], "documentation":"

Returns the status of a particular EnableControl or DisableControl operation. Displays a message in case of error. Details for an operation are available for 90 days. For usage examples, see the Amazon Web Services Control Tower User Guide .

" }, + "GetEnabledBaseline":{ + "name":"GetEnabledBaseline", + "http":{ + "method":"POST", + "requestUri":"/get-enabled-baseline", + "responseCode":200 + }, + "input":{"shape":"GetEnabledBaselineInput"}, + "output":{"shape":"GetEnabledBaselineOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Retrieve details of an EnabledBaseline resource by specifying its identifier.

" + }, "GetEnabledControl":{ "name":"GetEnabledControl", "http":{ @@ -162,6 +257,40 @@ ], "documentation":"

Returns the status of the specified landing zone operation. Details for an operation are available for 60 days.

" }, + "ListBaselines":{ + "name":"ListBaselines", + "http":{ + "method":"POST", + "requestUri":"/list-baselines", + "responseCode":200 + }, + "input":{"shape":"ListBaselinesInput"}, + "output":{"shape":"ListBaselinesOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Returns a summary list of all available baselines.

" + }, + "ListEnabledBaselines":{ + "name":"ListEnabledBaselines", + "http":{ + "method":"POST", + "requestUri":"/list-enabled-baselines", + "responseCode":200 + }, + "input":{"shape":"ListEnabledBaselinesInput"}, + "output":{"shape":"ListEnabledBaselinesOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Returns a list of summaries describing EnabledBaseline resources. You can filter the list by the corresponding Baseline or Target of the EnabledBaseline resources.

" + }, "ListEnabledControls":{ "name":"ListEnabledControls", "http":{ @@ -213,6 +342,26 @@ ], "documentation":"

Returns a list of tags associated with the resource. For usage examples, see the Amazon Web Services Control Tower User Guide .

" }, + "ResetEnabledBaseline":{ + "name":"ResetEnabledBaseline", + "http":{ + "method":"POST", + "requestUri":"/reset-enabled-baseline", + "responseCode":200 + }, + "input":{"shape":"ResetEnabledBaselineInput"}, + "output":{"shape":"ResetEnabledBaselineOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Re-enables an EnabledBaseline resource. For example, this API can re-apply the existing Baseline after a new member account is moved to the target OU.

" + }, "ResetLandingZone":{ "name":"ResetLandingZone", "http":{ @@ -264,6 +413,26 @@ ], "documentation":"

Removes tags from a resource. For usage examples, see the Amazon Web Services Control Tower User Guide .

" }, + "UpdateEnabledBaseline":{ + "name":"UpdateEnabledBaseline", + "http":{ + "method":"POST", + "requestUri":"/update-enabled-baseline", + "responseCode":200 + }, + "input":{"shape":"UpdateEnabledBaselineInput"}, + "output":{"shape":"UpdateEnabledBaselineOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Updates an EnabledBaseline resource's applied parameters or version.

" + }, "UpdateEnabledControl":{ "name":"UpdateEnabledControl", "http":{ @@ -324,6 +493,89 @@ "min":20, "pattern":"^arn:aws[0-9a-zA-Z_\\-:\\/]+$" }, + "BaselineArn":{ + "type":"string", + "pattern":"^arn:[a-z-]+:controltower:[a-z0-9-]*:[0-9]{0,12}:baseline/[A-Z0-9]{16}$" + }, + "BaselineOperation":{ + "type":"structure", + "members":{ + "endTime":{ + "shape":"Timestamp", + "documentation":"

The end time of the operation (if applicable), in ISO 8601 format.

" + }, + "operationIdentifier":{ + "shape":"OperationIdentifier", + "documentation":"

The identifier of the specified operation.

" + }, + "operationType":{ + "shape":"BaselineOperationType", + "documentation":"

An enumerated type (enum) with possible values of ENABLE_BASELINE, DISABLE_BASELINE, UPDATE_ENABLED_BASELINE, or RESET_ENABLED_BASELINE.

" + }, + "startTime":{ + "shape":"Timestamp", + "documentation":"

The start time of the operation, in ISO 8601 format.

" + }, + "status":{ + "shape":"BaselineOperationStatus", + "documentation":"

An enumerated type (enum) with possible values of SUCCEEDED, FAILED, or IN_PROGRESS.

" + }, + "statusMessage":{ + "shape":"String", + "documentation":"

A status message that gives more information about the operation's status, if applicable.

" + } + }, + "documentation":"

An object of shape BaselineOperation, returning details about the specified Baseline operation ID.

" + }, + "BaselineOperationStatus":{ + "type":"string", + "enum":[ + "SUCCEEDED", + "FAILED", + "IN_PROGRESS" + ] + }, + "BaselineOperationType":{ + "type":"string", + "enum":[ + "ENABLE_BASELINE", + "DISABLE_BASELINE", + "UPDATE_ENABLED_BASELINE", + "RESET_ENABLED_BASELINE" + ] + }, + "BaselineSummary":{ + "type":"structure", + "required":[ + "arn", + "name" + ], + "members":{ + "arn":{ + "shape":"String", + "documentation":"

The full ARN of a Baseline.

" + }, + "description":{ + "shape":"String", + "documentation":"

A summary description of a Baseline.

" + }, + "name":{ + "shape":"String", + "documentation":"

The human-readable name of a Baseline.

" + } + }, + "documentation":"

Returns a summary of information about a Baseline object.

" + }, + "BaselineVersion":{ + "type":"string", + "max":10, + "min":1, + "pattern":"^\\d+(?:\\.\\d+){0,2}$" + }, + "Baselines":{ + "type":"list", + "member":{"shape":"BaselineSummary"} + }, "ConflictException":{ "type":"structure", "required":["message"], @@ -394,7 +646,7 @@ "members":{ "manifest":{ "shape":"Manifest", - "documentation":"

The manifest JSON file is a text file that describes your Amazon Web Services resources. For examples, review Launch your landing zone.

" + "documentation":"

The manifest.yaml file is a text file that describes your Amazon Web Services resources. For examples, review The manifest file.

" }, "tags":{ "shape":"TagMap", @@ -443,6 +695,26 @@ } } }, + "DisableBaselineInput":{ + "type":"structure", + "required":["enabledBaselineIdentifier"], + "members":{ + "enabledBaselineIdentifier":{ + "shape":"Arn", + "documentation":"

Identifier of the EnabledBaseline resource to be deactivated, in ARN format.

" + } + } + }, + "DisableBaselineOutput":{ + "type":"structure", + "required":["operationIdentifier"], + "members":{ + "operationIdentifier":{ + "shape":"OperationIdentifier", + "documentation":"

The ID (in UUID format) of the asynchronous DisableBaseline operation. This operationIdentifier is used to track status through calls to the GetBaselineOperation API.

" + } + } + }, "DisableControlInput":{ "type":"structure", "required":[ @@ -452,7 +724,7 @@ "members":{ "controlIdentifier":{ "shape":"ControlIdentifier", - "documentation":"

The ARN of the control. Only Strongly recommended and Elective controls are permitted, with the exception of the landing zone Region deny control. For information on how to find the controlIdentifier, see the overview page.

" + "documentation":"

The ARN of the control. Only Strongly recommended and Elective controls are permitted, with the exception of the Region deny control. For information on how to find the controlIdentifier, see the overview page.

" }, "targetIdentifier":{ "shape":"TargetIdentifier", @@ -495,6 +767,53 @@ }, "documentation":"

The drift summary of the enabled control.

Amazon Web Services Control Tower expects the enabled control configuration to include all supported and governed Regions. If the enabled control differs from the expected configuration, it is defined to be in a state of drift. You can repair this drift by resetting the enabled control.

" }, + "EnableBaselineInput":{ + "type":"structure", + "required":[ + "baselineIdentifier", + "baselineVersion", + "targetIdentifier" + ], + "members":{ + "baselineIdentifier":{ + "shape":"Arn", + "documentation":"

The ARN of the baseline to be enabled.

" + }, + "baselineVersion":{ + "shape":"BaselineVersion", + "documentation":"

The specific version to be enabled of the specified baseline.

" + }, + "parameters":{ + "shape":"EnabledBaselineParameters", + "documentation":"

A list of key-value objects that specify enablement parameters, where key is a string and value is a document of any type.

" + }, + "tags":{ + "shape":"TagMap", + "documentation":"

Tags associated with input to EnableBaseline.

" + }, + "targetIdentifier":{ + "shape":"Arn", + "documentation":"

The ARN of the target on which the baseline will be enabled. Only OUs are supported as targets.

" + } + } + }, + "EnableBaselineOutput":{ + "type":"structure", + "required":[ + "arn", + "operationIdentifier" + ], + "members":{ + "arn":{ + "shape":"Arn", + "documentation":"

The ARN of the EnabledBaseline resource.

" + }, + "operationIdentifier":{ + "shape":"OperationIdentifier", + "documentation":"

The ID (in UUID format) of the asynchronous EnableBaseline operation. This operationIdentifier is used to track status through calls to the GetBaselineOperation API.

" + } + } + }, "EnableControlInput":{ "type":"structure", "required":[ @@ -504,11 +823,11 @@ "members":{ "controlIdentifier":{ "shape":"ControlIdentifier", - "documentation":"

The ARN of the control. Only Strongly recommended and Elective controls are permitted, with the exception of the landing zone Region deny control. For information on how to find the controlIdentifier, see the overview page.

" + "documentation":"

The ARN of the control. Only Strongly recommended and Elective controls are permitted, with the exception of the Region deny control. For information on how to find the controlIdentifier, see the overview page.

" }, "parameters":{ "shape":"EnabledControlParameters", - "documentation":"

An array of EnabledControlParameter objects

" + "documentation":"

A list of input parameter values, which are specified to configure the control when you enable it.

" }, "tags":{ "shape":"TagMap", @@ -534,6 +853,148 @@ } } }, + "EnabledBaselineBaselineIdentifiers":{ + "type":"list", + "member":{"shape":"Arn"}, + "max":5, + "min":1 + }, + "EnabledBaselineDetails":{ + "type":"structure", + "required":[ + "arn", + "baselineIdentifier", + "statusSummary", + "targetIdentifier" + ], + "members":{ + "arn":{ + "shape":"Arn", + "documentation":"

The ARN of the EnabledBaseline resource.

" + }, + "baselineIdentifier":{ + "shape":"String", + "documentation":"

The specific Baseline enabled as part of the EnabledBaseline resource.

" + }, + "baselineVersion":{ + "shape":"String", + "documentation":"

The enabled version of the Baseline.

" + }, + "parameters":{ + "shape":"EnabledBaselineParameterSummaries", + "documentation":"

Shows the parameters that are applied when enabling this Baseline.

" + }, + "statusSummary":{"shape":"EnablementStatusSummary"}, + "targetIdentifier":{ + "shape":"String", + "documentation":"

The target on which to enable the Baseline.

" + } + }, + "documentation":"

Details of the EnabledBaseline resource.

" + }, + "EnabledBaselineFilter":{ + "type":"structure", + "members":{ + "baselineIdentifiers":{ + "shape":"EnabledBaselineBaselineIdentifiers", + "documentation":"

Identifiers for the Baseline objects returned as part of the filter operation.

" + }, + "targetIdentifiers":{ + "shape":"EnabledBaselineTargetIdentifiers", + "documentation":"

Identifiers for the targets of the Baseline filter operation.

" + } + }, + "documentation":"

A filter applied on the ListEnabledBaseline operation. Allowed filters are baselineIdentifiers and targetIdentifiers. The filter can be applied for either, or both.

" + }, + "EnabledBaselineParameter":{ + "type":"structure", + "required":[ + "key", + "value" + ], + "members":{ + "key":{ + "shape":"String", + "documentation":"

A string denoting the parameter key.

" + }, + "value":{ + "shape":"EnabledBaselineParameterDocument", + "documentation":"

A low-level Document object of any type (for example, a Java Object).

" + } + }, + "documentation":"

A key-value parameter to an EnabledBaseline resource.

" + }, + "EnabledBaselineParameterDocument":{ + "type":"structure", + "members":{ + }, + "document":true + }, + "EnabledBaselineParameterSummaries":{ + "type":"list", + "member":{"shape":"EnabledBaselineParameterSummary"} + }, + "EnabledBaselineParameterSummary":{ + "type":"structure", + "required":[ + "key", + "value" + ], + "members":{ + "key":{ + "shape":"String", + "documentation":"

A string denoting the parameter key.

" + }, + "value":{ + "shape":"EnabledBaselineParameterDocument", + "documentation":"

A low-level document object of any type (for example, a Java Object).

" + } + }, + "documentation":"

Summary of an applied parameter to an EnabledBaseline resource.

" + }, + "EnabledBaselineParameters":{ + "type":"list", + "member":{"shape":"EnabledBaselineParameter"} + }, + "EnabledBaselineSummary":{ + "type":"structure", + "required":[ + "arn", + "baselineIdentifier", + "statusSummary", + "targetIdentifier" + ], + "members":{ + "arn":{ + "shape":"Arn", + "documentation":"

The ARN of the EnabledBaseline resource.

" + }, + "baselineIdentifier":{ + "shape":"String", + "documentation":"

The specific baseline that is enabled as part of the EnabledBaseline resource.

" + }, + "baselineVersion":{ + "shape":"String", + "documentation":"

The enabled version of the baseline.

" + }, + "statusSummary":{"shape":"EnablementStatusSummary"}, + "targetIdentifier":{ + "shape":"String", + "documentation":"

The target upon which the baseline is enabled.

" + } + }, + "documentation":"

Returns a summary of information about an EnabledBaseline object.

" + }, + "EnabledBaselineTargetIdentifiers":{ + "type":"list", + "member":{"shape":"Arn"}, + "max":5, + "min":1 + }, + "EnabledBaselines":{ + "type":"list", + "member":{"shape":"EnabledBaselineSummary"} + }, "EnabledControlDetails":{ "type":"structure", "members":{ @@ -577,14 +1038,14 @@ "members":{ "key":{ "shape":"String", - "documentation":"

The key of a key/value pair. It is of type string.

" + "documentation":"

The key of a key/value pair.

" }, "value":{ "shape":"Document", - "documentation":"

The value of a key/value pair. It can be of type array string, number, object, or boolean.

" + "documentation":"

The value of a key/value pair.

" } }, - "documentation":"

A set of parameters that configure the behavior of the enabled control. A key/value pair, where Key is of type String and Value is of type Document.

" + "documentation":"

A key/value pair, where Key is of type String and Value is of type Document.

" }, "EnabledControlParameterSummaries":{ "type":"list", @@ -664,6 +1125,57 @@ }, "documentation":"

The deployment summary of the enabled control.

" }, + "GetBaselineInput":{ + "type":"structure", + "required":["baselineIdentifier"], + "members":{ + "baselineIdentifier":{ + "shape":"BaselineArn", + "documentation":"

The ARN of the Baseline resource to be retrieved.

" + } + } + }, + "GetBaselineOperationInput":{ + "type":"structure", + "required":["operationIdentifier"], + "members":{ + "operationIdentifier":{ + "shape":"OperationIdentifier", + "documentation":"

The operation ID returned from mutating asynchronous APIs (Enable, Disable, Update, Reset).

" + } + } + }, + "GetBaselineOperationOutput":{ + "type":"structure", + "required":["baselineOperation"], + "members":{ + "baselineOperation":{ + "shape":"BaselineOperation", + "documentation":"

A baselineOperation object that shows information about the specified operation ID.

" + } + } + }, + "GetBaselineOutput":{ + "type":"structure", + "required":[ + "arn", + "name" + ], + "members":{ + "arn":{ + "shape":"BaselineArn", + "documentation":"

The baseline ARN.

" + }, + "description":{ + "shape":"String", + "documentation":"

A description of the baseline.

" + }, + "name":{ + "shape":"String", + "documentation":"

A user-friendly name for the baseline.

" + } + } + }, "GetControlOperationInput":{ "type":"structure", "required":["operationIdentifier"], @@ -684,6 +1196,25 @@ } } }, + "GetEnabledBaselineInput":{ + "type":"structure", + "required":["enabledBaselineIdentifier"], + "members":{ + "enabledBaselineIdentifier":{ + "shape":"Arn", + "documentation":"

Identifier of the EnabledBaseline resource to be retrieved, in ARN format.

" + } + } + }, + "GetEnabledBaselineOutput":{ + "type":"structure", + "members":{ + "enabledBaselineDetails":{ + "shape":"EnabledBaselineDetails", + "documentation":"

Details of the EnabledBaseline resource.

" + } + } + }, "GetEnabledControlInput":{ "type":"structure", "required":["enabledControlIdentifier"], @@ -781,11 +1312,11 @@ }, "manifest":{ "shape":"Manifest", - "documentation":"

The landing zone manifest JSON text file that specifies the landing zone configurations.

" + "documentation":"

The landing zone manifest.yaml text file that specifies the landing zone configurations.

" }, "status":{ "shape":"LandingZoneStatus", - "documentation":"

The landing zone deployment status.

" + "documentation":"

The landing zone deployment status. One of ACTIVE, PROCESSING, FAILED.

" }, "version":{ "shape":"LandingZoneVersion", @@ -878,6 +1409,80 @@ "min":3, "pattern":"^\\d+.\\d+$" }, + "ListBaselinesInput":{ + "type":"structure", + "members":{ + "maxResults":{ + "shape":"ListBaselinesMaxResults", + "documentation":"

The maximum number of results to be shown.

" + }, + "nextToken":{ + "shape":"String", + "documentation":"

A pagination token.

" + } + } + }, + "ListBaselinesMaxResults":{ + "type":"integer", + "box":true, + "max":100, + "min":4 + }, + "ListBaselinesOutput":{ + "type":"structure", + "required":["baselines"], + "members":{ + "baselines":{ + "shape":"Baselines", + "documentation":"

A list of Baseline object details.

" + }, + "nextToken":{ + "shape":"String", + "documentation":"

A pagination token.

" + } + } + }, + "ListEnabledBaselinesInput":{ + "type":"structure", + "members":{ + "filter":{ + "shape":"EnabledBaselineFilter", + "documentation":"

A filter applied on the ListEnabledBaseline operation. Allowed filters are baselineIdentifiers and targetIdentifiers. The filter can be applied for either, or both.

" + }, + "maxResults":{ + "shape":"ListEnabledBaselinesMaxResults", + "documentation":"

The maximum number of results to be shown.

" + }, + "nextToken":{ + "shape":"ListEnabledBaselinesNextToken", + "documentation":"

A pagination token.

" + } + } + }, + "ListEnabledBaselinesMaxResults":{ + "type":"integer", + "box":true, + "max":100, + "min":5 + }, + "ListEnabledBaselinesNextToken":{ + "type":"string", + "pattern":"\\S+" + }, + "ListEnabledBaselinesOutput":{ + "type":"structure", + "required":["enabledBaselines"], + "members":{ + "enabledBaselines":{ + "shape":"EnabledBaselines", + "documentation":"

Returns a list of summaries of EnabledBaseline resources.

" + }, + "nextToken":{ + "shape":"ListEnabledBaselinesNextToken", + "documentation":"

A pagination token.

" + } + } + }, "ListEnabledControlsInput":{ "type":"structure", "required":["targetIdentifier"], @@ -1004,6 +1609,26 @@ "max":50, "min":1 }, + "ResetEnabledBaselineInput":{ + "type":"structure", + "required":["enabledBaselineIdentifier"], + "members":{ + "enabledBaselineIdentifier":{ + "shape":"Arn", + "documentation":"

Specifies the ID of the EnabledBaseline resource to be re-enabled, in ARN format.

" + } + } + }, + "ResetEnabledBaselineOutput":{ + "type":"structure", + "required":["operationIdentifier"], + "members":{ + "operationIdentifier":{ + "shape":"OperationIdentifier", + "documentation":"

The ID (in UUID format) of the asynchronous ResetEnabledBaseline operation. This operationIdentifier is used to track status through calls to the GetBaselineOperation API.

" + } + } + }, "ResetLandingZoneInput":{ "type":"structure", "required":["landingZoneIdentifier"], @@ -1123,13 +1748,13 @@ }, "retryAfterSeconds":{ "shape":"Integer", - "documentation":"

The number of seconds to wait before retrying.

", + "documentation":"

The number of seconds the caller should wait before retrying.

", "location":"header", "locationName":"Retry-After" }, "serviceCode":{ "shape":"String", - "documentation":"

The ID of the service that is associated with the error.

" + "documentation":"

The ID of the service that is associated with the error.

" } }, "documentation":"

The request was denied due to request throttling.

", @@ -1170,6 +1795,37 @@ "members":{ } }, + "UpdateEnabledBaselineInput":{ + "type":"structure", + "required":[ + "baselineVersion", + "enabledBaselineIdentifier" + ], + "members":{ + "baselineVersion":{ + "shape":"BaselineVersion", + "documentation":"

Specifies the new Baseline version, to which the EnabledBaseline should be updated.

" + }, + "enabledBaselineIdentifier":{ + "shape":"Arn", + "documentation":"

Specifies the EnabledBaseline resource to be updated.

" + }, + "parameters":{ + "shape":"EnabledBaselineParameters", + "documentation":"

Parameters to apply when making an update.

" + } + } + }, + "UpdateEnabledBaselineOutput":{ + "type":"structure", + "required":["operationIdentifier"], + "members":{ + "operationIdentifier":{ + "shape":"OperationIdentifier", + "documentation":"

The ID (in UUID format) of the asynchronous UpdateEnabledBaseline operation. This operationIdentifier is used to track status through calls to the GetBaselineOperation API.

" + } + } + }, "UpdateEnabledControlInput":{ "type":"structure", "required":[ @@ -1211,7 +1867,7 @@ }, "manifest":{ "shape":"Manifest", - "documentation":"

The manifest JSON file is a text file that describes your Amazon Web Services resources. For examples, review Launch your landing zone.

" + "documentation":"

The manifest.yaml file is a text file that describes your Amazon Web Services resources. For examples, review The manifest file.

" }, "version":{ "shape":"LandingZoneVersion", diff -Nru awscli-2.15.9/awscli/botocore/data/cost-optimization-hub/2022-07-26/paginators-1.sdk-extras.json awscli-2.15.22/awscli/botocore/data/cost-optimization-hub/2022-07-26/paginators-1.sdk-extras.json --- awscli-2.15.9/awscli/botocore/data/cost-optimization-hub/2022-07-26/paginators-1.sdk-extras.json 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/awscli/botocore/data/cost-optimization-hub/2022-07-26/paginators-1.sdk-extras.json 2024-02-21 17:34:54.000000000 +0000 @@ -2,6 +2,11 @@ "version": 1.0, "merge": { "pagination": { + "ListEnrollmentStatuses": { + "non_aggregate_keys": [ + "includeMemberAccounts" + ] + }, "ListRecommendationSummaries": { "non_aggregate_keys": [ "groupBy", diff -Nru awscli-2.15.9/awscli/botocore/data/cost-optimization-hub/2022-07-26/service-2.json awscli-2.15.22/awscli/botocore/data/cost-optimization-hub/2022-07-26/service-2.json --- awscli-2.15.9/awscli/botocore/data/cost-optimization-hub/2022-07-26/service-2.json 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/awscli/botocore/data/cost-optimization-hub/2022-07-26/service-2.json 2024-02-21 17:34:54.000000000 +0000 @@ -844,7 +844,7 @@ "members":{ "accountId":{ "shape":"AccountId", - "documentation":"

The enrollment status of a specific account ID in the organization.

" + "documentation":"

The account ID of a member account in the organization.

" }, "includeOrganizationInfo":{ "shape":"PrimitiveBoolean", @@ -863,9 +863,13 @@ "ListEnrollmentStatusesResponse":{ "type":"structure", "members":{ + "includeMemberAccounts":{ + "shape":"Boolean", + "documentation":"

The enrollment status of all member accounts in the organization if the account is the management account.

" + }, "items":{ "shape":"AccountEnrollmentStatuses", - "documentation":"

The account enrollment statuses.

" + "documentation":"

The enrollment status of a specific account ID, including creation and last updated timestamps.

" }, "nextToken":{ "shape":"String", diff -Nru awscli-2.15.9/awscli/botocore/data/datasync/2018-11-09/service-2.json awscli-2.15.22/awscli/botocore/data/datasync/2018-11-09/service-2.json --- awscli-2.15.9/awscli/botocore/data/datasync/2018-11-09/service-2.json 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/awscli/botocore/data/datasync/2018-11-09/service-2.json 2024-02-21 17:34:54.000000000 +0000 @@ -69,7 +69,7 @@ {"shape":"InvalidRequestException"}, {"shape":"InternalException"} ], - "documentation":"

Creates an endpoint for a Microsoft Azure Blob Storage container that DataSync can use as a transfer source or destination.

Before you begin, make sure you know how DataSync accesses Azure Blob Storage and works with access tiers and blob types. You also need a DataSync agent that can connect to your container.

" + "documentation":"

Creates a transfer location for a Microsoft Azure Blob Storage container. DataSync can use this location as a transfer source or destination.

Before you begin, make sure you know how DataSync accesses Azure Blob Storage and works with access tiers and blob types. You also need a DataSync agent that can connect to your container.

" }, "CreateLocationEfs":{ "name":"CreateLocationEfs", @@ -83,7 +83,7 @@ {"shape":"InvalidRequestException"}, {"shape":"InternalException"} ], - "documentation":"

Creates an endpoint for an Amazon EFS file system that DataSync can access for a transfer. For more information, see Creating a location for Amazon EFS.

" + "documentation":"

Creates a transfer location for an Amazon EFS file system. DataSync can use this location as a source or destination for transferring data.

Before you begin, make sure that you understand how DataSync accesses Amazon EFS file systems.

" }, "CreateLocationFsxLustre":{ "name":"CreateLocationFsxLustre", @@ -97,7 +97,7 @@ {"shape":"InvalidRequestException"}, {"shape":"InternalException"} ], - "documentation":"

Creates an endpoint for an Amazon FSx for Lustre file system.

" + "documentation":"

Creates a transfer location for an Amazon FSx for Lustre file system. DataSync can use this location as a source or destination for transferring data.

Before you begin, make sure that you understand how DataSync accesses FSx for Lustre file systems.

" }, "CreateLocationFsxOntap":{ "name":"CreateLocationFsxOntap", @@ -111,7 +111,7 @@ {"shape":"InvalidRequestException"}, {"shape":"InternalException"} ], - "documentation":"

Creates an endpoint for an Amazon FSx for NetApp ONTAP file system that DataSync can use for a data transfer.

Before you begin, make sure that you understand how DataSync accesses an FSx for ONTAP file system.

" + "documentation":"

Creates a transfer location for an Amazon FSx for NetApp ONTAP file system. DataSync can use this location as a source or destination for transferring data.

Before you begin, make sure that you understand how DataSync accesses FSx for ONTAP file systems.

" }, "CreateLocationFsxOpenZfs":{ "name":"CreateLocationFsxOpenZfs", @@ -125,7 +125,7 @@ {"shape":"InvalidRequestException"}, {"shape":"InternalException"} ], - "documentation":"

Creates an endpoint for an Amazon FSx for OpenZFS file system that DataSync can access for a transfer. For more information, see Creating a location for FSx for OpenZFS.

Request parameters related to SMB aren't supported with the CreateLocationFsxOpenZfs operation.

" + "documentation":"

Creates a transfer location for an Amazon FSx for OpenZFS file system. DataSync can use this location as a source or destination for transferring data.

Before you begin, make sure that you understand how DataSync accesses FSx for OpenZFS file systems.

Request parameters related to SMB aren't supported with the CreateLocationFsxOpenZfs operation.

" }, "CreateLocationFsxWindows":{ "name":"CreateLocationFsxWindows", @@ -139,7 +139,7 @@ {"shape":"InvalidRequestException"}, {"shape":"InternalException"} ], - "documentation":"

Creates an endpoint for an Amazon FSx for Windows File Server file system that DataSync can use for a data transfer.

Before you begin, make sure that you understand how DataSync accesses an FSx for Windows File Server.

" + "documentation":"

Creates a transfer location for an Amazon FSx for Windows File Server file system. DataSync can use this location as a source or destination for transferring data.

Before you begin, make sure that you understand how DataSync accesses FSx for Windows File Server file systems.

" }, "CreateLocationHdfs":{ "name":"CreateLocationHdfs", @@ -153,7 +153,7 @@ {"shape":"InvalidRequestException"}, {"shape":"InternalException"} ], - "documentation":"

Creates an endpoint for a Hadoop Distributed File System (HDFS).

" + "documentation":"

Creates a transfer location for a Hadoop Distributed File System (HDFS). DataSync can use this location as a source or destination for transferring data.

Before you begin, make sure that you understand how DataSync accesses HDFS clusters.

" }, "CreateLocationNfs":{ "name":"CreateLocationNfs", @@ -167,7 +167,7 @@ {"shape":"InvalidRequestException"}, {"shape":"InternalException"} ], - "documentation":"

Creates an endpoint for a Network File System (NFS) file server that DataSync can use for a data transfer.

For more information, see Configuring transfers to or from an NFS file server.

If you're copying data to or from an Snowcone device, you can also use CreateLocationNfs to create your transfer location. For more information, see Configuring transfers with Snowcone.

" + "documentation":"

Creates a transfer location for a Network File System (NFS) file server. DataSync can use this location as a source or destination for transferring data.

Before you begin, make sure that you understand how DataSync accesses NFS file servers.

If you're copying data to or from a Snowcone device, you can also use CreateLocationNfs to create your transfer location. For more information, see Configuring transfers with Snowcone.

" }, "CreateLocationObjectStorage":{ "name":"CreateLocationObjectStorage", @@ -181,7 +181,7 @@ {"shape":"InvalidRequestException"}, {"shape":"InternalException"} ], - "documentation":"

Creates an endpoint for an object storage system that DataSync can access for a transfer. For more information, see Creating a location for object storage.

" + "documentation":"

Creates a transfer location for an object storage system. DataSync can use this location as a source or destination for transferring data.

Before you begin, make sure that you understand the prerequisites for DataSync to work with object storage systems.

" }, "CreateLocationS3":{ "name":"CreateLocationS3", @@ -195,7 +195,7 @@ {"shape":"InvalidRequestException"}, {"shape":"InternalException"} ], - "documentation":"

A location is an endpoint for an Amazon S3 bucket. DataSync can use the location as a source or destination for copying data.

Before you create your location, make sure that you read the following sections:

For more information, see Creating an Amazon S3 location.

" + "documentation":"

Creates a transfer location for an Amazon S3 bucket. DataSync can use this location as a source or destination for transferring data.

Before you begin, make sure that you read the following topics:

For more information, see Configuring transfers with Amazon S3.

" }, "CreateLocationSmb":{ "name":"CreateLocationSmb", @@ -209,7 +209,7 @@ {"shape":"InvalidRequestException"}, {"shape":"InternalException"} ], - "documentation":"

Creates an endpoint for a Server Message Block (SMB) file server that DataSync can use for a data transfer.

Before you begin, make sure that you understand how DataSync accesses an SMB file server.

" + "documentation":"

Creates a transfer location for a Server Message Block (SMB) file server. DataSync can use this location as a source or destination for transferring data.

Before you begin, make sure that you understand how DataSync accesses SMB file servers.

" }, "CreateTask":{ "name":"CreateTask", @@ -251,7 +251,7 @@ {"shape":"InvalidRequestException"}, {"shape":"InternalException"} ], - "documentation":"

Deletes the configuration of a location used by DataSync.

" + "documentation":"

Deletes a transfer location resource from DataSync.

" }, "DeleteTask":{ "name":"DeleteTask", @@ -265,7 +265,7 @@ {"shape":"InvalidRequestException"}, {"shape":"InternalException"} ], - "documentation":"

Deletes an DataSync transfer task.

" + "documentation":"

Deletes a transfer task resource from DataSync.

" }, "DescribeAgent":{ "name":"DescribeAgent", @@ -322,7 +322,7 @@ {"shape":"InvalidRequestException"}, {"shape":"InternalException"} ], - "documentation":"

Returns metadata about your DataSync location for an Amazon EFS file system.

" + "documentation":"

Provides details about how an DataSync transfer location for an Amazon EFS file system is configured.

" }, "DescribeLocationFsxLustre":{ "name":"DescribeLocationFsxLustre", @@ -336,7 +336,7 @@ {"shape":"InvalidRequestException"}, {"shape":"InternalException"} ], - "documentation":"

Provides details about how an DataSync location for an Amazon FSx for Lustre file system is configured.

" + "documentation":"

Provides details about how an DataSync transfer location for an Amazon FSx for Lustre file system is configured.

" }, "DescribeLocationFsxOntap":{ "name":"DescribeLocationFsxOntap", @@ -350,7 +350,7 @@ {"shape":"InvalidRequestException"}, {"shape":"InternalException"} ], - "documentation":"

Provides details about how an DataSync location for an Amazon FSx for NetApp ONTAP file system is configured.

If your location uses SMB, the DescribeLocationFsxOntap operation doesn't actually return a Password.

" + "documentation":"

Provides details about how an DataSync transfer location for an Amazon FSx for NetApp ONTAP file system is configured.

If your location uses SMB, the DescribeLocationFsxOntap operation doesn't actually return a Password.

" }, "DescribeLocationFsxOpenZfs":{ "name":"DescribeLocationFsxOpenZfs", @@ -364,7 +364,7 @@ {"shape":"InvalidRequestException"}, {"shape":"InternalException"} ], - "documentation":"

Provides details about how an DataSync location for an Amazon FSx for OpenZFS file system is configured.

Response elements related to SMB aren't supported with the DescribeLocationFsxOpenZfs operation.

" + "documentation":"

Provides details about how an DataSync transfer location for an Amazon FSx for OpenZFS file system is configured.

Response elements related to SMB aren't supported with the DescribeLocationFsxOpenZfs operation.

" }, "DescribeLocationFsxWindows":{ "name":"DescribeLocationFsxWindows", @@ -378,7 +378,7 @@ {"shape":"InvalidRequestException"}, {"shape":"InternalException"} ], - "documentation":"

Returns metadata about an Amazon FSx for Windows File Server location, such as information about its path.

" + "documentation":"

Provides details about how an DataSync transfer location for an Amazon FSx for Windows File Server file system is configured.

" }, "DescribeLocationHdfs":{ "name":"DescribeLocationHdfs", @@ -392,7 +392,7 @@ {"shape":"InvalidRequestException"}, {"shape":"InternalException"} ], - "documentation":"

Returns metadata, such as the authentication information about the Hadoop Distributed File System (HDFS) location.

" + "documentation":"

Provides details about how an DataSync transfer location for a Hadoop Distributed File System (HDFS) is configured.

" }, "DescribeLocationNfs":{ "name":"DescribeLocationNfs", @@ -420,7 +420,7 @@ {"shape":"InvalidRequestException"}, {"shape":"InternalException"} ], - "documentation":"

Returns metadata about your DataSync location for an object storage system.

" + "documentation":"

Provides details about how an DataSync transfer location for an object storage system is configured.

" }, "DescribeLocationS3":{ "name":"DescribeLocationS3", @@ -434,7 +434,7 @@ {"shape":"InvalidRequestException"}, {"shape":"InternalException"} ], - "documentation":"

Returns metadata, such as bucket name, about an Amazon S3 bucket location.

" + "documentation":"

Provides details about how an DataSync transfer location for an S3 bucket is configured.

" }, "DescribeLocationSmb":{ "name":"DescribeLocationSmb", @@ -448,7 +448,7 @@ {"shape":"InvalidRequestException"}, {"shape":"InternalException"} ], - "documentation":"

Returns metadata, such as the path and user information about an SMB location.

" + "documentation":"

Provides details about how an DataSync transfer location for a Server Message Block (SMB) file server is configured.

" }, "DescribeStorageSystem":{ "name":"DescribeStorageSystem", @@ -622,7 +622,7 @@ {"shape":"InvalidRequestException"}, {"shape":"InternalException"} ], - "documentation":"

Returns a list of executed tasks.

" + "documentation":"

Returns a list of executions for an DataSync transfer task.

" }, "ListTasks":{ "name":"ListTasks", @@ -851,7 +851,7 @@ {"shape":"InvalidRequestException"}, {"shape":"InternalException"} ], - "documentation":"

Updates the configuration of a DataSync transfer task.

" + "documentation":"

Updates the configuration of an DataSync transfer task.

" }, "UpdateTaskExecution":{ "name":"UpdateTaskExecution", @@ -1353,15 +1353,15 @@ }, "User":{ "shape":"SmbUser", - "documentation":"

Specifies the user who has the permissions to access files, folders, and metadata in your file system.

For information about choosing a user with the right level of access for your transfer, see required permissions for FSx for Windows File Server locations.

" + "documentation":"

Specifies the user with the permissions to mount and access the files, folders, and file metadata in your FSx for Windows File Server file system.

For information about choosing a user with the right level of access for your transfer, see required permissions for FSx for Windows File Server locations.

" }, "Domain":{ "shape":"SmbDomain", - "documentation":"

Specifies the name of the Windows domain that the FSx for Windows File Server belongs to.

If you have multiple domains in your environment, configuring this parameter makes sure that DataSync connects to the right file server.

For more information, see required permissions for FSx for Windows File Server locations.

" + "documentation":"

Specifies the name of the Microsoft Active Directory domain that the FSx for Windows File Server file system belongs to.

If you have multiple Active Directory domains in your environment, configuring this parameter makes sure that DataSync connects to the right file system.

" }, "Password":{ "shape":"SmbPassword", - "documentation":"

Specifies the password of the user who has the permissions to access files and folders in the file system.

For more information, see required permissions for FSx for Windows File Server locations.

" + "documentation":"

Specifies the password of the user with the permissions to mount and access the files, folders, and file metadata in your FSx for Windows File Server file system.

" } } }, @@ -1556,24 +1556,24 @@ "members":{ "Subdirectory":{ "shape":"S3Subdirectory", - "documentation":"

A subdirectory in the Amazon S3 bucket. This subdirectory in Amazon S3 is used to read data from the S3 source location or write data to the S3 destination.

" + "documentation":"

Specifies a prefix in the S3 bucket that DataSync reads from or writes to (depending on whether the bucket is a source or destination location).

DataSync can't transfer objects with a prefix that begins with a slash (/) or includes //, /./, or /../ patterns. For example:

  • /photos

  • photos//2006/January

  • photos/./2006/February

  • photos/../2006/March

" }, "S3BucketArn":{ "shape":"S3BucketArn", - "documentation":"

The ARN of the Amazon S3 bucket. If the bucket is on an Amazon Web Services Outpost, this must be an access point ARN.

" + "documentation":"

Specifies the ARN of the S3 bucket that you want to use as a location. (When creating your DataSync task later, you specify whether this location is a transfer source or destination.)

If your S3 bucket is located on an Outposts resource, you must specify an Amazon S3 access point. For more information, see Managing data access with Amazon S3 access points in the Amazon S3 User Guide.

" }, "S3StorageClass":{ "shape":"S3StorageClass", - "documentation":"

The Amazon S3 storage class that you want to store your files in when this location is used as a task destination. For buckets in Amazon Web Services Regions, the storage class defaults to Standard. For buckets on Outposts, the storage class defaults to Amazon Web Services S3 Outposts.

For more information about S3 storage classes, see Amazon S3 Storage Classes. Some storage classes have behaviors that can affect your S3 storage cost. For detailed information, see Considerations when working with S3 storage classes in DataSync.

" + "documentation":"

Specifies the storage class that you want your objects to use when Amazon S3 is a transfer destination.

For buckets in Amazon Web Services Regions, the storage class defaults to STANDARD. For buckets on Outposts, the storage class defaults to OUTPOSTS.

For more information, see Storage class considerations with Amazon S3 transfers.

" }, "S3Config":{"shape":"S3Config"}, "AgentArns":{ "shape":"AgentArnList", - "documentation":"

If you're using DataSync on an Amazon Web Services Outpost, specify the Amazon Resource Names (ARNs) of the DataSync agents deployed on your Outpost. For more information about launching a DataSync agent on an Amazon Web Services Outpost, see Deploy your DataSync agent on Outposts.

" + "documentation":"

(Amazon S3 on Outposts only) Specifies the Amazon Resource Name (ARN) of the DataSync agent on your Outpost.

For more information, see Deploy your DataSync agent on Outposts.

" }, "Tags":{ "shape":"InputTagList", - "documentation":"

The key-value pair that represents the tag that you want to add to the location. The value can be an empty string. We recommend using tags to name your resources.

" + "documentation":"

Specifies labels that help you categorize, filter, and search for your Amazon Web Services resources. We recommend creating at least a name tag for your transfer location.

" } }, "documentation":"

CreateLocationS3Request

" @@ -1583,7 +1583,7 @@ "members":{ "LocationArn":{ "shape":"LocationArn", - "documentation":"

The Amazon Resource Name (ARN) of the source Amazon S3 bucket location that is created.

" + "documentation":"

The ARN of the S3 location that you created.

" } }, "documentation":"

CreateLocationS3Response

" @@ -1600,7 +1600,7 @@ "members":{ "Subdirectory":{ "shape":"SmbSubdirectory", - "documentation":"

Specifies the name of the share exported by your SMB file server where DataSync will read or write data. You can include a subdirectory in the share path (for example, /path/to/subdirectory). Make sure that other SMB clients in your network can also mount this path.

To copy all data in the specified subdirectory, DataSync must be able to mount the SMB share and access all of its data. For more information, see required permissions for SMB locations.

" + "documentation":"

Specifies the name of the share exported by your SMB file server where DataSync will read or write data. You can include a subdirectory in the share path (for example, /path/to/subdirectory). Make sure that other SMB clients in your network can also mount this path.

To copy all data in the subdirectory, DataSync must be able to mount the SMB share and access all of its data. For more information, see required permissions for SMB locations.

" }, "ServerHostname":{ "shape":"ServerHostname", @@ -1608,11 +1608,11 @@ }, "User":{ "shape":"SmbUser", - "documentation":"

Specifies the user name that can mount your SMB file server and has permission to access the files and folders involved in your transfer.

For information about choosing a user with the right level of access for your transfer, see required permissions for SMB locations.

" + "documentation":"

Specifies the user that can mount and access the files, folders, and file metadata in your SMB file server.

For information about choosing a user with the right level of access for your transfer, see required permissions for SMB locations.

" }, "Domain":{ "shape":"SmbDomain", - "documentation":"

Specifies the Windows domain name that your SMB file server belongs to.

If you have multiple domains in your environment, configuring this parameter makes sure that DataSync connects to the right file server.

For more information, see required permissions for SMB locations.

" + "documentation":"

Specifies the name of the Active Directory domain that your SMB file server belongs to.

If you have multiple Active Directory domains in your environment, configuring this parameter makes sure that DataSync connects to the right file server.

" }, "Password":{ "shape":"SmbPassword", @@ -1686,9 +1686,13 @@ "shape":"FilterList", "documentation":"

Specifies a list of filter rules that include specific data during your transfer. For more information and examples, see Filtering data transferred by DataSync.

" }, + "ManifestConfig":{ + "shape":"ManifestConfig", + "documentation":"

Configures a manifest, which is a list of files or objects that you want DataSync to transfer. For more information and configuration examples, see Specifying what DataSync transfers by using a manifest.

When using this parameter, your caller identity (the role that you're using DataSync with) must have the iam:PassRole permission. The AWSDataSyncFullAccess policy includes this permission.

" + }, "TaskReportConfig":{ "shape":"TaskReportConfig", - "documentation":"

Specifies how you want to configure a task report, which provides detailed information about for your DataSync transfer.

" + "documentation":"

Specifies how you want to configure a task report, which provides detailed information about your DataSync transfer. For more information, see Monitoring your DataSync transfers with task reports.

When using this parameter, your caller identity (the role that you're using DataSync with) must have the iam:PassRole permission. The AWSDataSyncFullAccess policy includes this permission.

" } }, "documentation":"

CreateTaskRequest

" @@ -2054,7 +2058,7 @@ "members":{ "LocationArn":{ "shape":"LocationArn", - "documentation":"

The Amazon Resource Name (ARN) of the FSx for Windows File Server location to describe.

" + "documentation":"

Specifies the Amazon Resource Name (ARN) of the FSx for Windows File Server location.

" } } }, @@ -2063,15 +2067,15 @@ "members":{ "LocationArn":{ "shape":"LocationArn", - "documentation":"

The Amazon Resource Name (ARN) of the FSx for Windows File Server location that was described.

" + "documentation":"

The ARN of the FSx for Windows File Server location.

" }, "LocationUri":{ "shape":"LocationUri", - "documentation":"

The URL of the FSx for Windows File Server location that was described.

" + "documentation":"

The uniform resource identifier (URI) of the FSx for Windows File Server location.

" }, "SecurityGroupArns":{ "shape":"Ec2SecurityGroupArnList", - "documentation":"

The Amazon Resource Names (ARNs) of the security groups that are configured for the FSx for Windows File Server file system.

" + "documentation":"

The ARNs of the security groups that are configured for the FSx for Windows File Server file system.

" }, "CreationTime":{ "shape":"Time", @@ -2079,11 +2083,11 @@ }, "User":{ "shape":"SmbUser", - "documentation":"

The user who has the permissions to access files and folders in the FSx for Windows File Server file system.

" + "documentation":"

The user with the permissions to mount and access the FSx for Windows File Server file system.

" }, "Domain":{ "shape":"SmbDomain", - "documentation":"

The name of the Windows domain that the FSx for Windows File Server belongs to.

" + "documentation":"

The name of the Microsoft Active Directory domain that the FSx for Windows File Server file system belongs to.

" } } }, @@ -2093,7 +2097,7 @@ "members":{ "LocationArn":{ "shape":"LocationArn", - "documentation":"

The Amazon Resource Name (ARN) of the HDFS cluster location to describe.

" + "documentation":"

Specifies the Amazon Resource Name (ARN) of the HDFS location.

" } } }, @@ -2102,15 +2106,15 @@ "members":{ "LocationArn":{ "shape":"LocationArn", - "documentation":"

The ARN of the HDFS cluster location.

" + "documentation":"

The ARN of the HDFS location.

" }, "LocationUri":{ "shape":"LocationUri", - "documentation":"

The URI of the HDFS cluster location.

" + "documentation":"

The URI of the HDFS location.

" }, "NameNodes":{ "shape":"HdfsNameNodeList", - "documentation":"

The NameNode that manage the HDFS namespace.

" + "documentation":"

The NameNode that manages the HDFS namespace.

" }, "BlockSize":{ "shape":"HdfsBlockSize", @@ -2126,7 +2130,7 @@ }, "QopConfiguration":{ "shape":"QopConfiguration", - "documentation":"

The Quality of Protection (QOP) configuration specifies the Remote Procedure Call (RPC) and data transfer protection settings configured on the Hadoop Distributed File System (HDFS) cluster.

" + "documentation":"

The Quality of Protection (QOP) configuration, which specifies the Remote Procedure Call (RPC) and data transfer protection settings configured on the HDFS cluster.

" }, "AuthenticationType":{ "shape":"HdfsAuthenticationType", @@ -2134,7 +2138,7 @@ }, "SimpleUser":{ "shape":"HdfsUser", - "documentation":"

The user name used to identify the client on the host operating system. This parameter is used if the AuthenticationType is defined as SIMPLE.

" + "documentation":"

The user name to identify the client on the host operating system. This parameter is used if the AuthenticationType is defined as SIMPLE.

" }, "KerberosPrincipal":{ "shape":"KerberosPrincipal", @@ -2142,7 +2146,7 @@ }, "AgentArns":{ "shape":"AgentArnList", - "documentation":"

The ARNs of the agents that are used to connect to the HDFS cluster.

" + "documentation":"

The ARNs of the DataSync agents that can connect with your HDFS cluster.

" }, "CreationTime":{ "shape":"Time", @@ -2170,7 +2174,7 @@ }, "LocationUri":{ "shape":"LocationUri", - "documentation":"

The URL of the NFS location.

" + "documentation":"

The URI of the NFS location.

" }, "OnPremConfig":{"shape":"OnPremConfig"}, "MountOptions":{ @@ -2190,7 +2194,7 @@ "members":{ "LocationArn":{ "shape":"LocationArn", - "documentation":"

The Amazon Resource Name (ARN) of the object storage system location that you want information about.

" + "documentation":"

Specifies the Amazon Resource Name (ARN) of the object storage system location.

" } }, "documentation":"

DescribeLocationObjectStorageRequest

" @@ -2204,7 +2208,7 @@ }, "LocationUri":{ "shape":"LocationUri", - "documentation":"

The URL of the object storage system location.

" + "documentation":"

The URI of the object storage system location.

" }, "AccessKey":{ "shape":"ObjectStorageAccessKey", @@ -2220,7 +2224,7 @@ }, "AgentArns":{ "shape":"AgentArnList", - "documentation":"

The ARNs of the DataSync agents that can securely connect with your location.

" + "documentation":"

The ARNs of the DataSync agents that can connect with your object storage system.

" }, "CreationTime":{ "shape":"Time", @@ -2239,7 +2243,7 @@ "members":{ "LocationArn":{ "shape":"LocationArn", - "documentation":"

The Amazon Resource Name (ARN) of the Amazon S3 bucket location to describe.

" + "documentation":"

Specifies the Amazon Resource Name (ARN) of the Amazon S3 location.

" } }, "documentation":"

DescribeLocationS3Request

" @@ -2249,7 +2253,7 @@ "members":{ "LocationArn":{ "shape":"LocationArn", - "documentation":"

The Amazon Resource Name (ARN) of the Amazon S3 bucket or access point.

" + "documentation":"

The ARN of the Amazon S3 location.

" }, "LocationUri":{ "shape":"LocationUri", @@ -2257,16 +2261,16 @@ }, "S3StorageClass":{ "shape":"S3StorageClass", - "documentation":"

The Amazon S3 storage class that you chose to store your files in when this location is used as a task destination. For more information about S3 storage classes, see Amazon S3 Storage Classes. Some storage classes have behaviors that can affect your S3 storage cost. For detailed information, see Considerations when working with S3 storage classes in DataSync.

" + "documentation":"

When Amazon S3 is a destination location, this is the storage class that you chose for your objects.

Some storage classes have behaviors that can affect your Amazon S3 storage costs. For more information, see Storage class considerations with Amazon S3 transfers.

" }, "S3Config":{"shape":"S3Config"}, "AgentArns":{ "shape":"AgentArnList", - "documentation":"

If you are using DataSync on an Amazon Web Services Outpost, the Amazon Resource Name (ARNs) of the EC2 agents deployed on your Outpost. For more information about launching a DataSync agent on an Amazon Web Services Outpost, see Deploy your DataSync agent on Outposts.

" + "documentation":"

The ARNs of the DataSync agents deployed on your Outpost when working with Amazon S3 on Outposts.

For more information, see Deploy your DataSync agent on Outposts.

" }, "CreationTime":{ "shape":"Time", - "documentation":"

The time that the Amazon S3 bucket location was created.

" + "documentation":"

The time that the Amazon S3 location was created.

" } }, "documentation":"

DescribeLocationS3Response

" @@ -2277,7 +2281,7 @@ "members":{ "LocationArn":{ "shape":"LocationArn", - "documentation":"

The Amazon Resource Name (ARN) of the SMB location to describe.

" + "documentation":"

Specifies the Amazon Resource Name (ARN) of the SMB location that you want information about.

" } }, "documentation":"

DescribeLocationSmbRequest

" @@ -2287,27 +2291,27 @@ "members":{ "LocationArn":{ "shape":"LocationArn", - "documentation":"

The Amazon Resource Name (ARN) of the SMB location that was described.

" + "documentation":"

The ARN of the SMB location.

" }, "LocationUri":{ "shape":"LocationUri", - "documentation":"

The URL of the source SMB location that was described.

" + "documentation":"

The URI of the SMB location.

" }, "AgentArns":{ "shape":"AgentArnList", - "documentation":"

The Amazon Resource Name (ARN) of the source SMB file system location that is created.

" + "documentation":"

The ARNs of the DataSync agents that can connect with your SMB file server.

" }, "User":{ "shape":"SmbUser", - "documentation":"

The user who can mount the share, has the permissions to access files and folders in the SMB share.

" + "documentation":"

The user that can mount and access the files, folders, and file metadata in your SMB file server.

" }, "Domain":{ "shape":"SmbDomain", - "documentation":"

The name of the Windows domain that the SMB server belongs to.

" + "documentation":"

The name of the Microsoft Active Directory domain that the SMB file server belongs to.

" }, "MountOptions":{ "shape":"SmbMountOptions", - "documentation":"

The mount options that are available for DataSync to use to access an SMB location.

" + "documentation":"

The protocol that DataSync uses to access your SMB file server.

" }, "CreationTime":{ "shape":"Time", @@ -2499,6 +2503,10 @@ "shape":"FilterList", "documentation":"

A list of filter rules that include specific data during your transfer. For more information and examples, see Filtering data transferred by DataSync.

" }, + "ManifestConfig":{ + "shape":"ManifestConfig", + "documentation":"

The configuration of the manifest that lists the files or objects to transfer. For more information, see Specifying what DataSync transfers by using a manifest.

" + }, "StartTime":{ "shape":"Time", "documentation":"

The time when the task execution started.

" @@ -2523,17 +2531,17 @@ "shape":"long", "documentation":"

The total number of bytes that are involved in the transfer. For the number of bytes sent over the network, see BytesCompressed.

" }, - "Result":{ - "shape":"TaskExecutionResultDetail", - "documentation":"

The result of the task execution.

" - }, "BytesCompressed":{ "shape":"long", "documentation":"

The physical number of bytes transferred over the network after compression was applied. In most cases, this number is less than BytesTransferred unless the data isn't compressible.

" }, + "Result":{ + "shape":"TaskExecutionResultDetail", + "documentation":"

The result of the task execution.

" + }, "TaskReportConfig":{ "shape":"TaskReportConfig", - "documentation":"

The configuration of your task report, which provides detailed information about for your DataSync transfer.

" + "documentation":"

The configuration of your task report, which provides detailed information about your DataSync transfer. For more information, see Creating a task report.

" }, "FilesDeleted":{ "shape":"long", @@ -2636,9 +2644,13 @@ "shape":"FilterList", "documentation":"

A list of filter rules that include specific data during your transfer. For more information and examples, see Filtering data transferred by DataSync.

" }, + "ManifestConfig":{ + "shape":"ManifestConfig", + "documentation":"

The configuration of the manifest that lists the files or objects to transfer. For more information, see Specifying what DataSync transfers by using a manifest.

" + }, "TaskReportConfig":{ "shape":"TaskReportConfig", - "documentation":"

The configuration of your task report. For more information, see Creating a task report.

" + "documentation":"

The configuration of your task report, which provides detailed information about your DataSync transfer. For more information, see Creating a task report.

" } }, "documentation":"

DescribeTaskResponse

" @@ -2919,7 +2931,7 @@ "members":{ "Domain":{ "shape":"SmbDomain", - "documentation":"

Specifies the fully qualified domain name (FQDN) of the Microsoft Active Directory that your storage virtual machine (SVM) belongs to.

" + "documentation":"

Specifies the fully qualified domain name (FQDN) of the Microsoft Active Directory that your storage virtual machine (SVM) belongs to.

If you have multiple domains in your environment, configuring this setting makes sure that DataSync connects to the right SVM.

" }, "MountOptions":{"shape":"SmbMountOptions"}, "Password":{ @@ -2928,7 +2940,7 @@ }, "User":{ "shape":"SmbUser", - "documentation":"

Specifies a user name that can mount the location and access the files, folders, and metadata that you need in the SVM.

If you provide a user in your Active Directory, note the following:

Make sure that the user has the permissions it needs to copy the data you want:

" + "documentation":"

Specifies a user that can mount and access the files, folders, and metadata in your SVM.

For information about choosing a user with the right level of access for your transfer, see Using the SMB protocol.

" } }, "documentation":"

Specifies the Server Message Block (SMB) protocol configuration that DataSync uses to access your Amazon FSx for NetApp ONTAP file system. For more information, see Accessing FSx for ONTAP file systems.

" @@ -3302,15 +3314,15 @@ "members":{ "TaskArn":{ "shape":"TaskArn", - "documentation":"

The Amazon Resource Name (ARN) of the task whose tasks you want to list.

" + "documentation":"

Specifies the Amazon Resource Name (ARN) of the task that you want execution information about.

" }, "MaxResults":{ "shape":"MaxResults", - "documentation":"

The maximum number of executed tasks to list.

" + "documentation":"

Specifies how many results you want in the response.

" }, "NextToken":{ "shape":"NextToken", - "documentation":"

An opaque string that indicates the position at which to begin the next list of the executed tasks.

" + "documentation":"

Specifies an opaque string that indicates the position at which to begin the next list of results in the response.

" } }, "documentation":"

ListTaskExecutions

" @@ -3320,11 +3332,11 @@ "members":{ "TaskExecutions":{ "shape":"TaskExecutionList", - "documentation":"

A list of executed tasks.

" + "documentation":"

A list of the task's executions.

" }, "NextToken":{ "shape":"NextToken", - "documentation":"

An opaque string that indicates the position at which to begin returning the next list of executed tasks.

" + "documentation":"

The opaque string that indicates the position to begin the next list of results in the response.

" } }, "documentation":"

ListTaskExecutionsResponse

" @@ -3437,6 +3449,32 @@ "TRANSFER" ] }, + "ManifestAction":{ + "type":"string", + "enum":["TRANSFER"] + }, + "ManifestConfig":{ + "type":"structure", + "members":{ + "Action":{ + "shape":"ManifestAction", + "documentation":"

Specifies what DataSync uses the manifest for.

" + }, + "Format":{ + "shape":"ManifestFormat", + "documentation":"

Specifies the file format of your manifest. For more information, see Creating a manifest.

" + }, + "Source":{ + "shape":"SourceManifestConfig", + "documentation":"

Specifies the manifest that you want DataSync to use and where it's hosted.

You must specify this parameter if you're configuring a new manifest on or after February 7, 2024.

If you don't, you'll get a 400 status code and ValidationException error stating that you're missing the IAM role for DataSync to access the S3 bucket where you're hosting your manifest. For more information, see Providing DataSync access to your manifest.

" + } + }, + "documentation":"

Configures a manifest, which is a list of files or objects that you want DataSync to transfer. For more information and configuration examples, see Specifying what DataSync transfers by using a manifest.

" + }, + "ManifestFormat":{ + "type":"string", + "enum":["CSV"] + }, "MaxP95Performance":{ "type":"structure", "members":{ @@ -3828,39 +3866,39 @@ "members":{ "VerifyMode":{ "shape":"VerifyMode", - "documentation":"

Specifies how and when DataSync checks the integrity of your data during a transfer.

Default value: POINT_IN_TIME_CONSISTENT

ONLY_FILES_TRANSFERRED (recommended): DataSync calculates the checksum of transferred files and metadata at the source location. At the end of the transfer, DataSync then compares this checksum to the checksum calculated on those files at the destination.

We recommend this option when transferring to S3 Glacier Flexible Retrieval or S3 Glacier Deep Archive storage classes. For more information, see Storage class considerations with Amazon S3 locations.

POINT_IN_TIME_CONSISTENT: At the end of the transfer, DataSync scans the entire source and destination to verify that both locations are fully synchronized.

You can't use this option when transferring to S3 Glacier Flexible Retrieval or S3 Glacier Deep Archive storage classes. For more information, see Storage class considerations with Amazon S3 locations.

NONE: DataSync doesn't run additional verification at the end of the transfer. All data transmissions are still integrity-checked with checksum verification during the transfer.

" + "documentation":"

Specifies how and when DataSync checks the integrity of your data during a transfer.

" }, "OverwriteMode":{ "shape":"OverwriteMode", - "documentation":"

Specifies whether data at the destination location should be overwritten or preserved. If set to NEVER, a destination file for example will not be replaced by a source file (even if the destination file differs from the source file). If you modify files in the destination and you sync the files, you can use this value to protect against overwriting those changes.

Some storage classes have specific behaviors that can affect your Amazon S3 storage cost. For detailed information, see Considerations when working with Amazon S3 storage classes in DataSync.

" + "documentation":"

Specifies whether DataSync should modify or preserve data at the destination location.

" }, "Atime":{ "shape":"Atime", - "documentation":"

Specifies whether to preserve metadata indicating the last time a file was read or written to. If you set Atime to BEST_EFFORT, DataSync attempts to preserve the original Atime attribute on all source files (that is, the version before the PREPARING phase of the task execution).

The behavior of Atime isn't fully standard across platforms, so DataSync can only do this on a best-effort basis.

Default value: BEST_EFFORT

BEST_EFFORT: Attempt to preserve the per-file Atime value (recommended).

NONE: Ignore Atime.

If Atime is set to BEST_EFFORT, Mtime must be set to PRESERVE.

If Atime is set to NONE, Mtime must also be NONE.

" + "documentation":"

Specifies whether to preserve metadata indicating the last time a file was read or written to.

The behavior of Atime isn't fully standard across platforms, so DataSync can only do this on a best-effort basis.

If Atime is set to BEST_EFFORT, Mtime must be set to PRESERVE.

If Atime is set to NONE, Mtime must also be NONE.

" }, "Mtime":{ "shape":"Mtime", - "documentation":"

Specifies whether to preserve metadata indicating the last time that a file was written to before the PREPARING phase of your task execution. This option is required when you need to run the a task more than once.

Default Value: PRESERVE

PRESERVE: Preserve original Mtime (recommended)

NONE: Ignore Mtime.

If Mtime is set to PRESERVE, Atime must be set to BEST_EFFORT.

If Mtime is set to NONE, Atime must also be set to NONE.

" + "documentation":"

Specifies whether to preserve metadata indicating the last time that a file was written to before the PREPARING phase of your task execution. This option is required when you need to run a task more than once.

If Mtime is set to PRESERVE, Atime must be set to BEST_EFFORT.

If Mtime is set to NONE, Atime must also be set to NONE.

" }, "Uid":{ "shape":"Uid", - "documentation":"

Specifies the POSIX user ID (UID) of the file's owner.

For more information, see Metadata copied by DataSync.

Default value: INT_VALUE. This preserves the integer value of the ID.

INT_VALUE: Preserve the integer value of UID and group ID (GID) (recommended).

NONE: Ignore UID and GID.

" + "documentation":"

Specifies the POSIX user ID (UID) of the file's owner.

For more information, see Metadata copied by DataSync.

" }, "Gid":{ "shape":"Gid", - "documentation":"

Specifies the POSIX group ID (GID) of the file's owners.

For more information, see Metadata copied by DataSync.

Default value: INT_VALUE. This preserves the integer value of the ID.

INT_VALUE: Preserve the integer value of user ID (UID) and GID (recommended).

NONE: Ignore UID and GID.

" + "documentation":"

Specifies the POSIX group ID (GID) of the file's owners.

For more information, see Metadata copied by DataSync.

" }, "PreserveDeletedFiles":{ "shape":"PreserveDeletedFiles", - "documentation":"

Specifies whether files in the destination location that don't exist in the source should be preserved. This option can affect your Amazon S3 storage cost. If your task deletes objects, you might incur minimum storage duration charges for certain storage classes. For detailed information, see Considerations when working with Amazon S3 storage classes in DataSync.

Default value: PRESERVE

PRESERVE: Ignore such destination files (recommended).

REMOVE: Delete destination files that aren’t present in the source.

If you set this parameter to REMOVE, you can't set TransferMode to ALL. When you transfer all data, DataSync doesn't scan your destination location and doesn't know what to delete.

" + "documentation":"

Specifies whether files in the destination location that don't exist in the source should be preserved. This option can affect your Amazon S3 storage cost. If your task deletes objects, you might incur minimum storage duration charges for certain storage classes. For detailed information, see Considerations when working with Amazon S3 storage classes in DataSync.

If you set this parameter to REMOVE, you can't set TransferMode to ALL. When you transfer all data, DataSync doesn't scan your destination location and doesn't know what to delete.

" }, "PreserveDevices":{ "shape":"PreserveDevices", - "documentation":"

Specifies whether DataSync should preserve the metadata of block and character devices in the source location and recreate the files with that device name and metadata on the destination. DataSync copies only the name and metadata of such devices.

DataSync can't copy the actual contents of these devices because they're nonterminal and don't return an end-of-file (EOF) marker.

Default value: NONE

NONE: Ignore special devices (recommended).

PRESERVE: Preserve character and block device metadata. This option currently isn't supported for Amazon EFS.

" + "documentation":"

Specifies whether DataSync should preserve the metadata of block and character devices in the source location and recreate the files with that device name and metadata on the destination. DataSync copies only the name and metadata of such devices.

DataSync can't copy the actual contents of these devices because they're nonterminal and don't return an end-of-file (EOF) marker.

" }, "PosixPermissions":{ "shape":"PosixPermissions", - "documentation":"

Specifies which users or groups can access a file for a specific purpose such as reading, writing, or execution of the file.

For more information, see Metadata copied by DataSync.

Default value: PRESERVE

PRESERVE: Preserve POSIX-style permissions (recommended).

NONE: Ignore permissions.

DataSync can preserve extant permissions of a source location.

" + "documentation":"

Specifies which users or groups can access a file for a specific purpose such as reading, writing, or execution of the file.

For more information, see Metadata copied by DataSync.

DataSync can preserve extant permissions of a source location.

" }, "BytesPerSecond":{ "shape":"BytesPerSecond", @@ -3872,19 +3910,19 @@ }, "LogLevel":{ "shape":"LogLevel", - "documentation":"

Specifies the type of logs that DataSync publishes to a Amazon CloudWatch Logs log group. To specify the log group, see CloudWatchLogGroupArn.

If you set LogLevel to OFF, no logs are published. BASIC publishes logs on errors for individual files transferred. TRANSFER publishes logs for every file or object that is transferred and integrity checked.

" + "documentation":"

Specifies the type of logs that DataSync publishes to a Amazon CloudWatch Logs log group. To specify the log group, see CloudWatchLogGroupArn.

" }, "TransferMode":{ "shape":"TransferMode", - "documentation":"

Determines whether DataSync transfers only the data and metadata that differ between the source and the destination location or transfers all the content from the source (without comparing what's in the destination).

CHANGED: DataSync copies only data or metadata that is new or different content from the source location to the destination location.

ALL: DataSync copies all source location content to the destination (without comparing what's in the destination).

" + "documentation":"

Determines whether DataSync transfers only the data and metadata that differ between the source and the destination location or transfers all the content from the source (without comparing what's in the destination).

" }, "SecurityDescriptorCopyFlags":{ "shape":"SmbSecurityDescriptorCopyFlags", - "documentation":"

Specifies which components of the SMB security descriptor are copied from source to destination objects.

This value is only used for transfers between SMB and Amazon FSx for Windows File Server locations or between two FSx for Windows File Server locations. For more information, see how DataSync handles metadata.

Default value: OWNER_DACL

OWNER_DACL: For each copied object, DataSync copies the following metadata:

OWNER_DACL_SACL: For each copied object, DataSync copies the following metadata:

NONE: None of the SMB security descriptor components are copied. Destination objects are owned by the user that was provided for accessing the destination location. DACLs and SACLs are set based on the destination server’s configuration.

" + "documentation":"

Specifies which components of the SMB security descriptor are copied from source to destination objects.

This value is only used for transfers between SMB and Amazon FSx for Windows File Server locations or between two FSx for Windows File Server locations. For more information, see how DataSync handles metadata.

" }, "ObjectTags":{ "shape":"ObjectTags", - "documentation":"

Specifies whether object tags are preserved when transferring between object storage systems. If you want your DataSync task to ignore object tags, specify the NONE value.

Default Value: PRESERVE

" + "documentation":"

Specifies whether you want DataSync to PRESERVE object tags (default behavior) when transferring between object storage systems. If you want your DataSync task to ignore object tags, specify the NONE value.

" } }, "documentation":"

Indicates how your transfer task is configured. These options include how DataSync handles files, objects, and their associated metadata during your transfer. You also can specify how to verify data integrity, set bandwidth limits for your task, among other options.

Each option has a default value. Unless you need to, you don't have to configure any of these options before starting your task.

" @@ -3945,7 +3983,7 @@ "members":{ "Version":{ "shape":"AgentVersion", - "documentation":"

The version of the DataSync agent.

Beginning December 7, 2023, we will discontinue version 1 DataSync agents. Check the DataSync console to see if you have affected agents. If you do, replace those agents before then to avoid data transfer or storage discovery disruptions. If you need more help, contact Amazon Web Services Support.

" + "documentation":"

The version of the DataSync agent.

On December 7, 2023, we discontinued version 1 DataSync agents. Check the DataSync console to see if you have affected agents. If you do, replace those agents or delete them if they aren't in use. If you need more help, contact Amazon Web Services Support.

" } }, "documentation":"

The platform-related details about the DataSync agent, such as the version number.

" @@ -4107,7 +4145,7 @@ }, "BucketAccessRoleArn":{ "shape":"IamRoleArn", - "documentation":"

Specifies the Amazon Resource Name (ARN) of the IAM policy that allows DataSync to upload a task report to your S3 bucket. For more information, see Allowing DataSync to upload a task report to an Amazon S3 bucket.

" + "documentation":"

Specifies the Amazon Resource Name (ARN) of the IAM policy that allows DataSync to upload a task report to your S3 bucket. For more information, see Allowing DataSync to upload a task report to an Amazon S3 bucket.

" } }, "documentation":"

Specifies the Amazon S3 bucket where DataSync uploads your task report.

" @@ -4246,10 +4284,43 @@ "members":{ "BucketAccessRoleArn":{ "shape":"IamRoleArn", - "documentation":"

The ARN of the IAM role for accessing the S3 bucket.

" + "documentation":"

Specifies the ARN of the IAM role that DataSync uses to access your S3 bucket.

" } }, - "documentation":"

The Amazon Resource Name (ARN) of the Identity and Access Management (IAM) role used to access an Amazon S3 bucket.

For detailed information about using such a role, see Creating a Location for Amazon S3 in the DataSync User Guide.

" + "documentation":"

Specifies the Amazon Resource Name (ARN) of the Identity and Access Management (IAM) role that DataSync uses to access your S3 bucket.

For more information, see Accessing S3 buckets.

" + }, + "S3ManifestConfig":{ + "type":"structure", + "required":[ + "ManifestObjectPath", + "BucketAccessRoleArn", + "S3BucketArn" + ], + "members":{ + "ManifestObjectPath":{ + "shape":"S3Subdirectory", + "documentation":"

Specifies the Amazon S3 object key of your manifest. This can include a prefix (for example, prefix/my-manifest.csv).

" + }, + "BucketAccessRoleArn":{ + "shape":"IamRoleArn", + "documentation":"

Specifies the Identity and Access Management (IAM) role that allows DataSync to access your manifest. For more information, see Providing DataSync access to your manifest.

" + }, + "S3BucketArn":{ + "shape":"S3BucketArn", + "documentation":"

Specifies the Amazon Resource Name (ARN) of the S3 bucket where you're hosting your manifest.

" + }, + "ManifestObjectVersionId":{ + "shape":"S3ObjectVersionId", + "documentation":"

Specifies the object version ID of the manifest that you want DataSync to use. If you don't set this, DataSync uses the latest version of the object.

" + } + }, + "documentation":"

Specifies the S3 bucket where you're hosting the manifest that you want DataSync to use. For more information and configuration examples, see Specifying what DataSync transfers by using a manifest.

" + }, + "S3ObjectVersionId":{ + "type":"string", + "max":100, + "min":1, + "pattern":"^.+$" }, "S3StorageClass":{ "type":"string", @@ -4333,6 +4404,17 @@ "SMB2_0" ] }, + "SourceManifestConfig":{ + "type":"structure", + "required":["S3"], + "members":{ + "S3":{ + "shape":"S3ManifestConfig", + "documentation":"

Specifies the S3 bucket where you're hosting your manifest.

" + } + }, + "documentation":"

Specifies the manifest that you want DataSync to use and where it's hosted. For more information and configuration examples, see Specifying what DataSync transfers by using a manifest.

" + }, "SourceNetworkInterfaceArns":{ "type":"list", "member":{"shape":"NetworkInterfaceArn"} @@ -4390,13 +4472,17 @@ "shape":"FilterList", "documentation":"

Specifies a list of filter rules that determines which files to exclude from a task. The list contains a single filter string that consists of the patterns to exclude. The patterns are delimited by \"|\" (that is, a pipe), for example, \"/folder1|/folder2\".

" }, - "Tags":{ - "shape":"InputTagList", - "documentation":"

Specifies the tags that you want to apply to the Amazon Resource Name (ARN) representing the task execution.

Tags are key-value pairs that help you manage, filter, and search for your DataSync resources.

" + "ManifestConfig":{ + "shape":"ManifestConfig", + "documentation":"

Configures a manifest, which is a list of files or objects that you want DataSync to transfer. For more information and configuration examples, see Specifying what DataSync transfers by using a manifest.

When using this parameter, your caller identity (the role that you're using DataSync with) must have the iam:PassRole permission. The AWSDataSyncFullAccess policy includes this permission.

To remove a manifest configuration, specify this parameter with an empty value.

" }, "TaskReportConfig":{ "shape":"TaskReportConfig", - "documentation":"

Specifies how you want to configure a task report, which provides detailed information about for your DataSync transfer.

" + "documentation":"

Specifies how you want to configure a task report, which provides detailed information about your DataSync transfer. For more information, see Monitoring your DataSync transfers with task reports.

When using this parameter, your caller identity (the role that you're using DataSync with) must have the iam:PassRole permission. The AWSDataSyncFullAccess policy includes this permission.

To remove a task report configuration, specify this parameter as empty.

" + }, + "Tags":{ + "shape":"InputTagList", + "documentation":"

Specifies the tags that you want to apply to the Amazon Resource Name (ARN) representing the task execution.

Tags are key-value pairs that help you manage, filter, and search for your DataSync resources.

" } }, "documentation":"

StartTaskExecutionRequest

" @@ -4542,14 +4628,14 @@ "members":{ "TaskExecutionArn":{ "shape":"TaskExecutionArn", - "documentation":"

The Amazon Resource Name (ARN) of the task that was executed.

" + "documentation":"

The Amazon Resource Name (ARN) of a task execution.

" }, "Status":{ "shape":"TaskExecutionStatus", - "documentation":"

The status of a task execution.

" + "documentation":"

The status of a task execution. For more information, see Task execution statuses.

" } }, - "documentation":"

Represents a single entry in a list of task executions. TaskExecutionListEntry returns an array that contains a list of specific invocations of a task when the ListTaskExecutions operation is called.

" + "documentation":"

Represents a single entry in a list of DataSync task executions that's returned with the ListTaskExecutions operation.

" }, "TaskExecutionResultDetail":{ "type":"structure", @@ -5105,9 +5191,13 @@ "shape":"FilterList", "documentation":"

Specifies a list of filter rules that include specific data during your transfer. For more information and examples, see Filtering data transferred by DataSync.

" }, + "ManifestConfig":{ + "shape":"ManifestConfig", + "documentation":"

Configures a manifest, which is a list of files or objects that you want DataSync to transfer. For more information and configuration examples, see Specifying what DataSync transfers by using a manifest.

When using this parameter, your caller identity (the IAM role that you're using DataSync with) must have the iam:PassRole permission. The AWSDataSyncFullAccess policy includes this permission.

To remove a manifest configuration, specify this parameter as empty.

" + }, "TaskReportConfig":{ "shape":"TaskReportConfig", - "documentation":"

Specifies how you want to configure a task report, which provides detailed information about for your DataSync transfer.

" + "documentation":"

Specifies how you want to configure a task report, which provides detailed information about your DataSync transfer. For more information, see Monitoring your DataSync transfers with task reports.

When using this parameter, your caller identity (the IAM role that you're using DataSync with) must have the iam:PassRole permission. The AWSDataSyncFullAccess policy includes this permission.

To remove a task report configuration, specify this parameter as empty.

" } }, "documentation":"

UpdateTaskResponse

" diff -Nru awscli-2.15.9/awscli/botocore/data/datazone/2018-05-10/endpoint-rule-set-1.json awscli-2.15.22/awscli/botocore/data/datazone/2018-05-10/endpoint-rule-set-1.json --- awscli-2.15.9/awscli/botocore/data/datazone/2018-05-10/endpoint-rule-set-1.json 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/awscli/botocore/data/datazone/2018-05-10/endpoint-rule-set-1.json 2024-02-21 17:34:54.000000000 +0000 @@ -33,7 +33,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -61,7 +60,8 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -74,7 +74,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -88,7 +87,6 @@ "assign": "PartitionResult" } ], - "type": "tree", "rules": [ { "conditions": [ @@ -108,7 +106,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -122,14 +119,12 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ - true, { "fn": "getAttr", "argv": [ @@ -138,11 +133,11 @@ }, "supportsFIPS" ] - } + }, + true ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -153,14 +148,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS is enabled but this partition does not support FIPS", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [], @@ -171,7 +168,8 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -185,14 +183,12 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ - true, { "fn": "getAttr", "argv": [ @@ -201,11 +197,11 @@ }, "supportsFIPS" ] - } + }, + true ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -216,14 +212,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS is enabled but this partition does not support FIPS", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [], @@ -234,9 +232,11 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" } - ] + ], + "type": "tree" }, { 
"conditions": [], diff -Nru awscli-2.15.9/awscli/botocore/data/datazone/2018-05-10/service-2.json awscli-2.15.22/awscli/botocore/data/datazone/2018-05-10/service-2.json --- awscli-2.15.9/awscli/botocore/data/datazone/2018-05-10/service-2.json 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/awscli/botocore/data/datazone/2018-05-10/service-2.json 2024-02-21 17:34:54.000000000 +0000 @@ -4047,6 +4047,10 @@ "shape":"DomainId", "documentation":"

The identifier of the Amazon DataZone domain in which the project was created.

" }, + "failureReasons":{ + "shape":"FailureReasons", + "documentation":"

Reasons for failed project deletion

" + }, "glossaryTerms":{ "shape":"GlossaryTerms", "documentation":"

The glossary terms that can be used in the project.

" @@ -4062,6 +4066,10 @@ "name":{ "shape":"ProjectName", "documentation":"

The name of the project.

" + }, + "projectStatus":{ + "shape":"ProjectStatus", + "documentation":"

Status of the project

" } } }, @@ -5092,6 +5100,12 @@ "documentation":"

The identifier of the Amazon Web Services domain that is to be deleted.

", "location":"uri", "locationName":"identifier" + }, + "skipDeletionCheck":{ + "shape":"Boolean", + "documentation":"

Optional flag to delete all child entities within the domain

", + "location":"querystring", + "locationName":"skipDeletionCheck" } } }, @@ -5295,6 +5309,12 @@ "documentation":"

The identifier of the project that is to be deleted.

", "location":"uri", "locationName":"identifier" + }, + "skipDeletionCheck":{ + "shape":"Boolean", + "documentation":"

Optional flag to asynchronously delete child entities within the project

", + "location":"querystring", + "locationName":"skipDeletionCheck" } } }, @@ -5984,6 +6004,10 @@ }, "documentation":"

Specifies the error message that is returned if the operation cannot be successfully completed.

" }, + "FailureReasons":{ + "type":"list", + "member":{"shape":"ProjectDeletionError"} + }, "Filter":{ "type":"structure", "required":[ @@ -7523,6 +7547,10 @@ "shape":"DomainId", "documentation":"

The ID of the Amazon DataZone domain in which the project exists.

" }, + "failureReasons":{ + "shape":"FailureReasons", + "documentation":"

Reasons for failed project deletion

" + }, "glossaryTerms":{ "shape":"GlossaryTerms", "documentation":"

The business glossary terms that can be used in the project.

" @@ -7538,6 +7566,10 @@ "name":{ "shape":"ProjectName", "documentation":"

The name of the project.

" + }, + "projectStatus":{ + "shape":"ProjectStatus", + "documentation":"

Status of the project

" } } }, @@ -9603,6 +9635,20 @@ }, "documentation":"

The configuration of the prediction.

" }, + "ProjectDeletionError":{ + "type":"structure", + "members":{ + "code":{ + "shape":"String", + "documentation":"

Project Deletion Error Code

" + }, + "message":{ + "shape":"String", + "documentation":"

Project Deletion Error Message

" + } + }, + "documentation":"

Error that occurred during project deletion

" + }, "ProjectId":{ "type":"string", "pattern":"^[a-zA-Z0-9_-]{1,36}$" @@ -9636,6 +9682,14 @@ "pattern":"^[\\w -]+$", "sensitive":true }, + "ProjectStatus":{ + "type":"string", + "enum":[ + "ACTIVE", + "DELETING", + "DELETE_FAILED" + ] + }, "ProjectSummaries":{ "type":"list", "member":{"shape":"ProjectSummary"} @@ -9665,6 +9719,10 @@ "shape":"DomainId", "documentation":"

The identifier of a Amazon DataZone domain where the project exists.

" }, + "failureReasons":{ + "shape":"FailureReasons", + "documentation":"

Reasons for failed project deletion

" + }, "id":{ "shape":"ProjectId", "documentation":"

The identifier of a project.

" @@ -9673,6 +9731,10 @@ "shape":"ProjectName", "documentation":"

The name of a project.

" }, + "projectStatus":{ + "shape":"ProjectStatus", + "documentation":"

Status of the project

" + }, "updatedAt":{ "shape":"SyntheticTimestamp_date_time", "documentation":"

The timestamp of when the project was updated.

" @@ -12427,6 +12489,10 @@ "shape":"DomainId", "documentation":"

The identifier of the Amazon DataZone domain in which a project is updated.

" }, + "failureReasons":{ + "shape":"FailureReasons", + "documentation":"

Reasons for failed project deletion

" + }, "glossaryTerms":{ "shape":"GlossaryTerms", "documentation":"

The glossary terms of the project that are to be updated.

" @@ -12442,6 +12508,10 @@ "name":{ "shape":"ProjectName", "documentation":"

The name of the project that is to be updated.

" + }, + "projectStatus":{ + "shape":"ProjectStatus", + "documentation":"

Status of the project

" } } }, diff -Nru awscli-2.15.9/awscli/botocore/data/detective/2018-10-26/service-2.json awscli-2.15.22/awscli/botocore/data/detective/2018-10-26/service-2.json --- awscli-2.15.9/awscli/botocore/data/detective/2018-10-26/service-2.json 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/awscli/botocore/data/detective/2018-10-26/service-2.json 2024-02-21 17:34:54.000000000 +0000 @@ -74,7 +74,7 @@ {"shape":"InternalServerException"}, {"shape":"ServiceQuotaExceededException"} ], - "documentation":"

Creates a new behavior graph for the calling account, and sets that account as the administrator account. This operation is called by the account that is enabling Detective.

Before you try to enable Detective, make sure that your account has been enrolled in Amazon GuardDuty for at least 48 hours. If you do not meet this requirement, you cannot enable Detective. If you do meet the GuardDuty prerequisite, then when you make the request to enable Detective, it checks whether your data volume is within the Detective quota. If it exceeds the quota, then you cannot enable Detective.

The operation also enables Detective for the calling account in the currently selected Region. It returns the ARN of the new behavior graph.

CreateGraph triggers a process to create the corresponding data tables for the new behavior graph.

An account can only be the administrator account for one behavior graph within a Region. If the same account calls CreateGraph with the same administrator account, it always returns the same behavior graph ARN. It does not create a new behavior graph.

" + "documentation":"

Creates a new behavior graph for the calling account, and sets that account as the administrator account. This operation is called by the account that is enabling Detective.

The operation also enables Detective for the calling account in the currently selected Region. It returns the ARN of the new behavior graph.

CreateGraph triggers a process to create the corresponding data tables for the new behavior graph.

An account can only be the administrator account for one behavior graph within a Region. If the same account calls CreateGraph with the same administrator account, it always returns the same behavior graph ARN. It does not create a new behavior graph.

" }, "CreateMembers":{ "name":"CreateMembers", @@ -201,7 +201,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"TooManyRequestsException"} ], - "documentation":"

Returns the investigation results of an investigation for a behavior graph.

" + "documentation":"

Detective investigations lets you investigate IAM users and IAM roles using indicators of compromise. An indicator of compromise (IOC) is an artifact observed in or on a network, system, or environment that can (with a high level of confidence) identify malicious activity or a security incident. GetInvestigation returns the investigation results of an investigation for a behavior graph.

" }, "GetMembers":{ "name":"GetMembers", @@ -265,7 +265,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"TooManyRequestsException"} ], - "documentation":"

Get the indicators from an investigation

" + "documentation":"

Gets the indicators from an investigation. You can use the information from the indicators to determine if an IAM user and/or IAM role is involved in an unusual activity that could indicate malicious behavior and its impact.

" }, "ListInvestigations":{ "name":"ListInvestigations", @@ -282,7 +282,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"TooManyRequestsException"} ], - "documentation":"

List all Investigations.

" + "documentation":"

Detective investigations lets you investigate IAM users and IAM roles using indicators of compromise. An indicator of compromise (IOC) is an artifact observed in or on a network, system, or environment that can (with a high level of confidence) identify malicious activity or a security incident. ListInvestigations lists all active Detective investigations.

" }, "ListInvitations":{ "name":"ListInvitations", @@ -379,7 +379,7 @@ {"shape":"TooManyRequestsException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

initiate an investigation on an entity in a graph

" + "documentation":"

Detective investigations lets you investigate IAM users and IAM roles using indicators of compromise. An indicator of compromise (IOC) is an artifact observed in or on a network, system, or environment that can (with a high level of confidence) identify malicious activity or a security incident. StartInvestigation initiates an investigation on an entity in a behavior graph.

" }, "StartMonitoringMember":{ "name":"StartMonitoringMember", @@ -462,7 +462,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"TooManyRequestsException"} ], - "documentation":"

Update the state of an investigation.

" + "documentation":"

Updates the state of an investigation.

" }, "UpdateOrganizationConfiguration":{ "name":"UpdateOrganizationConfiguration", @@ -787,7 +787,7 @@ }, "EndInclusive":{ "shape":"Timestamp", - "documentation":"

A timestamp representing the end date of the time period until when data is filtered , including the end date.

" + "documentation":"

A timestamp representing the end date of the time period until when data is filtered, including the end date.

" } }, "documentation":"

Contains details on the time range used to filter data.

" @@ -962,7 +962,7 @@ "members":{ "GraphArn":{ "shape":"GraphArn", - "documentation":"

The ARN of the behavior graph.

" + "documentation":"

The Amazon Resource Name (ARN) of the behavior graph.

" }, "InvestigationId":{ "shape":"InvestigationId", @@ -975,7 +975,7 @@ "members":{ "GraphArn":{ "shape":"GraphArn", - "documentation":"

The ARN of the behavior graph.

" + "documentation":"

The Amazon Resource Name (ARN) of the behavior graph.

" }, "InvestigationId":{ "shape":"InvestigationId", @@ -983,35 +983,35 @@ }, "EntityArn":{ "shape":"EntityArn", - "documentation":"

The unique Amazon Resource Name (ARN) of the IAM user and IAM role.

" + "documentation":"

The unique Amazon Resource Name (ARN). Detective supports IAM user ARNs and IAM role ARNs.

" }, "EntityType":{ "shape":"EntityType", - "documentation":"

Type of entity. For example, Amazon Web Services accounts, such as IAM user and role.

" + "documentation":"

Type of entity. For example, Amazon Web Services accounts, such as an IAM user and/or IAM role.

" }, "CreatedTime":{ "shape":"Timestamp", - "documentation":"

The UTC time stamp of the creation time of the investigation report.

" + "documentation":"

The creation time of the investigation report in UTC time stamp format.

" }, "ScopeStartTime":{ "shape":"Timestamp", - "documentation":"

The start date and time for the scope time set to generate the investigation report.

" + "documentation":"

The start date and time used to set the scope time within which you want to generate the investigation report. The value is an UTC ISO8601 formatted string. For example, 2021-08-18T16:35:56.284Z.

" }, "ScopeEndTime":{ "shape":"Timestamp", - "documentation":"

The data and time when the investigation began. The value is an UTC ISO8601 formatted string. For example, 2021-08-18T16:35:56.284Z.

" + "documentation":"

The date and time when the investigation began. The value is an UTC ISO8601 formatted string. For example, 2021-08-18T16:35:56.284Z.

" }, "Status":{ "shape":"Status", - "documentation":"

Status based on the completion status of the investigation.

" + "documentation":"

The status based on the completion status of the investigation.

" }, "Severity":{ "shape":"Severity", - "documentation":"

Severity based on the likelihood and impact of the indicators of compromise discovered in the investigation.

" + "documentation":"

The severity assigned is based on the likelihood and impact of the indicators of compromise discovered in the investigation.

" }, "State":{ "shape":"State", - "documentation":"

The current state of the investigation. An archived investigation indicates you have completed reviewing the investigation.

" + "documentation":"

The current state of the investigation. An archived investigation indicates that you have completed reviewing the investigation.

" } } }, @@ -1083,7 +1083,7 @@ "members":{ "StartingIpAddress":{ "shape":"IpAddress", - "documentation":"

IP address where the resource was first used in the impossible travel

" + "documentation":"

IP address where the resource was first used in the impossible travel.

" }, "EndingIpAddress":{ "shape":"IpAddress", @@ -1091,7 +1091,7 @@ }, "StartingLocation":{ "shape":"Location", - "documentation":"

Location where the resource was first used in the impossible travel

" + "documentation":"

Location where the resource was first used in the impossible travel.

" }, "EndingLocation":{ "shape":"Location", @@ -1109,14 +1109,14 @@ "members":{ "IndicatorType":{ "shape":"IndicatorType", - "documentation":"

The type of indicator.

" + "documentation":"

The type of indicator.

" }, "IndicatorDetail":{ "shape":"IndicatorDetail", - "documentation":"

Details about the indicator of compromise.

" + "documentation":"

Details about the indicators of compromise that are used to determine if a resource is involved in a security incident. An indicator of compromise (IOC) is an artifact observed in or on a network, system, or environment that can (with a high level of confidence) identify malicious activity or a security incident.

" } }, - "documentation":"

Investigations triages indicators of compromises such as a finding and surfaces only the most critical and suspicious issues, so you can focus on high-level investigations.

" + "documentation":"

Detective investigations triages indicators of compromises such as a finding and surfaces only the most critical and suspicious issues, so you can focus on high-level investigations. An Indicator lets you determine if an Amazon Web Services resource is involved in unusual activity that could indicate malicious behavior and its impact.

" }, "IndicatorDetail":{ "type":"structure", @@ -1131,7 +1131,7 @@ }, "FlaggedIpAddressDetail":{ "shape":"FlaggedIpAddressDetail", - "documentation":"

Suspicious IP addresses that are flagged, which indicates critical or severe threats based on threat intelligence by Detective. This indicator is derived from AWS threat intelligence.

" + "documentation":"

Suspicious IP addresses that are flagged, which indicates critical or severe threats based on threat intelligence by Detective. This indicator is derived from Amazon Web Services threat intelligence.

" }, "NewGeolocationDetail":{ "shape":"NewGeolocationDetail", @@ -1154,7 +1154,7 @@ "documentation":"

Contains details about related finding groups.

" } }, - "documentation":"

Details about the indicators of compromise which are used to determine if a resource is involved in a security incident.

" + "documentation":"

Details about the indicators of compromise which are used to determine if a resource is involved in a security incident. An indicator of compromise (IOC) is an artifact observed in or on a network, system, or environment that can (with a high level of confidence) identify malicious activity or a security incident. For the list of indicators of compromise that are generated by Detective investigations, see Detective investigations.

" }, "IndicatorType":{ "type":"string", @@ -1203,7 +1203,7 @@ }, "CreatedTime":{ "shape":"Timestamp", - "documentation":"

The UTC time stamp of the creation time of the investigation report.

" + "documentation":"

The time stamp of the creation time of the investigation report. The value is an UTC ISO8601 formatted string. For example, 2021-08-18T16:35:56.284Z.

" }, "EntityArn":{ "shape":"EntityArn", @@ -1214,7 +1214,7 @@ "documentation":"

Type of entity. For example, Amazon Web Services accounts, such as IAM user and role.

" } }, - "documentation":"

Details about the investigation related to a potential security event identified by Detective

" + "documentation":"

Details about the investigation related to a potential security event identified by Detective.

" }, "InvestigationDetails":{ "type":"list", @@ -1306,7 +1306,7 @@ "members":{ "GraphArn":{ "shape":"GraphArn", - "documentation":"

The ARN of the behavior graph.

" + "documentation":"

The Amazon Resource Name (ARN) of the behavior graph.

" }, "InvestigationId":{ "shape":"InvestigationId", @@ -1314,15 +1314,15 @@ }, "IndicatorType":{ "shape":"IndicatorType", - "documentation":"

See Detective investigations..

" + "documentation":"

For the list of indicators of compromise that are generated by Detective investigations, see Detective investigations.

" }, "NextToken":{ "shape":"AiPaginationToken", - "documentation":"

List if there are more results available. The value of nextToken is a unique pagination token for each page. Repeat the call using the returned token to retrieve the next page. Keep all other arguments unchanged.

Each pagination token expires after 24 hours. Using an expired pagination token will return a Validation Exception error.

" + "documentation":"

Lists if there are more results available. The value of nextToken is a unique pagination token for each page. Repeat the call using the returned token to retrieve the next page. Keep all other arguments unchanged.

Each pagination token expires after 24 hours. Using an expired pagination token will return a Validation Exception error.

" }, "MaxResults":{ "shape":"MaxResults", - "documentation":"

List the maximum number of indicators in a page.

" + "documentation":"

Lists the maximum number of indicators in a page.

" } } }, @@ -1331,7 +1331,7 @@ "members":{ "GraphArn":{ "shape":"GraphArn", - "documentation":"

The ARN of the behavior graph.

" + "documentation":"

The Amazon Resource Name (ARN) of the behavior graph.

" }, "InvestigationId":{ "shape":"InvestigationId", @@ -1339,11 +1339,11 @@ }, "NextToken":{ "shape":"AiPaginationToken", - "documentation":"

List if there are more results available. The value of nextToken is a unique pagination token for each page. Repeat the call using the returned token to retrieve the next page. Keep all other arguments unchanged.

Each pagination token expires after 24 hours. Using an expired pagination token will return a Validation Exception error.

" + "documentation":"

Lists if there are more results available. The value of nextToken is a unique pagination token for each page. Repeat the call using the returned token to retrieve the next page. Keep all other arguments unchanged.

Each pagination token expires after 24 hours. Using an expired pagination token will return a Validation Exception error.

" }, "Indicators":{ "shape":"Indicators", - "documentation":"

Indicators of compromise listed based on severity.

" + "documentation":"

Lists the indicators of compromise.

" } } }, @@ -1353,19 +1353,19 @@ "members":{ "GraphArn":{ "shape":"GraphArn", - "documentation":"

The ARN of the behavior graph.

" + "documentation":"

The Amazon Resource Name (ARN) of the behavior graph.

" }, "NextToken":{ "shape":"AiPaginationToken", - "documentation":"

List if there are more results available. The value of nextToken is a unique pagination token for each page. Repeat the call using the returned token to retrieve the next page. Keep all other arguments unchanged.

Each pagination token expires after 24 hours. Using an expired pagination token will return a Validation Exception error.

" + "documentation":"

Lists if there are more results available. The value of nextToken is a unique pagination token for each page. Repeat the call using the returned token to retrieve the next page. Keep all other arguments unchanged.

Each pagination token expires after 24 hours. Using an expired pagination token will return a Validation Exception error.

" }, "MaxResults":{ "shape":"MaxResults", - "documentation":"

List the maximum number of investigations in a page.

" + "documentation":"

Lists the maximum number of investigations in a page.

" }, "FilterCriteria":{ "shape":"FilterCriteria", - "documentation":"

Filter the investigation results based on a criteria.

" + "documentation":"

Filters the investigation results based on a criteria.

" }, "SortCriteria":{ "shape":"SortCriteria", @@ -1378,11 +1378,11 @@ "members":{ "InvestigationDetails":{ "shape":"InvestigationDetails", - "documentation":"

Investigations details lists the summary of uncommon behavior or malicious activity which indicates a compromise.

" + "documentation":"

Lists the summary of uncommon behavior or malicious activity which indicates a compromise.

" }, "NextToken":{ "shape":"AiPaginationToken", - "documentation":"

List if there are more results available. The value of nextToken is a unique pagination token for each page. Repeat the call using the returned token to retrieve the next page. Keep all other arguments unchanged.

Each pagination token expires after 24 hours. Using an expired pagination token will return an HTTP 400 InvalidToken error.

" + "documentation":"

Lists if there are more results available. The value of nextToken is a unique pagination token for each page. Repeat the call using the returned token to retrieve the next page. Keep all other arguments unchanged.

Each pagination token expires after 24 hours.

" } } }, @@ -1635,7 +1635,7 @@ }, "IsNewForEntireAccount":{ "shape":"IsNewForEntireAccount", - "documentation":"

Checks if the ASO is for new for the entire account.

" + "documentation":"

Checks if the Autonomous System Organization (ASO) is new for the entire account.

" } }, "documentation":"

Details new Autonomous System Organizations (ASOs) used either at the resource or account level.

" @@ -1653,7 +1653,7 @@ }, "IsNewForEntireAccount":{ "shape":"IsNewForEntireAccount", - "documentation":"

Checks if the gelocation is new for the entire account.

" + "documentation":"

Checks if the geolocation is new for the entire account.

" } }, "documentation":"

Details new geolocations used either at the resource or account level. For example, lists an observed geolocation that is an infrequent or unused location based on previous user activity.

" @@ -1698,7 +1698,7 @@ "members":{ "Arn":{ "shape":"EntityArn", - "documentation":"

The ARN of the related finding.

" + "documentation":"

The Amazon Resource Name (ARN) of the related finding.

" }, "Type":{ "shape":"Type", @@ -1750,7 +1750,7 @@ "documentation":"

The type of resource that has exceeded the service quota.

" } }, - "documentation":"

This request cannot be completed for one of the following reasons.

", + "documentation":"

This request cannot be completed for one of the following reasons.

", "error":{"httpStatusCode":402}, "exception":true }, @@ -1796,7 +1796,7 @@ "members":{ "GraphArn":{ "shape":"GraphArn", - "documentation":"

The ARN of the behavior graph.

" + "documentation":"

The Amazon Resource Name (ARN) of the behavior graph.

" }, "EntityArn":{ "shape":"EntityArn", @@ -1808,7 +1808,7 @@ }, "ScopeEndTime":{ "shape":"Timestamp", - "documentation":"

The data and time when the investigation began. The value is an UTC ISO8601 formatted string. For example, 2021-08-18T16:35:56.284Z.

" + "documentation":"

The date and time when the investigation ended. The value is a UTC ISO8601 formatted string. For example, 2021-08-18T16:35:56.284Z.

" } } }, @@ -1881,11 +1881,11 @@ }, "IpAddress":{ "shape":"IpAddress", - "documentation":"

The IP address where the TTP was observed.

" + "documentation":"

The IP address where the tactics, techniques, and procedures (TTP) were observed.

" }, "APIName":{ "shape":"APIName", - "documentation":"

The name of the API where the TTP was observed.

" + "documentation":"

The name of the API where the tactics, techniques, and procedures (TTP) were observed.

" }, "APISuccessCount":{ "shape":"APISuccessCount", @@ -2061,7 +2061,7 @@ "members":{ "GraphArn":{ "shape":"GraphArn", - "documentation":"

The ARN of the behavior graph.

" + "documentation":"

The Amazon Resource Name (ARN) of the behavior graph.

" }, "InvestigationId":{ "shape":"InvestigationId", @@ -2116,5 +2116,5 @@ "value":{"shape":"DatasourcePackageUsageInfo"} } }, - "documentation":"

Detective uses machine learning and purpose-built visualizations to help you to analyze and investigate security issues across your Amazon Web Services (Amazon Web Services) workloads. Detective automatically extracts time-based events such as login attempts, API calls, and network traffic from CloudTrail and Amazon Virtual Private Cloud (Amazon VPC) flow logs. It also extracts findings detected by Amazon GuardDuty.

The Detective API primarily supports the creation and management of behavior graphs. A behavior graph contains the extracted data from a set of member accounts, and is created and managed by an administrator account.

To add a member account to the behavior graph, the administrator account sends an invitation to the account. When the account accepts the invitation, it becomes a member account in the behavior graph.

Detective is also integrated with Organizations. The organization management account designates the Detective administrator account for the organization. That account becomes the administrator account for the organization behavior graph. The Detective administrator account is also the delegated administrator account for Detective in Organizations.

The Detective administrator account can enable any organization account as a member account in the organization behavior graph. The organization accounts do not receive invitations. The Detective administrator account can also invite other accounts to the organization behavior graph.

Every behavior graph is specific to a Region. You can only use the API to manage behavior graphs that belong to the Region that is associated with the currently selected endpoint.

The administrator account for a behavior graph can use the Detective API to do the following:

The organization management account can use the Detective API to select the delegated administrator for Detective.

The Detective administrator account for an organization can use the Detective API to do the following:

An invited member account can use the Detective API to do the following:

All API actions are logged as CloudTrail events. See Logging Detective API Calls with CloudTrail.

We replaced the term \"master account\" with the term \"administrator account.\" An administrator account is used to centrally manage multiple accounts. In the case of Detective, the administrator account manages the accounts in their behavior graph.

" + "documentation":"

Detective uses machine learning and purpose-built visualizations to help you to analyze and investigate security issues across your Amazon Web Services workloads. Detective automatically extracts time-based events such as login attempts, API calls, and network traffic from CloudTrail and Amazon Virtual Private Cloud (Amazon VPC) flow logs. It also extracts findings detected by Amazon GuardDuty.

The Detective API primarily supports the creation and management of behavior graphs. A behavior graph contains the extracted data from a set of member accounts, and is created and managed by an administrator account.

To add a member account to the behavior graph, the administrator account sends an invitation to the account. When the account accepts the invitation, it becomes a member account in the behavior graph.

Detective is also integrated with Organizations. The organization management account designates the Detective administrator account for the organization. That account becomes the administrator account for the organization behavior graph. The Detective administrator account is also the delegated administrator account for Detective in Organizations.

The Detective administrator account can enable any organization account as a member account in the organization behavior graph. The organization accounts do not receive invitations. The Detective administrator account can also invite other accounts to the organization behavior graph.

Every behavior graph is specific to a Region. You can only use the API to manage behavior graphs that belong to the Region that is associated with the currently selected endpoint.

The administrator account for a behavior graph can use the Detective API to do the following:

The organization management account can use the Detective API to select the delegated administrator for Detective.

The Detective administrator account for an organization can use the Detective API to do the following:

An invited member account can use the Detective API to do the following:

All API actions are logged as CloudTrail events. See Logging Detective API Calls with CloudTrail.

We replaced the term \"master account\" with the term \"administrator account\". An administrator account is used to centrally manage multiple accounts. In the case of Detective, the administrator account manages the accounts in their behavior graph.

" } diff -Nru awscli-2.15.9/awscli/botocore/data/drs/2020-02-26/service-2.json awscli-2.15.22/awscli/botocore/data/drs/2020-02-26/service-2.json --- awscli-2.15.9/awscli/botocore/data/drs/2020-02-26/service-2.json 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/awscli/botocore/data/drs/2020-02-26/service-2.json 2024-02-21 17:34:54.000000000 +0000 @@ -2471,6 +2471,7 @@ "LaunchActionOrder":{ "type":"integer", "documentation":"

Launch action order.

", + "box":true, "max":10000, "min":2 }, @@ -2934,11 +2935,13 @@ }, "MaxResultsReplicatingSourceServers":{ "type":"integer", + "box":true, "max":300, "min":1 }, "MaxResultsType":{ "type":"integer", + "box":true, "max":1000, "min":1 }, @@ -4428,6 +4431,7 @@ }, "StrictlyPositiveInteger":{ "type":"integer", + "box":true, "min":1 }, "SubnetID":{ diff -Nru awscli-2.15.9/awscli/botocore/data/dynamodb/2012-08-10/service-2.json awscli-2.15.22/awscli/botocore/data/dynamodb/2012-08-10/service-2.json --- awscli-2.15.9/awscli/botocore/data/dynamodb/2012-08-10/service-2.json 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/awscli/botocore/data/dynamodb/2012-08-10/service-2.json 2024-02-21 17:34:54.000000000 +0000 @@ -674,7 +674,7 @@ {"shape":"PointInTimeRecoveryUnavailableException"}, {"shape":"InternalServerError"} ], - "documentation":"

Restores the specified table to the specified point in time within EarliestRestorableDateTime and LatestRestorableDateTime. You can restore your table to any point in time during the last 35 days. Any number of users can execute up to 4 concurrent restores (any type of restore) in a given account.

When you restore using point in time recovery, DynamoDB restores your table data to the state based on the selected date and time (day:hour:minute:second) to a new table.

Along with data, the following are also included on the new restored table using point in time recovery:

You must manually set up the following on the restored table:

", + "documentation":"

Restores the specified table to the specified point in time within EarliestRestorableDateTime and LatestRestorableDateTime. You can restore your table to any point in time during the last 35 days. Any number of users can execute up to 50 concurrent restores (any type of restore) in a given account.

When you restore using point in time recovery, DynamoDB restores your table data to the state based on the selected date and time (day:hour:minute:second) to a new table.

Along with data, the following are also included on the new restored table using point in time recovery:

You must manually set up the following on the restored table:

", "endpointdiscovery":{ } }, @@ -861,6 +861,24 @@ "endpointdiscovery":{ } }, + "UpdateKinesisStreamingDestination":{ + "name":"UpdateKinesisStreamingDestination", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateKinesisStreamingDestinationInput"}, + "output":{"shape":"UpdateKinesisStreamingDestinationOutput"}, + "errors":[ + {"shape":"InternalServerError"}, + {"shape":"LimitExceededException"}, + {"shape":"ResourceInUseException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

The command to update the Kinesis stream destination.

", + "endpointdiscovery":{ + } + }, "UpdateTable":{ "name":"UpdateTable", "http":{ @@ -875,7 +893,7 @@ {"shape":"LimitExceededException"}, {"shape":"InternalServerError"} ], - "documentation":"

Modifies the provisioned throughput settings, global secondary indexes, or DynamoDB Streams settings for a given table.

This operation only applies to Version 2019.11.21 (Current) of global tables.

You can only perform one of the following operations at once:

UpdateTable is an asynchronous operation; while it is executing, the table status changes from ACTIVE to UPDATING. While it is UPDATING, you cannot issue another UpdateTable request. When the table returns to the ACTIVE state, the UpdateTable operation is complete.

", + "documentation":"

Modifies the provisioned throughput settings, global secondary indexes, or DynamoDB Streams settings for a given table.

This operation only applies to Version 2019.11.21 (Current) of global tables.

You can only perform one of the following operations at once:

UpdateTable is an asynchronous operation; while it's executing, the table status changes from ACTIVE to UPDATING. While it's UPDATING, you can't issue another UpdateTable request on the base table nor any replicas. When the table returns to the ACTIVE state, the UpdateTable operation is complete.

", "endpointdiscovery":{ } }, @@ -915,6 +933,13 @@ } }, "shapes":{ + "ApproximateCreationDateTimePrecision":{ + "type":"string", + "enum":[ + "MILLISECOND", + "MICROSECOND" + ] + }, "ArchivalReason":{"type":"string"}, "ArchivalSummary":{ "type":"structure", @@ -2542,7 +2567,8 @@ "ACTIVE", "DISABLING", "DISABLED", - "ENABLE_FAILED" + "ENABLE_FAILED", + "UPDATING" ] }, "DoubleObject":{"type":"double"}, @@ -2554,6 +2580,16 @@ "documentation":"

There was an attempt to insert an item with the same primary key as an item that already exists in the DynamoDB table.

", "exception":true }, + "EnableKinesisStreamingConfiguration":{ + "type":"structure", + "members":{ + "ApproximateCreationDateTimePrecision":{ + "shape":"ApproximateCreationDateTimePrecision", + "documentation":"

Toggle for the precision of Kinesis data stream timestamp. The values are either MILLISECOND or MICROSECOND.

" + } + }, + "documentation":"

Enables setting the configuration for Kinesis Streaming.

" + }, "Endpoint":{ "type":"structure", "required":[ @@ -3748,6 +3784,10 @@ "DestinationStatusDescription":{ "shape":"String", "documentation":"

The human-readable string that corresponds to the replica status.

" + }, + "ApproximateCreationDateTimePrecision":{ + "shape":"ApproximateCreationDateTimePrecision", + "documentation":"

The precision of the Kinesis data stream timestamp. The values are either MILLISECOND or MICROSECOND.

" } }, "documentation":"

Describes a Kinesis data stream destination.

" @@ -3770,6 +3810,10 @@ "StreamArn":{ "shape":"StreamArn", "documentation":"

The ARN for a Kinesis data stream.

" + }, + "EnableKinesisStreamingConfiguration":{ + "shape":"EnableKinesisStreamingConfiguration", + "documentation":"

The source for the Kinesis streaming information that is being enabled.

" } } }, @@ -3787,6 +3831,10 @@ "DestinationStatus":{ "shape":"DestinationStatus", "documentation":"

The current status of the replication.

" + }, + "EnableKinesisStreamingConfiguration":{ + "shape":"EnableKinesisStreamingConfiguration", + "documentation":"

The destination for the Kinesis streaming information that is being enabled.

" } } }, @@ -4160,7 +4208,7 @@ "members":{ "Statement":{ "shape":"PartiQLStatement", - "documentation":"

A PartiQL statment that uses parameters.

" + "documentation":"

A PartiQL statement that uses parameters.

" }, "Parameters":{ "shape":"PreparedStatementParameters", @@ -4171,7 +4219,7 @@ "documentation":"

An optional parameter that returns the item attributes for a PartiQL ParameterizedStatement operation that failed a condition check.

There is no additional cost associated with requesting a return value aside from the small network and processing overhead of receiving a larger response. No read capacity units are consumed.

" } }, - "documentation":"

Represents a PartiQL statment that uses parameters.

" + "documentation":"

Represents a PartiQL statement that uses parameters.

" }, "ParameterizedStatements":{ "type":"list", @@ -6226,6 +6274,58 @@ }, "documentation":"

Represents the output of an UpdateItem operation.

" }, + "UpdateKinesisStreamingConfiguration":{ + "type":"structure", + "members":{ + "ApproximateCreationDateTimePrecision":{ + "shape":"ApproximateCreationDateTimePrecision", + "documentation":"

Enables updating the precision of Kinesis data stream timestamp.

" + } + }, + "documentation":"

Enables updating the configuration for Kinesis Streaming.

" + }, + "UpdateKinesisStreamingDestinationInput":{ + "type":"structure", + "required":[ + "TableName", + "StreamArn" + ], + "members":{ + "TableName":{ + "shape":"TableName", + "documentation":"

The table name for the Kinesis streaming destination input.

" + }, + "StreamArn":{ + "shape":"StreamArn", + "documentation":"

The ARN for the Kinesis stream input.

" + }, + "UpdateKinesisStreamingConfiguration":{ + "shape":"UpdateKinesisStreamingConfiguration", + "documentation":"

The command to update the Kinesis stream configuration.

" + } + } + }, + "UpdateKinesisStreamingDestinationOutput":{ + "type":"structure", + "members":{ + "TableName":{ + "shape":"TableName", + "documentation":"

The table name for the Kinesis streaming destination output.

" + }, + "StreamArn":{ + "shape":"StreamArn", + "documentation":"

The ARN for the Kinesis stream input.

" + }, + "DestinationStatus":{ + "shape":"DestinationStatus", + "documentation":"

The status of the attempt to update the Kinesis streaming destination output.

" + }, + "UpdateKinesisStreamingConfiguration":{ + "shape":"UpdateKinesisStreamingConfiguration", + "documentation":"

The command to update the Kinesis streaming destination configuration.

" + } + } + }, "UpdateReplicationGroupMemberAction":{ "type":"structure", "required":["RegionName"], @@ -6279,7 +6379,7 @@ }, "StreamSpecification":{ "shape":"StreamSpecification", - "documentation":"

Represents the DynamoDB Streams configuration for the table.

You receive a ResourceInUseException if you try to enable a stream on a table that already has a stream, or if you try to disable a stream on a table that doesn't have a stream.

" + "documentation":"

Represents the DynamoDB Streams configuration for the table.

You receive a ValidationException if you try to enable a stream on a table that already has a stream, or if you try to disable a stream on a table that doesn't have a stream.

" }, "SSESpecification":{ "shape":"SSESpecification", diff -Nru awscli-2.15.9/awscli/botocore/data/ec2/2016-11-15/service-2.json awscli-2.15.22/awscli/botocore/data/ec2/2016-11-15/service-2.json --- awscli-2.15.9/awscli/botocore/data/ec2/2016-11-15/service-2.json 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/awscli/botocore/data/ec2/2016-11-15/service-2.json 2024-02-21 17:34:54.000000000 +0000 @@ -2423,7 +2423,7 @@ }, "input":{"shape":"DescribeElasticGpusRequest"}, "output":{"shape":"DescribeElasticGpusResult"}, - "documentation":"

Describes the Elastic Graphics accelerator associated with your instances. For more information about Elastic Graphics, see Amazon Elastic Graphics.

" + "documentation":"

Amazon Elastic Graphics reached end of life on January 8, 2024. For workloads that require graphics acceleration, we recommend that you use Amazon EC2 G4ad, G4dn, or G5 instances.

Describes the Elastic Graphics accelerator associated with your instances. For more information about Elastic Graphics, see Amazon Elastic Graphics.

" }, "DescribeExportImageTasks":{ "name":"DescribeExportImageTasks", @@ -3711,7 +3711,7 @@ }, "input":{"shape":"DetachVolumeRequest"}, "output":{"shape":"VolumeAttachment"}, - "documentation":"

Detaches an EBS volume from an instance. Make sure to unmount any file systems on the device within your operating system before detaching the volume. Failure to do so can result in the volume becoming stuck in the busy state while detaching. If this happens, detachment can be delayed indefinitely until you unmount the volume, force detachment, reboot the instance, or all three. If an EBS volume is the root device of an instance, it can't be detached while the instance is running. To detach the root volume, stop the instance first.

When a volume with an Amazon Web Services Marketplace product code is detached from an instance, the product code is no longer associated with the instance.

For more information, see Detach an Amazon EBS volume in the Amazon Elastic Compute Cloud User Guide.

" + "documentation":"

Detaches an EBS volume from an instance. Make sure to unmount any file systems on the device within your operating system before detaching the volume. Failure to do so can result in the volume becoming stuck in the busy state while detaching. If this happens, detachment can be delayed indefinitely until you unmount the volume, force detachment, reboot the instance, or all three. If an EBS volume is the root device of an instance, it can't be detached while the instance is running. To detach the root volume, stop the instance first.

When a volume with an Amazon Web Services Marketplace product code is detached from an instance, the product code is no longer associated with the instance.

You can't detach or force detach volumes that are attached to Amazon ECS or Fargate tasks. Attempting to do this results in the UnsupportedOperationException exception with the Unable to detach volume attached to ECS tasks error message.

For more information, see Detach an Amazon EBS volume in the Amazon Elastic Compute Cloud User Guide.

" }, "DetachVpnGateway":{ "name":"DetachVpnGateway", @@ -4465,7 +4465,7 @@ }, "input":{"shape":"GetLaunchTemplateDataRequest"}, "output":{"shape":"GetLaunchTemplateDataResult"}, - "documentation":"

Retrieves the configuration data of the specified instance. You can use this data to create a launch template.

This action calls on other describe actions to get instance information. Depending on your instance configuration, you may need to allow the following actions in your IAM policy: DescribeSpotInstanceRequests, DescribeInstanceCreditSpecifications, DescribeVolumes, DescribeInstanceAttribute, and DescribeElasticGpus. Or, you can allow describe* depending on your instance requirements.

" + "documentation":"

Retrieves the configuration data of the specified instance. You can use this data to create a launch template.

This action calls on other describe actions to get instance information. Depending on your instance configuration, you may need to allow the following actions in your IAM policy: DescribeSpotInstanceRequests, DescribeInstanceCreditSpecifications, DescribeVolumes, and DescribeInstanceAttribute. Or, you can allow describe* depending on your instance requirements.

" }, "GetManagedPrefixListAssociations":{ "name":"GetManagedPrefixListAssociations", @@ -5966,7 +5966,7 @@ }, "input":{"shape":"StartInstancesRequest"}, "output":{"shape":"StartInstancesResult"}, - "documentation":"

Starts an Amazon EBS-backed instance that you've previously stopped.

Instances that use Amazon EBS volumes as their root devices can be quickly stopped and started. When an instance is stopped, the compute resources are released and you are not billed for instance usage. However, your root partition Amazon EBS volume remains and continues to persist your data, and you are charged for Amazon EBS volume usage. You can restart your instance at any time. Every time you start your instance, Amazon EC2 charges a one-minute minimum for instance usage, and thereafter charges per second for instance usage.

Before stopping an instance, make sure it is in a state from which it can be restarted. Stopping an instance does not preserve data stored in RAM.

Performing this operation on an instance that uses an instance store as its root device returns an error.

If you attempt to start a T3 instance with host tenancy and the unlimted CPU credit option, the request fails. The unlimited CPU credit option is not supported on Dedicated Hosts. Before you start the instance, either change its CPU credit option to standard, or change its tenancy to default or dedicated.

For more information, see Stop and start your instance in the Amazon EC2 User Guide.

" + "documentation":"

Starts an Amazon EBS-backed instance that you've previously stopped.

Instances that use Amazon EBS volumes as their root devices can be quickly stopped and started. When an instance is stopped, the compute resources are released and you are not billed for instance usage. However, your root partition Amazon EBS volume remains and continues to persist your data, and you are charged for Amazon EBS volume usage. You can restart your instance at any time. Every time you start your instance, Amazon EC2 charges a one-minute minimum for instance usage, and thereafter charges per second for instance usage.

Before stopping an instance, make sure it is in a state from which it can be restarted. Stopping an instance does not preserve data stored in RAM.

Performing this operation on an instance that uses an instance store as its root device returns an error.

If you attempt to start a T3 instance with host tenancy and the unlimited CPU credit option, the request fails. The unlimited CPU credit option is not supported on Dedicated Hosts. Before you start the instance, either change its CPU credit option to standard, or change its tenancy to default or dedicated.

For more information, see Stop and start your instance in the Amazon EC2 User Guide.

" }, "StartNetworkInsightsAccessScopeAnalysis":{ "name":"StartNetworkInsightsAccessScopeAnalysis", @@ -9245,7 +9245,8 @@ "available", "information", "impaired", - "unavailable" + "unavailable", + "constrained" ] }, "AvailabilityZoneStringList":{ @@ -12595,7 +12596,7 @@ "members":{ "ClientToken":{ "shape":"String", - "documentation":"

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see How to ensure idempotency.

" + "documentation":"

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see Ensuring idempotency.

" }, "DryRun":{ "shape":"Boolean", @@ -13674,7 +13675,7 @@ }, "ClientToken":{ "shape":"String", - "documentation":"

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see How to ensure idempotency.

Constraint: Maximum 64 ASCII characters.

", + "documentation":"

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see Ensuring idempotency.

Constraint: Maximum 64 ASCII characters.

", "idempotencyToken":true }, "DryRun":{ @@ -13809,6 +13810,11 @@ "shape":"TagSpecificationList", "documentation":"

The tags to assign to the network ACL.

", "locationName":"TagSpecification" + }, + "ClientToken":{ + "shape":"String", + "documentation":"

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see Ensuring idempotency.

", + "idempotencyToken":true } } }, @@ -13819,6 +13825,11 @@ "shape":"NetworkAcl", "documentation":"

Information about the network ACL.

", "locationName":"networkAcl" + }, + "ClientToken":{ + "shape":"String", + "documentation":"

Unique, case-sensitive identifier to ensure the idempotency of the request. Only returned if a client token was provided in the request.

", + "locationName":"clientToken" } } }, @@ -14384,6 +14395,11 @@ "shape":"TagSpecificationList", "documentation":"

The tags to assign to the route table.

", "locationName":"TagSpecification" + }, + "ClientToken":{ + "shape":"String", + "documentation":"

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see Ensuring idempotency.

", + "idempotencyToken":true } } }, @@ -14394,6 +14410,11 @@ "shape":"RouteTable", "documentation":"

Information about the route table.

", "locationName":"routeTable" + }, + "ClientToken":{ + "shape":"String", + "documentation":"

Unique, case-sensitive identifier to ensure the idempotency of the request. Only returned if a client token was provided in the request.

", + "locationName":"clientToken" } } }, @@ -18856,7 +18877,7 @@ "members":{ "Filters":{ "shape":"FilterList", - "documentation":"

The filters.

", + "documentation":"

The filters.

", "locationName":"Filter" }, "ZoneNames":{ @@ -19038,7 +19059,7 @@ }, "MaxResults":{ "shape":"DescribeCapacityBlockOfferingsMaxResults", - "documentation":"

The maximum number of results to return for the request in a single page. The remaining results can be seen by sending another request with the returned nextToken value. This value can be between 5 and 500. If maxResults is given a larger value than 500, you receive an error.

" + "documentation":"

The maximum number of items to return for this request. To get the next page of items, make another request with the token returned in the output. For more information, see Pagination.

" } } }, @@ -19076,7 +19097,7 @@ }, "MaxResults":{ "shape":"DescribeCapacityReservationFleetsMaxResults", - "documentation":"

The maximum number of results to return for the request in a single page. The remaining results can be seen by sending another request with the returned nextToken value. This value can be between 5 and 500. If maxResults is given a larger value than 500, you receive an error.

" + "documentation":"

The maximum number of items to return for this request. To get the next page of items, make another request with the token returned in the output. For more information, see Pagination.

" }, "Filters":{ "shape":"FilterList", @@ -19123,7 +19144,7 @@ }, "MaxResults":{ "shape":"DescribeCapacityReservationsMaxResults", - "documentation":"

The maximum number of results to return for the request in a single page. The remaining results can be seen by sending another request with the returned nextToken value. This value can be between 5 and 500. If maxResults is given a larger value than 500, you receive an error.

" + "documentation":"

The maximum number of items to return for this request. To get the next page of items, make another request with the token returned in the output. For more information, see Pagination.

" }, "Filters":{ "shape":"FilterList", @@ -22920,7 +22941,7 @@ "members":{ "Filters":{ "shape":"FilterList", - "documentation":"

The filters.

", + "documentation":"

The filters.

", "locationName":"Filter" }, "DryRun":{ @@ -27025,6 +27046,16 @@ "shape":"String", "documentation":"

The ID of the EBS volume.

", "locationName":"volumeId" + }, + "AssociatedResource":{ + "shape":"String", + "documentation":"

The ARN of the Amazon ECS or Fargate task to which the volume is attached.

", + "locationName":"associatedResource" + }, + "VolumeOwnerId":{ + "shape":"String", + "documentation":"

The ID of the Amazon Web Services account that owns the volume.

This parameter is returned only for volumes that are attached to Fargate tasks.

", + "locationName":"volumeOwnerId" } }, "documentation":"

Describes a parameter used to set up an EBS volume in a block device mapping.

" @@ -27269,7 +27300,7 @@ "locationName":"elasticGpuAssociationTime" } }, - "documentation":"

Describes the association between an instance and an Elastic Graphics accelerator.

" + "documentation":"

Amazon Elastic Graphics reached end of life on January 8, 2024. For workloads that require graphics acceleration, we recommend that you use Amazon EC2 G4ad, G4dn, or G5 instances.

Describes the association between an instance and an Elastic Graphics accelerator.

" }, "ElasticGpuAssociationList":{ "type":"list", @@ -27287,7 +27318,7 @@ "locationName":"status" } }, - "documentation":"

Describes the status of an Elastic Graphics accelerator.

" + "documentation":"

Amazon Elastic Graphics reached end of life on January 8, 2024. For workloads that require graphics acceleration, we recommend that you use Amazon EC2 G4ad, G4dn, or G5 instances.

Describes the status of an Elastic Graphics accelerator.

" }, "ElasticGpuId":{"type":"string"}, "ElasticGpuIdSet":{ @@ -27313,7 +27344,7 @@ "documentation":"

The type of Elastic Graphics accelerator. For more information about the values to specify for Type, see Elastic Graphics Basics, specifically the Elastic Graphics accelerator column, in the Amazon Elastic Compute Cloud User Guide for Windows Instances.

" } }, - "documentation":"

A specification for an Elastic Graphics accelerator.

" + "documentation":"

Amazon Elastic Graphics reached end of life on January 8, 2024. For workloads that require graphics acceleration, we recommend that you use Amazon EC2 G4ad, G4dn, or G5 instances.

A specification for an Elastic Graphics accelerator.

" }, "ElasticGpuSpecificationList":{ "type":"list", @@ -27327,11 +27358,11 @@ "members":{ "Type":{ "shape":"String", - "documentation":"

The elastic GPU type.

", + "documentation":"

Deprecated.

Amazon Elastic Graphics reached end of life on January 8, 2024. For workloads that require graphics acceleration, we recommend that you use Amazon EC2 G4ad, G4dn, or G5 instances.

", "locationName":"type" } }, - "documentation":"

Describes an elastic GPU.

" + "documentation":"

Deprecated.

Amazon Elastic Graphics reached end of life on January 8, 2024. For workloads that require graphics acceleration, we recommend that you use Amazon EC2 G4ad, G4dn, or G5 instances.

" }, "ElasticGpuSpecificationResponseList":{ "type":"list", @@ -27397,7 +27428,7 @@ "locationName":"tagSet" } }, - "documentation":"

Describes an Elastic Graphics accelerator.

" + "documentation":"

Amazon Elastic Graphics reached end of life on January 8, 2024. For workloads that require graphics acceleration, we recommend that you use Amazon EC2 G4ad, G4dn, or G5 instances.

Describes an Elastic Graphics accelerator.

" }, "ElasticInferenceAccelerator":{ "type":"structure", @@ -27552,11 +27583,11 @@ "members":{ "Source":{ "shape":"String", - "documentation":"

The source Region or Availability Zone that the metric subscription is enabled for. For example, us-east-1.

" + "documentation":"

The source Region (like us-east-1) or Availability Zone ID (like use1-az1) that the metric subscription is enabled for. If you use Availability Zone IDs, the Source and Destination Availability Zones must be in the same Region.

" }, "Destination":{ "shape":"String", - "documentation":"

The target Region or Availability Zone that the metric subscription is enabled for. For example, eu-west-1.

" + "documentation":"

The target Region (like us-east-2) or Availability Zone ID (like use2-az2) that the metric subscription is enabled for. If you use Availability Zone IDs, the Source and Destination Availability Zones must be in the same Region.

" }, "Metric":{ "shape":"MetricType", @@ -27996,7 +28027,7 @@ "members":{ "State":{ "shape":"SnapshotBlockPublicAccessState", - "documentation":"

The mode in which to enable block public access for snapshots for the Region. Specify one of the following values:

" + "documentation":"

The mode in which to enable block public access for snapshots for the Region. Specify one of the following values:

unblocked is not a valid value for EnableSnapshotBlockPublicAccess.

" }, "DryRun":{ "shape":"Boolean", @@ -30232,7 +30263,7 @@ }, "MaxResults":{ "shape":"GetCapacityReservationUsageRequestMaxResults", - "documentation":"

The maximum number of results to return for the request in a single page. The remaining results can be seen by sending another request with the returned nextToken value. This value can be between 5 and 500. If maxResults is given a larger value than 500, you receive an error.

Valid range: Minimum value of 1. Maximum value of 1000.

" + "documentation":"

The maximum number of items to return for this request. To get the next page of items, make another request with the token returned in the output. For more information, see Pagination.

" }, "DryRun":{ "shape":"Boolean", @@ -30527,7 +30558,7 @@ }, "MaxResults":{ "shape":"GetGroupsForCapacityReservationRequestMaxResults", - "documentation":"

The maximum number of results to return for the request in a single page. The remaining results can be seen by sending another request with the returned nextToken value. This value can be between 5 and 500. If maxResults is given a larger value than 500, you receive an error.

" + "documentation":"

The maximum number of items to return for this request. To get the next page of items, make another request with the token returned in the output. For more information, see Pagination.

" }, "DryRun":{ "shape":"Boolean", @@ -31444,7 +31475,7 @@ }, "TargetCapacityUnitType":{ "shape":"TargetCapacityUnitType", - "documentation":"

The unit for the target capacity.

Default: units (translates to number of instances)

" + "documentation":"

The unit for the target capacity.

" }, "SingleAvailabilityZone":{ "shape":"Boolean", @@ -34067,7 +34098,7 @@ }, "ElasticGpuAssociations":{ "shape":"ElasticGpuAssociationList", - "documentation":"

The Elastic GPU associated with the instance.

", + "documentation":"

Deprecated.

Amazon Elastic Graphics reached end of life on January 8, 2024. For workloads that require graphics acceleration, we recommend that you use Amazon EC2 G4ad, G4dn, or G5 instances.

", "locationName":"elasticGpuAssociationSet" }, "ElasticInferenceAcceleratorAssociations":{ @@ -34973,7 +35004,7 @@ "members":{ "HttpTokens":{ "shape":"HttpTokensState", - "documentation":"

IMDSv2 uses token-backed sessions. Set the use of HTTP tokens to optional (in other words, set the use of IMDSv2 to optional) or required (in other words, set the use of IMDSv2 to required).

Default: optional

" + "documentation":"

Indicates whether IMDSv2 is required.

Default: If the value of ImdsSupport for the Amazon Machine Image (AMI) for your instance is v2.0, the default is required.

" }, "HttpPutResponseHopLimit":{ "shape":"Integer", @@ -35004,7 +35035,7 @@ }, "HttpTokens":{ "shape":"HttpTokensState", - "documentation":"

IMDSv2 uses token-backed sessions. Indicates whether the use of HTTP tokens is optional (in other words, indicates whether the use of IMDSv2 is optional) or required (in other words, indicates whether the use of IMDSv2 is required).

Default: optional

", + "documentation":"

Indicates whether IMDSv2 is required.

", "locationName":"httpTokens" }, "HttpPutResponseHopLimit":{ @@ -35439,12 +35470,12 @@ }, "SpotMaxPricePercentageOverLowestPrice":{ "shape":"Integer", - "documentation":"

The price protection threshold for Spot Instances. This is the maximum you’ll pay for a Spot Instance, expressed as a percentage above the least expensive current generation M, C, or R instance type with your specified attributes. When Amazon EC2 selects instance types with your attributes, it excludes instance types priced above your threshold.

The parameter accepts an integer, which Amazon EC2 interprets as a percentage.

To turn off price protection, specify a high value, such as 999999.

This parameter is not supported for GetSpotPlacementScores and GetInstanceTypesFromInstanceRequirements.

If you set TargetCapacityUnitType to vcpu or memory-mib, the price protection threshold is applied based on the per-vCPU or per-memory price instead of the per-instance price.

Default: 100

", + "documentation":"

[Price protection] The price protection threshold for Spot Instances, as a percentage higher than an identified Spot price. The identified Spot price is the Spot price of the lowest priced current generation C, M, or R instance type with your specified attributes. If no current generation C, M, or R instance type matches your attributes, then the identified Spot price is from the lowest priced current generation instance types, and failing that, from the lowest priced previous generation instance types that match your attributes. When Amazon EC2 selects instance types with your attributes, it will exclude instance types whose Spot price exceeds your specified threshold.

The parameter accepts an integer, which Amazon EC2 interprets as a percentage.

To indicate no price protection threshold, specify a high value, such as 999999.

If you set TargetCapacityUnitType to vcpu or memory-mib, the price protection threshold is applied based on the per-vCPU or per-memory price instead of the per-instance price.

This parameter is not supported for GetSpotPlacementScores and GetInstanceTypesFromInstanceRequirements.

Only one of SpotMaxPricePercentageOverLowestPrice or MaxSpotPriceAsPercentageOfOptimalOnDemandPrice can be specified. If you don't specify either, then SpotMaxPricePercentageOverLowestPrice is used and the value for that parameter defaults to 100.

Default: 100

", "locationName":"spotMaxPricePercentageOverLowestPrice" }, "OnDemandMaxPricePercentageOverLowestPrice":{ "shape":"Integer", - "documentation":"

The price protection threshold for On-Demand Instances. This is the maximum you’ll pay for an On-Demand Instance, expressed as a percentage above the least expensive current generation M, C, or R instance type with your specified attributes. When Amazon EC2 selects instance types with your attributes, it excludes instance types priced above your threshold.

The parameter accepts an integer, which Amazon EC2 interprets as a percentage.

To turn off price protection, specify a high value, such as 999999.

This parameter is not supported for GetSpotPlacementScores and GetInstanceTypesFromInstanceRequirements.

If you set TargetCapacityUnitType to vcpu or memory-mib, the price protection threshold is applied based on the per-vCPU or per-memory price instead of the per-instance price.

Default: 20

", + "documentation":"

[Price protection] The price protection threshold for On-Demand Instances, as a percentage higher than an identified On-Demand price. The identified On-Demand price is the price of the lowest priced current generation C, M, or R instance type with your specified attributes. When Amazon EC2 selects instance types with your attributes, it will exclude instance types whose price exceeds your specified threshold.

The parameter accepts an integer, which Amazon EC2 interprets as a percentage.

To turn off price protection, specify a high value, such as 999999.

This parameter is not supported for GetSpotPlacementScores and GetInstanceTypesFromInstanceRequirements.

If you set TargetCapacityUnitType to vcpu or memory-mib, the price protection threshold is applied based on the per-vCPU or per-memory price instead of the per-instance price.

Default: 20

", "locationName":"onDemandMaxPricePercentageOverLowestPrice" }, "BareMetal":{ @@ -35521,6 +35552,11 @@ "shape":"AllowedInstanceTypeSet", "documentation":"

The instance types to apply your specified attributes against. All other instance types are ignored, even if they match your specified attributes.

You can use strings with one or more wild cards, represented by an asterisk (*), to allow an instance type, size, or generation. The following are examples: m5.8xlarge, c5*.*, m5a.*, r*, *3*.

For example, if you specify c5*, Amazon EC2 will allow the entire C5 instance family, which includes all C5a and C5n instance types. If you specify m5a.*, Amazon EC2 will allow all the M5a instance types, but not the M5n instance types.

If you specify AllowedInstanceTypes, you can't specify ExcludedInstanceTypes.

Default: All instance types

", "locationName":"allowedInstanceTypeSet" + }, + "MaxSpotPriceAsPercentageOfOptimalOnDemandPrice":{ + "shape":"Integer", + "documentation":"

[Price protection] The price protection threshold for Spot Instances, as a percentage of an identified On-Demand price. The identified On-Demand price is the price of the lowest priced current generation C, M, or R instance type with your specified attributes. If no current generation C, M, or R instance type matches your attributes, then the identified price is from the lowest priced current generation instance types, and failing that, from the lowest priced previous generation instance types that match your attributes. When Amazon EC2 selects instance types with your attributes, it will exclude instance types whose price exceeds your specified threshold.

The parameter accepts an integer, which Amazon EC2 interprets as a percentage.

To indicate no price protection threshold, specify a high value, such as 999999.

If you set DesiredCapacityType to vcpu or memory-mib, the price protection threshold is based on the per vCPU or per memory price instead of the per instance price.

Only one of SpotMaxPricePercentageOverLowestPrice or MaxSpotPriceAsPercentageOfOptimalOnDemandPrice can be specified. If you don't specify either, then SpotMaxPricePercentageOverLowestPrice is used and the value for that parameter defaults to 100.

", + "locationName":"maxSpotPriceAsPercentageOfOptimalOnDemandPrice" } }, "documentation":"

The attributes for the instance types. When you specify instance attributes, Amazon EC2 will identify instance types with these attributes.

You must specify VCpuCount and MemoryMiB. All other attributes are optional. Any unspecified optional attribute is set to its default.

When you specify multiple attributes, you get instance types that satisfy all of the specified attributes. If you specify multiple values for an attribute, you get instance types that satisfy any of the specified values.

To limit the list of instance types from which Amazon EC2 can identify matching instance types, you can use one of the following parameters, but not both in the same request:

If you specify InstanceRequirements, you can't specify InstanceType.

Attribute-based instance type selection is only supported when using Auto Scaling groups, EC2 Fleet, and Spot Fleet to launch instances. If you plan to use the launch template in the launch instance wizard or with the RunInstances API, you can't specify InstanceRequirements.

For more information, see Create a mixed instances group using attribute-based instance type selection in the Amazon EC2 Auto Scaling User Guide, and also Attribute-based instance type selection for EC2 Fleet, Attribute-based instance type selection for Spot Fleet, and Spot placement score in the Amazon EC2 User Guide.

" @@ -35561,11 +35597,11 @@ }, "SpotMaxPricePercentageOverLowestPrice":{ "shape":"Integer", - "documentation":"

The price protection threshold for Spot Instance. This is the maximum you’ll pay for an Spot Instance, expressed as a percentage above the least expensive current generation M, C, or R instance type with your specified attributes. When Amazon EC2 selects instance types with your attributes, it excludes instance types priced above your threshold.

The parameter accepts an integer, which Amazon EC2 interprets as a percentage.

To turn off price protection, specify a high value, such as 999999.

This parameter is not supported for GetSpotPlacementScores and GetInstanceTypesFromInstanceRequirements.

If you set TargetCapacityUnitType to vcpu or memory-mib, the price protection threshold is applied based on the per-vCPU or per-memory price instead of the per-instance price.

Default: 100

" + "documentation":"

[Price protection] The price protection threshold for Spot Instances, as a percentage higher than an identified Spot price. The identified Spot price is the Spot price of the lowest priced current generation C, M, or R instance type with your specified attributes. If no current generation C, M, or R instance type matches your attributes, then the identified Spot price is from the lowest priced current generation instance types, and failing that, from the lowest priced previous generation instance types that match your attributes. When Amazon EC2 selects instance types with your attributes, it will exclude instance types whose Spot price exceeds your specified threshold.

The parameter accepts an integer, which Amazon EC2 interprets as a percentage.

To indicate no price protection threshold, specify a high value, such as 999999.

If you set TargetCapacityUnitType to vcpu or memory-mib, the price protection threshold is applied based on the per-vCPU or per-memory price instead of the per-instance price.

This parameter is not supported for GetSpotPlacementScores and GetInstanceTypesFromInstanceRequirements.

Only one of SpotMaxPricePercentageOverLowestPrice or MaxSpotPriceAsPercentageOfOptimalOnDemandPrice can be specified. If you don't specify either, then SpotMaxPricePercentageOverLowestPrice is used and the value for that parameter defaults to 100.

Default: 100

" }, "OnDemandMaxPricePercentageOverLowestPrice":{ "shape":"Integer", - "documentation":"

The price protection threshold for On-Demand Instances. This is the maximum you’ll pay for an On-Demand Instance, expressed as a percentage above the least expensive current generation M, C, or R instance type with your specified attributes. When Amazon EC2 selects instance types with your attributes, it excludes instance types priced above your threshold.

The parameter accepts an integer, which Amazon EC2 interprets as a percentage.

To turn off price protection, specify a high value, such as 999999.

This parameter is not supported for GetSpotPlacementScores and GetInstanceTypesFromInstanceRequirements.

If you set TargetCapacityUnitType to vcpu or memory-mib, the price protection threshold is applied based on the per-vCPU or per-memory price instead of the per-instance price.

Default: 20

" + "documentation":"

[Price protection] The price protection threshold for On-Demand Instances, as a percentage higher than an identified On-Demand price. The identified On-Demand price is the price of the lowest priced current generation C, M, or R instance type with your specified attributes. When Amazon EC2 selects instance types with your attributes, it will exclude instance types whose price exceeds your specified threshold.

The parameter accepts an integer, which Amazon EC2 interprets as a percentage.

To indicate no price protection threshold, specify a high value, such as 999999.

This parameter is not supported for GetSpotPlacementScores and GetInstanceTypesFromInstanceRequirements.

If you set TargetCapacityUnitType to vcpu or memory-mib, the price protection threshold is applied based on the per-vCPU or per-memory price instead of the per-instance price.

Default: 20

" }, "BareMetal":{ "shape":"BareMetal", @@ -35631,6 +35667,10 @@ "shape":"AllowedInstanceTypeSet", "documentation":"

The instance types to apply your specified attributes against. All other instance types are ignored, even if they match your specified attributes.

You can use strings with one or more wild cards, represented by an asterisk (*), to allow an instance type, size, or generation. The following are examples: m5.8xlarge, c5*.*, m5a.*, r*, *3*.

For example, if you specify c5*, Amazon EC2 will allow the entire C5 instance family, which includes all C5a and C5n instance types. If you specify m5a.*, Amazon EC2 will allow all the M5a instance types, but not the M5n instance types.

If you specify AllowedInstanceTypes, you can't specify ExcludedInstanceTypes.

Default: All instance types

", "locationName":"AllowedInstanceType" + }, + "MaxSpotPriceAsPercentageOfOptimalOnDemandPrice":{ + "shape":"Integer", + "documentation":"

[Price protection] The price protection threshold for Spot Instances, as a percentage of an identified On-Demand price. The identified On-Demand price is the price of the lowest priced current generation C, M, or R instance type with your specified attributes. If no current generation C, M, or R instance type matches your attributes, then the identified price is from the lowest priced current generation instance types, and failing that, from the lowest priced previous generation instance types that match your attributes. When Amazon EC2 selects instance types with your attributes, it will exclude instance types whose price exceeds your specified threshold.

The parameter accepts an integer, which Amazon EC2 interprets as a percentage.

To indicate no price protection threshold, specify a high value, such as 999999.

If you set DesiredCapacityType to vcpu or memory-mib, the price protection threshold is based on the per vCPU or per memory price instead of the per instance price.

Only one of SpotMaxPricePercentageOverLowestPrice or MaxSpotPriceAsPercentageOfOptimalOnDemandPrice can be specified. If you don't specify either, then SpotMaxPricePercentageOverLowestPrice is used and the value for that parameter defaults to 100.

" } }, "documentation":"

The attributes for the instance types. When you specify instance attributes, Amazon EC2 will identify instance types with these attributes.

You must specify VCpuCount and MemoryMiB. All other attributes are optional. Any unspecified optional attribute is set to its default.

When you specify multiple attributes, you get instance types that satisfy all of the specified attributes. If you specify multiple values for an attribute, you get instance types that satisfy any of the specified values.

To limit the list of instance types from which Amazon EC2 can identify matching instance types, you can use one of the following parameters, but not both in the same request:

If you specify InstanceRequirements, you can't specify InstanceType.

Attribute-based instance type selection is only supported when using Auto Scaling groups, EC2 Fleet, and Spot Fleet to launch instances. If you plan to use the launch template in the launch instance wizard, or with the RunInstances API or AWS::EC2::Instance Amazon Web Services CloudFormation resource, you can't specify InstanceRequirements.

For more information, see Attribute-based instance type selection for EC2 Fleet, Attribute-based instance type selection for Spot Fleet, and Spot placement score in the Amazon EC2 User Guide.

" @@ -40211,7 +40251,7 @@ "members":{ "ResourceType":{ "shape":"ResourceType", - "documentation":"

The type of resource to tag.

Valid Values lists all resource types for Amazon EC2 that can be tagged. When you create a launch template, you can specify tags for the following resource types only: instance | volume | elastic-gpu | network-interface | spot-instances-request. If the instance does not include the resource type that you specify, the instance launch fails. For example, not all instance types include an Elastic GPU.

To tag a resource after it has been created, see CreateTags.

" + "documentation":"

The type of resource to tag.

Valid Values lists all resource types for Amazon EC2 that can be tagged. When you create a launch template, you can specify tags for the following resource types only: instance | volume | network-interface | spot-instances-request. If the instance does not include the resource type that you specify, the instance launch fails. For example, not all instance types include a volume.

To tag a resource after it has been created, see CreateTags.

" }, "Tags":{ "shape":"TagList", @@ -41915,7 +41955,7 @@ }, "BlockDeviceMappings":{ "shape":"InstanceBlockDeviceMappingSpecificationList", - "documentation":"

Modifies the DeleteOnTermination attribute for volumes that are currently attached. The volume must be owned by the caller. If no value is specified for DeleteOnTermination, the default is true and the volume is deleted when the instance is terminated.

To add instance store volumes to an Amazon EBS-backed instance, you must add them when you launch the instance. For more information, see Update the block device mapping when launching an instance in the Amazon EC2 User Guide.

", + "documentation":"

Modifies the DeleteOnTermination attribute for volumes that are currently attached. The volume must be owned by the caller. If no value is specified for DeleteOnTermination, the default is true and the volume is deleted when the instance is terminated. You can't modify the DeleteOnTermination attribute for volumes that are attached to Fargate tasks.

To add instance store volumes to an Amazon EBS-backed instance, you must add them when you launch the instance. For more information, see Update the block device mapping when launching an instance in the Amazon EC2 User Guide.

", "locationName":"blockDeviceMapping" }, "DisableApiTermination":{ @@ -42170,7 +42210,7 @@ }, "HttpTokens":{ "shape":"HttpTokensState", - "documentation":"

IMDSv2 uses token-backed sessions. Set the use of HTTP tokens to optional (in other words, set the use of IMDSv2 to optional) or required (in other words, set the use of IMDSv2 to required).

Default: optional

" + "documentation":"

Indicates whether IMDSv2 is required.

Default: If the value of ImdsSupport for the Amazon Machine Image (AMI) for your instance is v2.0, the default is required.

" }, "HttpPutResponseHopLimit":{ "shape":"Integer", @@ -42878,7 +42918,7 @@ }, "MapPublicIpOnLaunch":{ "shape":"AttributeBooleanValue", - "documentation":"

Specify true to indicate that network interfaces attached to instances created in the specified subnet should be assigned a public IPv4 address.

" + "documentation":"

Specify true to indicate that network interfaces attached to instances created in the specified subnet should be assigned a public IPv4 address.

Starting on February 1, 2024, Amazon Web Services will charge for all public IPv4 addresses, including public IPv4 addresses associated with running instances and Elastic IP addresses. For more information, see the Public IPv4 Address tab on the Amazon VPC pricing page.

" }, "SubnetId":{ "shape":"SubnetId", @@ -44203,7 +44243,7 @@ }, "RekeyMarginTimeSeconds":{ "shape":"Integer", - "documentation":"

The margin time, in seconds, before the phase 2 lifetime expires, during which the Amazon Web Services side of the VPN connection performs an IKE rekey. The exact time of the rekey is randomly selected based on the value for RekeyFuzzPercentage.

Constraints: A value between 60 and half of Phase2LifetimeSeconds.

Default: 540

" + "documentation":"

The margin time, in seconds, before the phase 2 lifetime expires, during which the Amazon Web Services side of the VPN connection performs an IKE rekey. The exact time of the rekey is randomly selected based on the value for RekeyFuzzPercentage.

Constraints: A value between 60 and half of Phase2LifetimeSeconds.

Default: 270

" }, "RekeyFuzzPercentage":{ "shape":"Integer", @@ -49057,7 +49097,7 @@ }, "ElasticGpuSpecifications":{ "shape":"ElasticGpuSpecificationList", - "documentation":"

An elastic GPU to associate with the instance.

", + "documentation":"

Deprecated.

Amazon Elastic Graphics reached end of life on January 8, 2024. For workloads that require graphics acceleration, we recommend that you use Amazon EC2 G4ad, G4dn, or G5 instances.

", "locationName":"ElasticGpuSpecification" }, "ElasticInferenceAccelerators":{ @@ -50337,7 +50377,7 @@ }, "ElasticGpuSpecifications":{ "shape":"ElasticGpuSpecificationResponseList", - "documentation":"

The elastic GPU specification.

", + "documentation":"

Deprecated.

Amazon Elastic Graphics reached end of life on January 8, 2024. For workloads that require graphics acceleration, we recommend that you use Amazon EC2 G4ad, G4dn, or G5 instances.

", "locationName":"elasticGpuSpecificationSet" }, "ElasticInferenceAccelerators":{ @@ -51283,7 +51323,7 @@ }, "ElasticGpuSpecification":{ "shape":"ElasticGpuSpecifications", - "documentation":"

An elastic GPU to associate with the instance. An Elastic GPU is a GPU resource that you can attach to your Windows instance to accelerate the graphics performance of your applications. For more information, see Amazon EC2 Elastic GPUs in the Amazon EC2 User Guide.

" + "documentation":"

Deprecated.

Amazon Elastic Graphics reached end of life on January 8, 2024. For workloads that require graphics acceleration, we recommend that you use Amazon EC2 G4ad, G4dn, or G5 instances.

" }, "ElasticInferenceAccelerators":{ "shape":"ElasticInferenceAccelerators", @@ -51292,7 +51332,7 @@ }, "TagSpecifications":{ "shape":"TagSpecificationList", - "documentation":"

The tags to apply to the resources that are created during instance launch.

You can specify tags for the following resources only:

To tag a resource after it has been created, see CreateTags.

", + "documentation":"

The tags to apply to the resources that are created during instance launch.

You can specify tags for the following resources only:

To tag a resource after it has been created, see CreateTags.

", "locationName":"TagSpecification" }, "LaunchTemplate":{ @@ -51867,7 +51907,7 @@ "members":{ "AssociatePublicIpAddress":{ "shape":"Boolean", - "documentation":"

Indicates whether to assign a public IPv4 address to instances launched in a VPC. The public IPv4 address can only be assigned to a network interface for eth0, and can only be assigned to a new network interface, not an existing one. You cannot specify more than one network interface in the request. If launching into a default subnet, the default value is true.

" + "documentation":"

Indicates whether to assign a public IPv4 address to instances launched in a VPC. The public IPv4 address can only be assigned to a network interface for eth0, and can only be assigned to a new network interface, not an existing one. You cannot specify more than one network interface in the request. If launching into a default subnet, the default value is true.

Starting on February 1, 2024, Amazon Web Services will charge for all public IPv4 addresses, including public IPv4 addresses associated with running instances and Elastic IP addresses. For more information, see the Public IPv4 Address tab on the Amazon VPC pricing page.

" }, "DeleteOnTermination":{ "shape":"Boolean", @@ -53524,7 +53564,7 @@ }, "TargetCapacityUnitType":{ "shape":"TargetCapacityUnitType", - "documentation":"

The unit for the target capacity. TargetCapacityUnitType can only be specified when InstanceRequirements is specified.

Default: units (translates to number of instances)

", + "documentation":"

The unit for the target capacity. You can specify this parameter only when using attribute-based instance type selection.

Default: units (the number of instances)

", "locationName":"targetCapacityUnitType" }, "TagSpecifications":{ @@ -54225,7 +54265,7 @@ }, "Message":{ "shape":"String", - "documentation":"

The message for the state change.

", + "documentation":"

The message for the state change.

", "locationName":"message" } }, @@ -54425,7 +54465,7 @@ }, "MapPublicIpOnLaunch":{ "shape":"Boolean", - "documentation":"

Indicates whether instances launched in this subnet receive a public IPv4 address.

", + "documentation":"

Indicates whether instances launched in this subnet receive a public IPv4 address.

Starting on February 1, 2024, Amazon Web Services will charge for all public IPv4 addresses, including public IPv4 addresses associated with running instances and Elastic IP addresses. For more information, see the Public IPv4 Address tab on the Amazon VPC pricing page.

", "locationName":"mapPublicIpOnLaunch" }, "MapCustomerOwnedIpOnLaunch":{ @@ -54680,7 +54720,8 @@ "type":"string", "enum":[ "pending", - "available" + "available", + "unavailable" ] }, "Subscription":{ @@ -54872,7 +54913,7 @@ "members":{ "TotalTargetCapacity":{ "shape":"Integer", - "documentation":"

The number of units to request, filled using DefaultTargetCapacityType.

", + "documentation":"

The number of units to request, filled using the default target capacity type.

", "locationName":"totalTargetCapacity" }, "OnDemandTargetCapacity":{ @@ -54887,12 +54928,12 @@ }, "DefaultTargetCapacityType":{ "shape":"DefaultTargetCapacityType", - "documentation":"

The default TotalTargetCapacity, which is either Spot or On-Demand.

", + "documentation":"

The default target capacity type.

", "locationName":"defaultTargetCapacityType" }, "TargetCapacityUnitType":{ "shape":"TargetCapacityUnitType", - "documentation":"

The unit for the target capacity. TargetCapacityUnitType can only be specified when InstanceRequirements is specified.

Default: units (translates to number of instances)

", + "documentation":"

The unit for the target capacity.

", "locationName":"targetCapacityUnitType" } }, @@ -54904,7 +54945,7 @@ "members":{ "TotalTargetCapacity":{ "shape":"Integer", - "documentation":"

The number of units to request, filled using DefaultTargetCapacityType.

" + "documentation":"

The number of units to request, filled using the default target capacity type.

" }, "OnDemandTargetCapacity":{ "shape":"Integer", @@ -54916,14 +54957,14 @@ }, "DefaultTargetCapacityType":{ "shape":"DefaultTargetCapacityType", - "documentation":"

The default TotalTargetCapacity, which is either Spot or On-Demand.

" + "documentation":"

The default target capacity type.

" }, "TargetCapacityUnitType":{ "shape":"TargetCapacityUnitType", - "documentation":"

The unit for the target capacity. TargetCapacityUnitType can only be specified when InstanceRequirements is specified.

Default: units (translates to number of instances)

" + "documentation":"

The unit for the target capacity. You can specify this parameter only when using attribute-based instance type selection.

Default: units (the number of instances)

" } }, - "documentation":"

The number of units to request. You can choose to set the target capacity as the number of instances. Or you can set the target capacity to a performance characteristic that is important to your application workload, such as vCPUs, memory, or I/O. If the request type is maintain, you can specify a target capacity of 0 and add capacity later.

You can use the On-Demand Instance MaxTotalPrice parameter, the Spot Instance MaxTotalPrice parameter, or both parameters to ensure that your fleet cost does not exceed your budget. If you set a maximum price per hour for the On-Demand Instances and Spot Instances in your request, EC2 Fleet will launch instances until it reaches the maximum amount that you're willing to pay. When the maximum amount you're willing to pay is reached, the fleet stops launching instances even if it hasn’t met the target capacity. The MaxTotalPrice parameters are located in OnDemandOptionsRequest and SpotOptionsRequest.

" + "documentation":"

The number of units to request. You can choose to set the target capacity as the number of instances. Or you can set the target capacity to a performance characteristic that is important to your application workload, such as vCPUs, memory, or I/O. If the request type is maintain, you can specify a target capacity of 0 and add capacity later.

You can use the On-Demand Instance MaxTotalPrice parameter, the Spot Instance MaxTotalPrice parameter, or both parameters to ensure that your fleet cost does not exceed your budget. If you set a maximum price per hour for the On-Demand Instances and Spot Instances in your request, EC2 Fleet will launch instances until it reaches the maximum amount that you're willing to pay. When the maximum amount you're willing to pay is reached, the fleet stops launching instances even if it hasn't met the target capacity. The MaxTotalPrice parameters are located in OnDemandOptionsRequest and SpotOptionsRequest.

" }, "TargetCapacityUnitType":{ "type":"string", @@ -58991,12 +59032,12 @@ }, "Device":{ "shape":"String", - "documentation":"

The device name.

", + "documentation":"

The device name.

If the volume is attached to a Fargate task, this parameter returns null.

", "locationName":"device" }, "InstanceId":{ "shape":"String", - "documentation":"

The ID of the instance.

", + "documentation":"

The ID of the instance.

If the volume is attached to a Fargate task, this parameter returns null.

", "locationName":"instanceId" }, "State":{ @@ -59013,6 +59054,16 @@ "shape":"Boolean", "documentation":"

Indicates whether the EBS volume is deleted on instance termination.

", "locationName":"deleteOnTermination" + }, + "AssociatedResource":{ + "shape":"String", + "documentation":"

The ARN of the Amazon ECS or Fargate task to which the volume is attached.

", + "locationName":"associatedResource" + }, + "InstanceOwningService":{ + "shape":"String", + "documentation":"

The service principal of the Amazon Web Services service that owns the underlying instance to which the volume is attached.

This parameter is returned only for volumes that are attached to Fargate tasks.

", + "locationName":"instanceOwningService" } }, "documentation":"

Describes volume attachment details.

" @@ -60394,7 +60445,7 @@ }, "RekeyMarginTimeSeconds":{ "shape":"Integer", - "documentation":"

The margin time, in seconds, before the phase 2 lifetime expires, during which the Amazon Web Services side of the VPN connection performs an IKE rekey. The exact time of the rekey is randomly selected based on the value for RekeyFuzzPercentage.

Constraints: A value between 60 and half of Phase2LifetimeSeconds.

Default: 540

" + "documentation":"

The margin time, in seconds, before the phase 2 lifetime expires, during which the Amazon Web Services side of the VPN connection performs an IKE rekey. The exact time of the rekey is randomly selected based on the value for RekeyFuzzPercentage.

Constraints: A value between 60 and half of Phase2LifetimeSeconds.

Default: 270

" }, "RekeyFuzzPercentage":{ "shape":"Integer", diff -Nru awscli-2.15.9/awscli/botocore/data/ecs/2014-11-13/service-2.json awscli-2.15.22/awscli/botocore/data/ecs/2014-11-13/service-2.json --- awscli-2.15.9/awscli/botocore/data/ecs/2014-11-13/service-2.json 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/awscli/botocore/data/ecs/2014-11-13/service-2.json 2024-02-21 17:34:54.000000000 +0000 @@ -65,7 +65,7 @@ {"shape":"AccessDeniedException"}, {"shape":"NamespaceNotFoundException"} ], - "documentation":"

Runs and maintains your desired number of tasks from a specified task definition. If the number of tasks running in a service drops below the desiredCount, Amazon ECS runs another copy of the task in the specified cluster. To update an existing service, see the UpdateService action.

Starting April 15, 2023, Amazon Web Services will not onboard new customers to Amazon Elastic Inference (EI), and will help current customers migrate their workloads to options that offer better price and performance. After April 15, 2023, new customers will not be able to launch instances with Amazon EI accelerators in Amazon SageMaker, Amazon ECS, or Amazon EC2. However, customers who have used Amazon EI at least once during the past 30-day period are considered current customers and will be able to continue using the service.

In addition to maintaining the desired count of tasks in your service, you can optionally run your service behind one or more load balancers. The load balancers distribute traffic across the tasks that are associated with the service. For more information, see Service load balancing in the Amazon Elastic Container Service Developer Guide.

Tasks for services that don't use a load balancer are considered healthy if they're in the RUNNING state. Tasks for services that use a load balancer are considered healthy if they're in the RUNNING state and are reported as healthy by the load balancer.

There are two service scheduler strategies available:

You can optionally specify a deployment configuration for your service. The deployment is initiated by changing properties. For example, the deployment might be initiated by the task definition or by your desired count of a service. This is done with an UpdateService operation. The default value for a replica service for minimumHealthyPercent is 100%. The default value for a daemon service for minimumHealthyPercent is 0%.

If a service uses the ECS deployment controller, the minimum healthy percent represents a lower limit on the number of tasks in a service that must remain in the RUNNING state during a deployment. Specifically, it represents it as a percentage of your desired number of tasks (rounded up to the nearest integer). This happens when any of your container instances are in the DRAINING state if the service contains tasks using the EC2 launch type. Using this parameter, you can deploy without using additional cluster capacity. For example, if you set your service to have desired number of four tasks and a minimum healthy percent of 50%, the scheduler might stop two existing tasks to free up cluster capacity before starting two new tasks. If they're in the RUNNING state, tasks for services that don't use a load balancer are considered healthy . If they're in the RUNNING state and reported as healthy by the load balancer, tasks for services that do use a load balancer are considered healthy . The default value for minimum healthy percent is 100%.

If a service uses the ECS deployment controller, the maximum percent parameter represents an upper limit on the number of tasks in a service that are allowed in the RUNNING or PENDING state during a deployment. Specifically, it represents it as a percentage of the desired number of tasks (rounded down to the nearest integer). This happens when any of your container instances are in the DRAINING state if the service contains tasks using the EC2 launch type. Using this parameter, you can define the deployment batch size. For example, if your service has a desired number of four tasks and a maximum percent value of 200%, the scheduler may start four new tasks before stopping the four older tasks (provided that the cluster resources required to do this are available). The default value for maximum percent is 200%.

If a service uses either the CODE_DEPLOY or EXTERNAL deployment controller types and tasks that use the EC2 launch type, the minimum healthy percent and maximum percent values are used only to define the lower and upper limit on the number of the tasks in the service that remain in the RUNNING state. This is while the container instances are in the DRAINING state. If the tasks in the service use the Fargate launch type, the minimum healthy percent and maximum percent values aren't used. This is the case even if they're currently visible when describing your service.

When creating a service that uses the EXTERNAL deployment controller, you can specify only parameters that aren't controlled at the task set level. The only required parameter is the service name. You control your services using the CreateTaskSet operation. For more information, see Amazon ECS deployment types in the Amazon Elastic Container Service Developer Guide.

When the service scheduler launches new tasks, it determines task placement. For information about task placement and task placement strategies, see Amazon ECS task placement in the Amazon Elastic Container Service Developer Guide.

" + "documentation":"

Runs and maintains your desired number of tasks from a specified task definition. If the number of tasks running in a service drops below the desiredCount, Amazon ECS runs another copy of the task in the specified cluster. To update an existing service, see the UpdateService action.

Starting April 15, 2023, Amazon Web Services will not onboard new customers to Amazon Elastic Inference (EI), and will help current customers migrate their workloads to options that offer better price and performance. After April 15, 2023, new customers will not be able to launch instances with Amazon EI accelerators in Amazon SageMaker, Amazon ECS, or Amazon EC2. However, customers who have used Amazon EI at least once during the past 30-day period are considered current customers and will be able to continue using the service.

In addition to maintaining the desired count of tasks in your service, you can optionally run your service behind one or more load balancers. The load balancers distribute traffic across the tasks that are associated with the service. For more information, see Service load balancing in the Amazon Elastic Container Service Developer Guide.

You can attach Amazon EBS volumes to Amazon ECS tasks by configuring the volume when creating or updating a service. volumeConfigurations is only supported for REPLICA service and not DAEMON service. For more information, see Amazon EBS volumes in the Amazon Elastic Container Service Developer Guide.

Tasks for services that don't use a load balancer are considered healthy if they're in the RUNNING state. Tasks for services that use a load balancer are considered healthy if they're in the RUNNING state and are reported as healthy by the load balancer.

There are two service scheduler strategies available:

You can optionally specify a deployment configuration for your service. The deployment is initiated by changing properties. For example, the deployment might be initiated by the task definition or by your desired count of a service. This is done with an UpdateService operation. The default value for a replica service for minimumHealthyPercent is 100%. The default value for a daemon service for minimumHealthyPercent is 0%.

If a service uses the ECS deployment controller, the minimum healthy percent represents a lower limit on the number of tasks in a service that must remain in the RUNNING state during a deployment. Specifically, it represents it as a percentage of your desired number of tasks (rounded up to the nearest integer). This happens when any of your container instances are in the DRAINING state if the service contains tasks using the EC2 launch type. Using this parameter, you can deploy without using additional cluster capacity. For example, if you set your service to have desired number of four tasks and a minimum healthy percent of 50%, the scheduler might stop two existing tasks to free up cluster capacity before starting two new tasks. If they're in the RUNNING state, tasks for services that don't use a load balancer are considered healthy . If they're in the RUNNING state and reported as healthy by the load balancer, tasks for services that do use a load balancer are considered healthy . The default value for minimum healthy percent is 100%.

If a service uses the ECS deployment controller, the maximum percent parameter represents an upper limit on the number of tasks in a service that are allowed in the RUNNING or PENDING state during a deployment. Specifically, it represents it as a percentage of the desired number of tasks (rounded down to the nearest integer). This happens when any of your container instances are in the DRAINING state if the service contains tasks using the EC2 launch type. Using this parameter, you can define the deployment batch size. For example, if your service has a desired number of four tasks and a maximum percent value of 200%, the scheduler may start four new tasks before stopping the four older tasks (provided that the cluster resources required to do this are available). The default value for maximum percent is 200%.

If a service uses either the CODE_DEPLOY or EXTERNAL deployment controller types and tasks that use the EC2 launch type, the minimum healthy percent and maximum percent values are used only to define the lower and upper limit on the number of the tasks in the service that remain in the RUNNING state. This is while the container instances are in the DRAINING state. If the tasks in the service use the Fargate launch type, the minimum healthy percent and maximum percent values aren't used. This is the case even if they're currently visible when describing your service.

When creating a service that uses the EXTERNAL deployment controller, you can specify only parameters that aren't controlled at the task set level. The only required parameter is the service name. You control your services using the CreateTaskSet operation. For more information, see Amazon ECS deployment types in the Amazon Elastic Container Service Developer Guide.

When the service scheduler launches new tasks, it determines task placement. For information about task placement and task placement strategies, see Amazon ECS task placement in the Amazon Elastic Container Service Developer Guide.

" }, "CreateTaskSet":{ "name":"CreateTaskSet", @@ -88,7 +88,7 @@ {"shape":"ServiceNotActiveException"}, {"shape":"NamespaceNotFoundException"} ], - "documentation":"

Create a task set in the specified cluster and service. This is used when a service uses the EXTERNAL deployment controller type. For more information, see Amazon ECS deployment types in the Amazon Elastic Container Service Developer Guide.

You can create a maximum of 5 tasks sets for a deployment.

" + "documentation":"

Create a task set in the specified cluster and service. This is used when a service uses the EXTERNAL deployment controller type. For more information, see Amazon ECS deployment types in the Amazon Elastic Container Service Developer Guide.

For information about the maximum number of task sets and other quotas, see Amazon ECS service quotas in the Amazon Elastic Container Service Developer Guide.

" }, "DeleteAccountSetting":{ "name":"DeleteAccountSetting", @@ -572,7 +572,7 @@ {"shape":"ClientException"}, {"shape":"InvalidParameterException"} ], - "documentation":"

Modifies an account setting. Account settings are set on a per-Region basis.

If you change the root user account setting, the default settings are reset for users and roles that do not have specified individual account settings. For more information, see Account Settings in the Amazon Elastic Container Service Developer Guide.

When you specify serviceLongArnFormat, taskLongArnFormat, or containerInstanceLongArnFormat, the Amazon Resource Name (ARN) and resource ID format of the resource type for a specified user, role, or the root user for an account is affected. The opt-in and opt-out account setting must be set for each Amazon ECS resource separately. The ARN and resource ID format of a resource is defined by the opt-in status of the user or role that created the resource. You must turn on this setting to use Amazon ECS features such as resource tagging.

When you specify awsvpcTrunking, the elastic network interface (ENI) limit for any new container instances that support the feature is changed. If awsvpcTrunking is turned on, any new container instances that support the feature are launched have the increased ENI limits available to them. For more information, see Elastic Network Interface Trunking in the Amazon Elastic Container Service Developer Guide.

When you specify containerInsights, the default setting indicating whether Amazon Web Services CloudWatch Container Insights is turned on for your clusters is changed. If containerInsights is turned on, any new clusters that are created will have Container Insights turned on unless you disable it during cluster creation. For more information, see CloudWatch Container Insights in the Amazon Elastic Container Service Developer Guide.

Amazon ECS is introducing tagging authorization for resource creation. Users must have permissions for actions that create the resource, such as ecsCreateCluster. If tags are specified when you create a resource, Amazon Web Services performs additional authorization to verify if users or roles have permissions to create tags. Therefore, you must grant explicit permissions to use the ecs:TagResource action. For more information, see Grant permission to tag resources on creation in the Amazon ECS Developer Guide.

When Amazon Web Services determines that a security or infrastructure update is needed for an Amazon ECS task hosted on Fargate, the tasks need to be stopped and new tasks launched to replace them. Use fargateTaskRetirementWaitPeriod to configure the wait time to retire a Fargate task. For information about the Fargate tasks maintenance, see Amazon Web Services Fargate task maintenance in the Amazon ECS Developer Guide.

The guardDutyActivate parameter is read-only in Amazon ECS and indicates whether Amazon ECS Runtime Monitoring is enabled or disabled by your security administrator in your Amazon ECS account. Amazon GuardDuty controls this account setting on your behalf. For more information, see Protecting Amazon ECS workloads with Amazon ECS Runtime Monitoring.

" + "documentation":"

Modifies an account setting. Account settings are set on a per-Region basis.

If you change the root user account setting, the default settings are reset for users and roles that do not have specified individual account settings. For more information, see Account Settings in the Amazon Elastic Container Service Developer Guide.

" }, "PutAccountSettingDefault":{ "name":"PutAccountSettingDefault", @@ -673,7 +673,7 @@ {"shape":"BlockedException"}, {"shape":"ConflictException"} ], - "documentation":"

Starts a new task using the specified task definition.

You can allow Amazon ECS to place tasks for you, or you can customize how Amazon ECS places tasks using placement constraints and placement strategies. For more information, see Scheduling Tasks in the Amazon Elastic Container Service Developer Guide.

Alternatively, you can use StartTask to use your own scheduler or place tasks manually on specific container instances.

Starting April 15, 2023, Amazon Web Services will not onboard new customers to Amazon Elastic Inference (EI), and will help current customers migrate their workloads to options that offer better price and performance. After April 15, 2023, new customers will not be able to launch instances with Amazon EI accelerators in Amazon SageMaker, Amazon ECS, or Amazon EC2. However, customers who have used Amazon EI at least once during the past 30-day period are considered current customers and will be able to continue using the service.

The Amazon ECS API follows an eventual consistency model. This is because of the distributed nature of the system supporting the API. This means that the result of an API command you run that affects your Amazon ECS resources might not be immediately visible to all subsequent commands you run. Keep this in mind when you carry out an API command that immediately follows a previous API command.

To manage eventual consistency, you can do the following:

" + "documentation":"

Starts a new task using the specified task definition.

You can allow Amazon ECS to place tasks for you, or you can customize how Amazon ECS places tasks using placement constraints and placement strategies. For more information, see Scheduling Tasks in the Amazon Elastic Container Service Developer Guide.

Alternatively, you can use StartTask to use your own scheduler or place tasks manually on specific container instances.

Starting April 15, 2023, Amazon Web Services will not onboard new customers to Amazon Elastic Inference (EI), and will help current customers migrate their workloads to options that offer better price and performance. After April 15, 2023, new customers will not be able to launch instances with Amazon EI accelerators in Amazon SageMaker, Amazon ECS, or Amazon EC2. However, customers who have used Amazon EI at least once during the past 30-day period are considered current customers and will be able to continue using the service.

You can attach Amazon EBS volumes to Amazon ECS tasks by configuring the volume when creating or updating a service. For more information, see Amazon EBS volumes in the Amazon Elastic Container Service Developer Guide.

The Amazon ECS API follows an eventual consistency model. This is because of the distributed nature of the system supporting the API. This means that the result of an API command you run that affects your Amazon ECS resources might not be immediately visible to all subsequent commands you run. Keep this in mind when you carry out an API command that immediately follows a previous API command.

To manage eventual consistency, you can do the following:

" }, "StartTask":{ "name":"StartTask", @@ -687,9 +687,10 @@ {"shape":"ServerException"}, {"shape":"ClientException"}, {"shape":"InvalidParameterException"}, - {"shape":"ClusterNotFoundException"} + {"shape":"ClusterNotFoundException"}, + {"shape":"UnsupportedFeatureException"} ], - "documentation":"

Starts a new task from the specified task definition on the specified container instance or instances.

Starting April 15, 2023, Amazon Web Services will not onboard new customers to Amazon Elastic Inference (EI), and will help current customers migrate their workloads to options that offer better price and performance. After April 15, 2023, new customers will not be able to launch instances with Amazon EI accelerators in Amazon SageMaker, Amazon ECS, or Amazon EC2. However, customers who have used Amazon EI at least once during the past 30-day period are considered current customers and will be able to continue using the service.

Alternatively, you can use RunTask to place tasks for you. For more information, see Scheduling Tasks in the Amazon Elastic Container Service Developer Guide.

" + "documentation":"

Starts a new task from the specified task definition on the specified container instance or instances.

Starting April 15, 2023, Amazon Web Services will not onboard new customers to Amazon Elastic Inference (EI), and will help current customers migrate their workloads to options that offer better price and performance. After April 15, 2023, new customers will not be able to launch instances with Amazon EI accelerators in Amazon SageMaker, Amazon ECS, or Amazon EC2. However, customers who have used Amazon EI at least once during the past 30-day period are considered current customers and will be able to continue using the service.

Alternatively, you can use RunTask to place tasks for you. For more information, see Scheduling Tasks in the Amazon Elastic Container Service Developer Guide.

You can attach Amazon EBS volumes to Amazon ECS tasks by configuring the volume when creating or updating a service. For more information, see Amazon EBS volumes in the Amazon Elastic Container Service Developer Guide.

" }, "StopTask":{ "name":"StopTask", @@ -889,9 +890,10 @@ {"shape":"PlatformUnknownException"}, {"shape":"PlatformTaskDefinitionIncompatibilityException"}, {"shape":"AccessDeniedException"}, - {"shape":"NamespaceNotFoundException"} + {"shape":"NamespaceNotFoundException"}, + {"shape":"UnsupportedFeatureException"} ], - "documentation":"

Modifies the parameters of a service.

For services using the rolling update (ECS) you can update the desired count, deployment configuration, network configuration, load balancers, service registries, enable ECS managed tags option, propagate tags option, task placement constraints and strategies, and task definition. When you update any of these parameters, Amazon ECS starts new tasks with the new configuration.

For services using the blue/green (CODE_DEPLOY) deployment controller, only the desired count, deployment configuration, health check grace period, task placement constraints and strategies, enable ECS managed tags option, and propagate tags can be updated using this API. If the network configuration, platform version, task definition, or load balancer need to be updated, create a new CodeDeploy deployment. For more information, see CreateDeployment in the CodeDeploy API Reference.

For services using an external deployment controller, you can update only the desired count, task placement constraints and strategies, health check grace period, enable ECS managed tags option, and propagate tags option, using this API. If the launch type, load balancer, network configuration, platform version, or task definition need to be updated, create a new task set For more information, see CreateTaskSet.

You can add to or subtract from the number of instantiations of a task definition in a service by specifying the cluster that the service is running in and a new desiredCount parameter.

If you have updated the Docker image of your application, you can create a new task definition with that image and deploy it to your service. The service scheduler uses the minimum healthy percent and maximum percent parameters (in the service's deployment configuration) to determine the deployment strategy.

If your updated Docker image uses the same tag as what is in the existing task definition for your service (for example, my_image:latest), you don't need to create a new revision of your task definition. You can update the service using the forceNewDeployment option. The new tasks launched by the deployment pull the current image/tag combination from your repository when they start.

You can also update the deployment configuration of a service. When a deployment is triggered by updating the task definition of a service, the service scheduler uses the deployment configuration parameters, minimumHealthyPercent and maximumPercent, to determine the deployment strategy.

When UpdateService stops a task during a deployment, the equivalent of docker stop is issued to the containers running in the task. This results in a SIGTERM and a 30-second timeout. After this, SIGKILL is sent and the containers are forcibly stopped. If the container handles the SIGTERM gracefully and exits within 30 seconds from receiving it, no SIGKILL is sent.

When the service scheduler launches new tasks, it determines task placement in your cluster with the following logic.

When the service scheduler stops running tasks, it attempts to maintain balance across the Availability Zones in your cluster using the following logic:

You must have a service-linked role when you update any of the following service properties:

  • loadBalancers,

  • serviceRegistries

For more information about the role see the CreateService request parameter role .

" + "documentation":"

Modifies the parameters of a service.

For services using the rolling update (ECS) you can update the desired count, deployment configuration, network configuration, load balancers, service registries, enable ECS managed tags option, propagate tags option, task placement constraints and strategies, and task definition. When you update any of these parameters, Amazon ECS starts new tasks with the new configuration.

You can attach Amazon EBS volumes to Amazon ECS tasks by configuring the volume when starting or running a task, or when creating or updating a service. For more information, see Amazon EBS volumes in the Amazon Elastic Container Service Developer Guide. You can update your volume configurations and trigger a new deployment. volumeConfigurations is only supported for REPLICA service and not DAEMON service. If you leave volumeConfigurations null, it doesn't trigger a new deployment. For more information on volumes, see Amazon EBS volumes in the Amazon Elastic Container Service Developer Guide.

For services using the blue/green (CODE_DEPLOY) deployment controller, only the desired count, deployment configuration, health check grace period, task placement constraints and strategies, enable ECS managed tags option, and propagate tags can be updated using this API. If the network configuration, platform version, task definition, or load balancer need to be updated, create a new CodeDeploy deployment. For more information, see CreateDeployment in the CodeDeploy API Reference.

For services using an external deployment controller, you can update only the desired count, task placement constraints and strategies, health check grace period, enable ECS managed tags option, and propagate tags option, using this API. If the launch type, load balancer, network configuration, platform version, or task definition need to be updated, create a new task set For more information, see CreateTaskSet.

You can add to or subtract from the number of instantiations of a task definition in a service by specifying the cluster that the service is running in and a new desiredCount parameter.

You can attach Amazon EBS volumes to Amazon ECS tasks by configuring the volume when starting or running a task, or when creating or updating a service. For more information, see Amazon EBS volumes in the Amazon Elastic Container Service Developer Guide.

If you have updated the container image of your application, you can create a new task definition with that image and deploy it to your service. The service scheduler uses the minimum healthy percent and maximum percent parameters (in the service's deployment configuration) to determine the deployment strategy.

If your updated Docker image uses the same tag as what is in the existing task definition for your service (for example, my_image:latest), you don't need to create a new revision of your task definition. You can update the service using the forceNewDeployment option. The new tasks launched by the deployment pull the current image/tag combination from your repository when they start.

You can also update the deployment configuration of a service. When a deployment is triggered by updating the task definition of a service, the service scheduler uses the deployment configuration parameters, minimumHealthyPercent and maximumPercent, to determine the deployment strategy.

When UpdateService stops a task during a deployment, the equivalent of docker stop is issued to the containers running in the task. This results in a SIGTERM and a 30-second timeout. After this, SIGKILL is sent and the containers are forcibly stopped. If the container handles the SIGTERM gracefully and exits within 30 seconds from receiving it, no SIGKILL is sent.

When the service scheduler launches new tasks, it determines task placement in your cluster with the following logic.

When the service scheduler stops running tasks, it attempts to maintain balance across the Availability Zones in your cluster using the following logic:

You must have a service-linked role when you update any of the following service properties:

  • loadBalancers,

  • serviceRegistries

For more information about the role see the CreateService request parameter role .

" }, "UpdateServicePrimaryTaskSet":{ "name":"UpdateServicePrimaryTaskSet", @@ -998,7 +1000,7 @@ }, "type":{ "shape":"String", - "documentation":"

The type of the attachment, such as ElasticNetworkInterface.

" + "documentation":"

The type of the attachment, such as ElasticNetworkInterface, Service Connect, and AmazonElasticBlockStorage.

" }, "status":{ "shape":"String", @@ -1006,7 +1008,7 @@ }, "details":{ "shape":"AttachmentDetails", - "documentation":"

Details of the attachment. For elastic network interfaces, this includes the network interface ID, the MAC address, the subnet ID, and the private IPv4 address.

" + "documentation":"

Details of the attachment.

For elastic network interfaces, this includes the network interface ID, the MAC address, the subnet ID, and the private IPv4 address.

For Service Connect services, this includes portName, clientAliases, discoveryName, and ingressPortOverride.

For elastic block storage, this includes roleArn, encrypted, filesystemType, iops, kmsKeyId, sizeInGiB, snapshotId, tagSpecifications, throughput, and volumeType.

" } }, "documentation":"

An object representing a container instance or task attachment.

" @@ -1093,7 +1095,7 @@ }, "managedDraining":{ "shape":"ManagedDraining", - "documentation":"

The managed draining option for the Auto Scaling group capacity provider. When you enable this, Amazon ECS manages and gracefully drains the EC2 container instances that are in the Auto Scaling group capacity provider.

The default is ENABLED.

" + "documentation":"

The managed draining option for the Auto Scaling group capacity provider. When you enable this, Amazon ECS manages and gracefully drains the EC2 container instances that are in the Auto Scaling group capacity provider.

" } }, "documentation":"

The details of the Auto Scaling group for the capacity provider.

" @@ -1111,7 +1113,7 @@ }, "managedDraining":{ "shape":"ManagedDraining", - "documentation":"

The managed draining option for the Auto Scaling group capacity provider. When you enable this, Amazon ECS manages and gracefully drains the EC2 container instances that are in the Auto Scaling group capacity provider.

The default is ENABLED.

" + "documentation":"

The managed draining option for the Auto Scaling group capacity provider. When you enable this, Amazon ECS manages and gracefully drains the EC2 container instances that are in the Auto Scaling group capacity provider.

" } }, "documentation":"

The details of the Auto Scaling group capacity provider to update.

" @@ -1133,7 +1135,7 @@ "documentation":"

Whether the task's elastic network interface receives a public IP address. The default value is DISABLED.

" } }, - "documentation":"

An object representing the networking details for a task or service.

" + "documentation":"

An object representing the networking details for a task or service. For example awsvpcConfiguration={subnets=[\"subnet-12344321\"],securityGroups=[\"sg-12344321\"]}

" }, "BlockedException":{ "type":"structure", @@ -1685,7 +1687,7 @@ }, "ulimits":{ "shape":"UlimitList", - "documentation":"

A list of ulimits to set in the container. If a ulimit value is specified in a task definition, it overrides the default values set by Docker. This parameter maps to Ulimits in the Create a container section of the Docker Remote API and the --ulimit option to docker run. Valid naming values are displayed in the Ulimit data type.

Amazon ECS tasks hosted on Fargate use the default resource limit values set by the operating system with the exception of the nofile resource limit parameter which Fargate overrides. The nofile resource limit sets a restriction on the number of open files that a container can use. The default nofile soft limit is 1024 and the default hard limit is 4096.

This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version --format '{{.Server.APIVersion}}'

This parameter is not supported for Windows containers.

" + "documentation":"

A list of ulimits to set in the container. If a ulimit value is specified in a task definition, it overrides the default values set by Docker. This parameter maps to Ulimits in the Create a container section of the Docker Remote API and the --ulimit option to docker run. Valid naming values are displayed in the Ulimit data type.

Amazon ECS tasks hosted on Fargate use the default resource limit values set by the operating system with the exception of the nofile resource limit parameter which Fargate overrides. The nofile resource limit sets a restriction on the number of open files that a container can use. The default nofile soft limit is 1024 and the default hard limit is 65535.

This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version --format '{{.Server.APIVersion}}'

This parameter is not supported for Windows containers.

" }, "logConfiguration":{ "shape":"LogConfiguration", @@ -1697,7 +1699,7 @@ }, "systemControls":{ "shape":"SystemControls", - "documentation":"

A list of namespaced kernel parameters to set in the container. This parameter maps to Sysctls in the Create a container section of the Docker Remote API and the --sysctl option to docker run. For example, you can configure net.ipv4.tcp_keepalive_time setting to maintain longer lived connections.

We don't recommended that you specify network-related systemControls parameters for multiple containers in a single task that also uses either the awsvpc or host network modes. For tasks that use the awsvpc network mode, the container that's started last determines which systemControls parameters take effect. For tasks that use the host network mode, it changes the container instance's namespaced kernel parameters as well as the containers.

This parameter is not supported for Windows containers.

This parameter is only supported for tasks that are hosted on Fargate if the tasks are using platform version 1.4.0 or later (Linux). This isn't supported for Windows containers on Fargate.

" + "documentation":"

A list of namespaced kernel parameters to set in the container. This parameter maps to Sysctls in the Create a container section of the Docker Remote API and the --sysctl option to docker run. For example, you can configure net.ipv4.tcp_keepalive_time setting to maintain longer lived connections.

" }, "resourceRequirements":{ "shape":"ResourceRequirements", @@ -1738,7 +1740,7 @@ "documentation":"

The dependency condition of the container. The following are the available conditions and their behavior:

" } }, - "documentation":"

The dependencies defined for container startup and shutdown. A container can contain multiple dependencies. When a dependency is defined for container startup, for container shutdown it is reversed.

Your Amazon ECS container instances require at least version 1.26.0 of the container agent to use container dependencies. However, we recommend using the latest container agent version. For information about checking your agent version and updating to the latest version, see Updating the Amazon ECS Container Agent in the Amazon Elastic Container Service Developer Guide. If you're using an Amazon ECS-optimized Linux AMI, your instance needs at least version 1.26.0-1 of the ecs-init package. If your container instances are launched from version 20190301 or later, then they contain the required versions of the container agent and ecs-init. For more information, see Amazon ECS-optimized Linux AMI in the Amazon Elastic Container Service Developer Guide.

For tasks that use the Fargate launch type, the task or service requires the following platforms:

  • Linux platform version 1.3.0 or later.

  • Windows platform version 1.0.0 or later.

" + "documentation":"

The dependencies defined for container startup and shutdown. A container can contain multiple dependencies. When a dependency is defined for container startup, for container shutdown it is reversed.

Your Amazon ECS container instances require at least version 1.26.0 of the container agent to use container dependencies. However, we recommend using the latest container agent version. For information about checking your agent version and updating to the latest version, see Updating the Amazon ECS Container Agent in the Amazon Elastic Container Service Developer Guide. If you're using an Amazon ECS-optimized Linux AMI, your instance needs at least version 1.26.0-1 of the ecs-init package. If your container instances are launched from version 20190301 or later, then they contain the required versions of the container agent and ecs-init. For more information, see Amazon ECS-optimized Linux AMI in the Amazon Elastic Container Service Developer Guide.

For tasks that use the Fargate launch type, the task or service requires the following platforms:

  • Linux platform version 1.3.0 or later.

  • Windows platform version 1.0.0 or later.

For more information about how to create a container dependency, see Container dependency in the Amazon Elastic Container Service Developer Guide.

" }, "ContainerInstance":{ "type":"structure", @@ -2047,7 +2049,7 @@ }, "launchType":{ "shape":"LaunchType", - "documentation":"

The infrastructure that you run your service on. For more information, see Amazon ECS launch types in the Amazon Elastic Container Service Developer Guide.

The FARGATE launch type runs your tasks on Fargate On-Demand infrastructure.

Fargate Spot infrastructure is available for use but a capacity provider strategy must be used. For more information, see Fargate capacity providers in the Amazon ECS User Guide for Fargate.

The EC2 launch type runs your tasks on Amazon EC2 instances registered to your cluster.

The EXTERNAL launch type runs your tasks on your on-premises server or virtual machine (VM) capacity registered to your cluster.

A service can use either a launch type or a capacity provider strategy. If a launchType is specified, the capacityProviderStrategy parameter must be omitted.

" + "documentation":"

The infrastructure that you run your service on. For more information, see Amazon ECS launch types in the Amazon Elastic Container Service Developer Guide.

The FARGATE launch type runs your tasks on Fargate On-Demand infrastructure.

Fargate Spot infrastructure is available for use but a capacity provider strategy must be used. For more information, see Fargate capacity providers in the Amazon ECS User Guide for Fargate.

The EC2 launch type runs your tasks on Amazon EC2 instances registered to your cluster.

The EXTERNAL launch type runs your tasks on your on-premises server or virtual machine (VM) capacity registered to your cluster.

A service can use either a launch type or a capacity provider strategy. If a launchType is specified, the capacityProviderStrategy parameter must be omitted.

" }, "capacityProviderStrategy":{ "shape":"CapacityProviderStrategy", @@ -2108,6 +2110,10 @@ "serviceConnectConfiguration":{ "shape":"ServiceConnectConfiguration", "documentation":"

The configuration for this service to discover and connect to services, and be discovered by, and connected from, other services within a namespace.

Tasks that run in a namespace can use short names to connect to services in the namespace. Tasks can connect to services across all of the clusters in the namespace. Tasks connect through a managed proxy container that collects logs and metrics for increased visibility. Only the tasks that Amazon ECS services create are supported with Service Connect. For more information, see Service Connect in the Amazon Elastic Container Service Developer Guide.

" + }, + "volumeConfigurations":{ + "shape":"ServiceVolumeConfigurations", + "documentation":"

The configuration for a volume specified in the task definition as a volume that is configured at launch time. Currently, the only supported volume type is an Amazon EBS volume.

" } } }, @@ -2434,6 +2440,10 @@ "serviceConnectResources":{ "shape":"ServiceConnectServiceResourceList", "documentation":"

The list of Service Connect resources that are associated with this deployment. Each list entry maps a discovery name to a Cloud Map service name.

" + }, + "volumeConfigurations":{ + "shape":"ServiceVolumeConfigurations", + "documentation":"

The details of the volume that was configuredAtLaunch. You can configure different settings like the size, throughput, volumeType, and encryption in ServiceManagedEBSVolumeConfiguration. The name of the volume must match the name from the task definition.

" } }, "documentation":"

The details of an Amazon ECS service deployment. This is used only when a service uses the ECS deployment controller type.

" @@ -2492,7 +2502,7 @@ }, "minimumHealthyPercent":{ "shape":"BoxedInteger", - "documentation":"

If a service is using the rolling update (ECS) deployment type, the minimumHealthyPercent represents a lower limit on the number of your service's tasks that must remain in the RUNNING state during a deployment, as a percentage of the desiredCount (rounded up to the nearest integer). This parameter enables you to deploy without using additional cluster capacity. For example, if your service has a desiredCount of four tasks and a minimumHealthyPercent of 50%, the service scheduler may stop two existing tasks to free up cluster capacity before starting two new tasks.

For services that do not use a load balancer, the following should be noted:

For services are that do use a load balancer, the following should be noted:

If a service is using either the blue/green (CODE_DEPLOY) or EXTERNAL deployment types and is running tasks that use the EC2 launch type, the minimum healthy percent value is set to the default value and is used to define the lower limit on the number of the tasks in the service that remain in the RUNNING state while the container instances are in the DRAINING state. If a service is using either the blue/green (CODE_DEPLOY) or EXTERNAL deployment types and is running tasks that use the Fargate launch type, the minimum healthy percent value is not used, although it is returned when describing your service.

" + "documentation":"

If a service is using the rolling update (ECS) deployment type, the minimumHealthyPercent represents a lower limit on the number of your service's tasks that must remain in the RUNNING state during a deployment, as a percentage of the desiredCount (rounded up to the nearest integer). This parameter enables you to deploy without using additional cluster capacity. For example, if your service has a desiredCount of four tasks and a minimumHealthyPercent of 50%, the service scheduler may stop two existing tasks to free up cluster capacity before starting two new tasks.

For services that do not use a load balancer, the following should be noted:

For services that do use a load balancer, the following should be noted:

If a service is using either the blue/green (CODE_DEPLOY) or EXTERNAL deployment types and is running tasks that use the EC2 launch type, the minimum healthy percent value is set to the default value and is used to define the lower limit on the number of the tasks in the service that remain in the RUNNING state while the container instances are in the DRAINING state. If a service is using either the blue/green (CODE_DEPLOY) or EXTERNAL deployment types and is running tasks that use the Fargate launch type, the minimum healthy percent value is not used, although it is returned when describing your service.

" }, "alarms":{ "shape":"DeploymentAlarms", @@ -2905,6 +2915,42 @@ "documentation":"

This parameter is specified when you're using Docker volumes. Docker volumes are only supported when you're using the EC2 launch type. Windows containers only support the use of the local driver. To use bind mounts, specify a host instead.

" }, "Double":{"type":"double"}, + "Duration":{ + "type":"integer", + "max":2147483647, + "min":0 + }, + "EBSKMSKeyId":{"type":"string"}, + "EBSResourceType":{ + "type":"string", + "enum":["volume"] + }, + "EBSSnapshotId":{"type":"string"}, + "EBSTagSpecification":{ + "type":"structure", + "required":["resourceType"], + "members":{ + "resourceType":{ + "shape":"EBSResourceType", + "documentation":"

The type of volume resource.

" + }, + "tags":{ + "shape":"Tags", + "documentation":"

The tags applied to this Amazon EBS volume. AmazonECSCreated and AmazonECSManaged are reserved tags that can't be used.

" + }, + "propagateTags":{ + "shape":"PropagateTags", + "documentation":"

Determines whether to propagate the tags from the task definition to 
the Amazon EBS volume. Tags can only propagate to a SERVICE specified in 
ServiceVolumeConfiguration. If no value is specified, the tags aren't 
propagated.

" + } + }, + "documentation":"

The tag specifications of an Amazon EBS volume.

" + }, + "EBSTagSpecifications":{ + "type":"list", + "member":{"shape":"EBSTagSpecification"} + }, + "EBSVolumeType":{"type":"string"}, + "ECSVolumeName":{"type":"string"}, "EFSAuthorizationConfig":{ "type":"structure", "members":{ @@ -2999,7 +3045,7 @@ "documentation":"

The total amount, in GiB, of ephemeral storage to set for the task. The minimum supported value is 21 GiB and the maximum supported value is 200 GiB.

" } }, - "documentation":"

The amount of ephemeral storage to allocate for the task. This parameter is used to expand the total amount of ephemeral storage available, beyond the default amount, for tasks hosted on Fargate. For more information, see Fargate task storage in the Amazon ECS User Guide for Fargate.

For tasks using the Fargate launch type, the task requires the following platforms:

  • Linux platform version 1.4.0 or later.

  • Windows platform version 1.0.0 or later.

" + "documentation":"

The amount of ephemeral storage to allocate for the task. This parameter is used to expand the total amount of ephemeral storage available, beyond the default amount, for tasks hosted on Fargate. For more information, see Using data volumes in tasks in the Amazon ECS Developer Guide.

For tasks using the Fargate launch type, the task requires the following platforms:

  • Linux platform version 1.4.0 or later.

  • Windows platform version 1.0.0 or later.

" }, "ExecuteCommandConfiguration":{ "type":"structure", @@ -3300,6 +3346,7 @@ }, "documentation":"

Details on a container instance bind mount host volume.

" }, + "IAMRoleArn":{"type":"string"}, "InferenceAccelerator":{ "type":"structure", "required":[ @@ -3848,7 +3895,7 @@ }, "containerName":{ "shape":"String", - "documentation":"

The name of the container (as it appears in a container definition) to associate with the load balancer.

" + "documentation":"

The name of the container (as it appears in a container definition) to associate with the load balancer.

You need to specify the container name when configuring the target group for an Amazon ECS load balancer.

" }, "containerPort":{ "shape":"BoxedInteger", @@ -4357,7 +4404,7 @@ "members":{ "name":{ "shape":"SettingName", - "documentation":"

The resource name for which to modify the account setting. If you specify serviceLongArnFormat, the ARN for your Amazon ECS services is affected. If you specify taskLongArnFormat, the ARN and resource ID for your Amazon ECS tasks is affected. If you specify containerInstanceLongArnFormat, the ARN and resource ID for your Amazon ECS container instances is affected. If you specify awsvpcTrunking, the ENI limit for your Amazon ECS container instances is affected. If you specify containerInsights, the default setting for Amazon Web Services CloudWatch Container Insights for your clusters is affected. If you specify tagResourceAuthorization, the opt-in option for tagging resources on creation is affected. For information about the opt-in timeline, see Tagging authorization timeline in the Amazon ECS Developer Guide. If you specify fargateTaskRetirementWaitPeriod, the default wait time to retire a Fargate task due to required maintenance is affected.

When you specify fargateFIPSMode for the name and enabled for the value, Fargate uses FIPS-140 compliant cryptographic algorithms on your tasks. For more information about FIPS-140 compliance with Fargate, see Amazon Web Services Fargate Federal Information Processing Standard (FIPS) 140-2 compliance in the Amazon Elastic Container Service Developer Guide.

When Amazon Web Services determines that a security or infrastructure update is needed for an Amazon ECS task hosted on Fargate, the tasks need to be stopped and new tasks launched to replace them. Use fargateTaskRetirementWaitPeriod to set the wait time to retire a Fargate task to the default. For information about the Fargate tasks maintenance, see Amazon Web Services Fargate task maintenance in the Amazon ECS Developer Guide.

The guardDutyActivate parameter is read-only in Amazon ECS and indicates whether Amazon ECS Runtime Monitoring is enabled or disabled by your security administrator in your Amazon ECS account. Amazon GuardDuty controls this account setting on your behalf. For more information, see Protecting Amazon ECS workloads with Amazon ECS Runtime Monitoring.

" + "documentation":"

The resource name for which to modify the account setting.

The following are the valid values for the account setting name.

" }, "value":{ "shape":"String", @@ -4383,7 +4430,7 @@ "members":{ "name":{ "shape":"SettingName", - "documentation":"

The Amazon ECS resource name for which to modify the account setting. If you specify serviceLongArnFormat, the ARN for your Amazon ECS services is affected. If you specify taskLongArnFormat, the ARN and resource ID for your Amazon ECS tasks is affected. If you specify containerInstanceLongArnFormat, the ARN and resource ID for your Amazon ECS container instances is affected. If you specify awsvpcTrunking, the elastic network interface (ENI) limit for your Amazon ECS container instances is affected. If you specify containerInsights, the default setting for Amazon Web Services CloudWatch Container Insights for your clusters is affected. If you specify fargateFIPSMode, Fargate FIPS 140 compliance is affected. If you specify tagResourceAuthorization, the opt-in option for tagging resources on creation is affected. For information about the opt-in timeline, see Tagging authorization timeline in the Amazon ECS Developer Guide. If you specify fargateTaskRetirementWaitPeriod, the wait time to retire a Fargate task is affected.

The guardDutyActivate parameter is read-only in Amazon ECS and indicates whether Amazon ECS Runtime Monitoring is enabled or disabled by your security administrator in your Amazon ECS account. Amazon GuardDuty controls this account setting on your behalf. For more information, see Protecting Amazon ECS workloads with Amazon ECS Runtime Monitoring.

" + "documentation":"

The Amazon ECS account setting name to modify.

The following are the valid values for the account setting name.

" }, "value":{ "shape":"String", @@ -4577,7 +4624,7 @@ }, "ephemeralStorage":{ "shape":"EphemeralStorage", - "documentation":"

The amount of ephemeral storage to allocate for the task. This parameter is used to expand the total amount of ephemeral storage available, beyond the default amount, for tasks hosted on Fargate. For more information, see Fargate task storage in the Amazon ECS User Guide for Fargate.

For tasks using the Fargate launch type, the task requires the following platforms:

  • Linux platform version 1.4.0 or later.

  • Windows platform version 1.0.0 or later.

" + "documentation":"

The amount of ephemeral storage to allocate for the task. This parameter is used to expand the total amount of ephemeral storage available, beyond the default amount, for tasks hosted on Fargate. For more information, see Using data volumes in tasks in the Amazon ECS Developer Guide.

For tasks using the Fargate launch type, the task requires the following platforms:

  • Linux platform version 1.4.0 or later.

  • Windows platform version 1.0.0 or later.

" }, "runtimePlatform":{ "shape":"RuntimePlatform", @@ -4724,7 +4771,7 @@ }, "launchType":{ "shape":"LaunchType", - "documentation":"

The infrastructure to run your standalone task on. For more information, see Amazon ECS launch types in the Amazon Elastic Container Service Developer Guide.

The FARGATE launch type runs your tasks on Fargate On-Demand infrastructure.

Fargate Spot infrastructure is available for use but a capacity provider strategy must be used. For more information, see Fargate capacity providers in the Amazon ECS User Guide for Fargate.

The EC2 launch type runs your tasks on Amazon EC2 instances registered to your cluster.

The EXTERNAL launch type runs your tasks on your on-premises server or virtual machine (VM) capacity registered to your cluster.

A task can use either a launch type or a capacity provider strategy. If a launchType is specified, the capacityProviderStrategy parameter must be omitted.

When you use cluster auto scaling, you must specify capacityProviderStrategy and not launchType.

" + "documentation":"

The infrastructure to run your standalone task on. For more information, see Amazon ECS launch types in the Amazon Elastic Container Service Developer Guide.

The FARGATE launch type runs your tasks on Fargate On-Demand infrastructure.

Fargate Spot infrastructure is available for use but a capacity provider strategy must be used. For more information, see Fargate capacity providers in the Amazon ECS Developer Guide.

The EC2 launch type runs your tasks on Amazon EC2 instances registered to your cluster.

The EXTERNAL launch type runs your tasks on your on-premises server or virtual machine (VM) capacity registered to your cluster.

A task can use either a launch type or a capacity provider strategy. If a launchType is specified, the capacityProviderStrategy parameter must be omitted.

When you use cluster auto scaling, you must specify capacityProviderStrategy and not launchType.

" }, "networkConfiguration":{ "shape":"NetworkConfiguration", @@ -4770,6 +4817,10 @@ "shape":"String", "documentation":"

An identifier that you provide to ensure the idempotency of the request. It must be unique and is case sensitive. Up to 64 characters are allowed. The valid characters are characters in the range of 33-126, inclusive. For more information, see Ensuring idempotency.

", "idempotencyToken":true + }, + "volumeConfigurations":{ + "shape":"TaskVolumeConfigurations", + "documentation":"

The details of the volume that was configuredAtLaunch. You can configure the size, volumeType, IOPS, throughput, snapshot and encryption in TaskManagedEBSVolumeConfiguration. The name of the volume must match the name from the task definition.

" } } }, @@ -4782,7 +4833,7 @@ }, "failures":{ "shape":"Failures", - "documentation":"

Any failures associated with the call.

" + "documentation":"

Any failures associated with the call.

For information about how to address failures, see Service event messages and API failure reasons in the Amazon Elastic Container Service Developer Guide.

" } } }, @@ -5058,6 +5109,14 @@ "ingressPortOverride":{ "shape":"PortNumber", "documentation":"

The port number for the Service Connect proxy to listen on.

Use the value of this field to bypass the proxy for traffic on the port number specified in the named portMapping in the task definition of this application, and then use it in your VPC security groups to allow traffic into the proxy for this Amazon ECS service.

In awsvpc mode and Fargate, the default value is the container port number. The container port number is in the portMapping in the task definition. In bridge mode, the default value is the ephemeral port of the Service Connect proxy.

" + }, + "timeout":{ + "shape":"TimeoutConfiguration", + "documentation":"

A reference to an object that represents the configured timeouts for Service Connect.

" + }, + "tls":{ + "shape":"ServiceConnectTlsConfiguration", + "documentation":"

A reference to an object that represents a Transport Layer Security (TLS) configuration.

" } }, "documentation":"

The Service Connect service object configuration. For more information, see Service Connect in the Amazon Elastic Container Service Developer Guide.

" @@ -5084,6 +5143,35 @@ "type":"list", "member":{"shape":"ServiceConnectServiceResource"} }, + "ServiceConnectTlsCertificateAuthority":{ + "type":"structure", + "members":{ + "awsPcaAuthorityArn":{ + "shape":"String", + "documentation":"

The ARN of the Amazon Web Services Private Certificate Authority certificate.

" + } + }, + "documentation":"

An object that represents the Amazon Web Services Private Certificate Authority certificate.

" + }, + "ServiceConnectTlsConfiguration":{ + "type":"structure", + "required":["issuerCertificateAuthority"], + "members":{ + "issuerCertificateAuthority":{ + "shape":"ServiceConnectTlsCertificateAuthority", + "documentation":"

The signer certificate authority.

" + }, + "kmsKey":{ + "shape":"String", + "documentation":"

The Amazon Web Services Key Management Service key.

" + }, + "roleArn":{ + "shape":"String", + "documentation":"

The Amazon Resource Name (ARN) of the IAM role that's associated with the Service Connect TLS.

" + } + }, + "documentation":"

An object that represents the configuration for Service Connect TLS.

" + }, "ServiceEvent":{ "type":"structure", "members":{ @@ -5114,6 +5202,53 @@ "type":"list", "member":{"shape":"ServiceField"} }, + "ServiceManagedEBSVolumeConfiguration":{ + "type":"structure", + "required":["roleArn"], + "members":{ + "encrypted":{ + "shape":"BoxedBoolean", + "documentation":"

Indicates whether the volume should be encrypted. If no value is specified, encryption is turned on by default. This parameter maps 1:1 with the Encrypted parameter of the CreateVolume API in the Amazon EC2 API Reference.

" + }, + "kmsKeyId":{ + "shape":"EBSKMSKeyId", + "documentation":"

The Amazon Resource Name (ARN) identifier of the Amazon Web Services Key Management Service key to use for Amazon EBS encryption. When encryption is turned on and no Amazon Web Services Key Management Service key is specified, the default Amazon Web Services managed key for Amazon EBS volumes is used. This parameter maps 1:1 with the KmsKeyId parameter of the CreateVolume API in the Amazon EC2 API Reference.

Amazon Web Services authenticates the Amazon Web Services Key Management Service key asynchronously. Therefore, if you specify an ID, alias, or ARN that is invalid, the action can appear to complete, but eventually fails.

" + }, + "volumeType":{ + "shape":"EBSVolumeType", + "documentation":"

The volume type. This parameter maps 1:1 with the VolumeType parameter of the CreateVolume API in the Amazon EC2 API Reference. For more information, see Amazon EBS volume types in the Amazon EC2 User Guide.

The following are the supported volume types.

" + }, + "sizeInGiB":{ + "shape":"BoxedInteger", + "documentation":"

The size of the volume in GiB. You must specify either a volume size or a snapshot ID. If you specify a snapshot ID, the snapshot size is used for the volume size by default. You can optionally specify a volume size greater than or equal to the snapshot size. This parameter maps 1:1 with the Size parameter of the CreateVolume API in the Amazon EC2 API Reference.

The following are the supported volume size values for each volume type.

" + }, + "snapshotId":{ + "shape":"EBSSnapshotId", + "documentation":"

The snapshot that Amazon ECS uses to create the volume. You must specify either a snapshot ID or a volume size. This parameter maps 1:1 with the SnapshotId parameter of the CreateVolume API in the Amazon EC2 API Reference.

" + }, + "iops":{ + "shape":"BoxedInteger", + "documentation":"

The number of I/O operations per second (IOPS). For gp3, io1, and io2 volumes, this represents the number of IOPS that are provisioned for the volume. For gp2 volumes, this represents the baseline performance of the volume and the rate at which the volume accumulates I/O credits for bursting.

The following are the supported values for each volume type.

This parameter is required for io1 and io2 volume types. The default for gp3 volumes is 3,000 IOPS. This parameter is not supported for st1, sc1, or standard volume types.

This parameter maps 1:1 with the Iops parameter of the CreateVolume API in the Amazon EC2 API Reference.

" + }, + "throughput":{ + "shape":"BoxedInteger", + "documentation":"

The throughput to provision for a volume, in MiB/s, with a maximum of 1,000 MiB/s. This parameter maps 1:1 with the Throughput parameter of the CreateVolume API in the Amazon EC2 API Reference.

This parameter is only supported for the gp3 volume type.

" + }, + "tagSpecifications":{ + "shape":"EBSTagSpecifications", + "documentation":"

The tags to apply to the volume. Amazon ECS applies service-managed tags by default. This parameter maps 1:1 with the TagSpecifications.N parameter of the CreateVolume API in the Amazon EC2 API Reference.

" + }, + "roleArn":{ + "shape":"IAMRoleArn", + "documentation":"

The ARN of the IAM role to associate with this volume. This is the Amazon ECS infrastructure IAM role that is used to manage your Amazon Web Services infrastructure. We recommend using the Amazon ECS-managed AmazonECSInfrastructureRolePolicyForVolumes IAM policy with this role. For more information, see Amazon ECS infrastructure IAM role in the Amazon ECS Developer Guide.

" + }, + "filesystemType":{ + "shape":"TaskFilesystemType", + "documentation":"

The Linux filesystem type for the volume. For volumes created from a snapshot, you must specify the same filesystem type that the volume was using when the snapshot was created. If there is a filesystem type mismatch, the task will fail to start.

The available filesystem types are
 ext3, ext4, and xfs. If no value is specified, the xfs filesystem type is used by default.

" + } + }, + "documentation":"

The configuration for the Amazon EBS volume that Amazon ECS creates and manages on your behalf. These settings are used to create each Amazon EBS volume, with one volume created for each task in the service.

Many of these parameters map 1:1 with the Amazon EBS CreateVolume API request parameters.

" + }, "ServiceNotActiveException":{ "type":"structure", "members":{ @@ -5154,6 +5289,25 @@ }, "documentation":"

The details for the service registry.

Each service may be associated with one service registry. Multiple service registries for each service are not supported.

When you add, update, or remove the service registries configuration, Amazon ECS starts a new deployment. New tasks are registered and deregistered to the updated service registry configuration.

" }, + "ServiceVolumeConfiguration":{ + "type":"structure", + "required":["name"], + "members":{ + "name":{ + "shape":"ECSVolumeName", + "documentation":"

The name of the volume. This value must match the volume name from the Volume object in the task definition.

" + }, + "managedEBSVolume":{ + "shape":"ServiceManagedEBSVolumeConfiguration", + "documentation":"

The configuration for the Amazon EBS volume that Amazon ECS creates and manages on your behalf. These settings are used to create each Amazon EBS volume, with one volume created for each task in the service. The Amazon EBS volumes are visible in your account in the Amazon EC2 console once they are created.

" + } + }, + "documentation":"

The configuration for a volume specified in the task definition as a volume that is configured at launch time. Currently, the only supported volume type is an Amazon EBS volume.

" + }, + "ServiceVolumeConfigurations":{ + "type":"list", + "member":{"shape":"ServiceVolumeConfiguration"} + }, "Services":{ "type":"list", "member":{"shape":"Service"} @@ -5291,6 +5445,10 @@ "taskDefinition":{ "shape":"String", "documentation":"

The family and revision (family:revision) or full ARN of the task definition to start. If a revision isn't specified, the latest ACTIVE revision is used.

" + }, + "volumeConfigurations":{ + "shape":"TaskVolumeConfigurations", + "documentation":"

The details of the volume that was configuredAtLaunch. You can configure the size, volumeType, IOPS, throughput, snapshot and encryption in TaskManagedEBSVolumeConfiguration. The name of the volume must match the name from the task definition.

" } } }, @@ -5325,7 +5483,7 @@ }, "reason":{ "shape":"String", - "documentation":"

An optional message specified when a task is stopped. For example, if you're using a custom scheduler, you can use this parameter to specify the reason for stopping the task here, and the message appears in subsequent DescribeTasks API operations on this task. Up to 255 characters are allowed in this message.

" + "documentation":"

An optional message specified when a task is stopped. For example, if you're using a custom scheduler, you can use this parameter to specify the reason for stopping the task here, and the message appears in subsequent DescribeTasks API operations on this task.

" } } }, @@ -5483,7 +5641,7 @@ "documentation":"

The namespaced kernel parameter to set a value for.

Valid IPC namespace values: \"kernel.msgmax\" | \"kernel.msgmnb\" | \"kernel.msgmni\" | \"kernel.sem\" | \"kernel.shmall\" | \"kernel.shmmax\" | \"kernel.shmmni\" | \"kernel.shm_rmid_forced\", and Sysctls that start with \"fs.mqueue.*\"

Valid network namespace values: Sysctls that start with \"net.*\"

All of these values are supported by Fargate.

" } }, - "documentation":"

A list of namespaced kernel parameters to set in the container. This parameter maps to Sysctls in the Create a container section of the Docker Remote API and the --sysctl option to docker run.

We don't recommend that you specify network-related systemControls parameters for multiple containers in a single task. This task also uses either the awsvpc or host network mode. It does it for the following reasons.

" + "documentation":"

A list of namespaced kernel parameters to set in the container. This parameter maps to Sysctls in the Create a container section of the Docker Remote API and the --sysctl option to docker run. For example, you can configure net.ipv4.tcp_keepalive_time setting to maintain longer lived connections.

We don't recommend that you specify network-related systemControls parameters for multiple containers in a single task that also uses either the awsvpc or host network mode. Doing this has the following disadvantages:

If you're setting an IPC resource namespace to use for the containers in the task, the following conditions apply to your system controls. For more information, see IPC mode.

This parameter is not supported for Windows containers.

This parameter is only supported for tasks that are hosted on Fargate if the tasks are using platform version 1.4.0 or later (Linux). This isn't supported for Windows containers on Fargate.

" }, "SystemControls":{ "type":"list", @@ -5678,7 +5836,7 @@ }, "stopCode":{ "shape":"TaskStopCode", - "documentation":"

The stop code indicating why a task was stopped. The stoppedReason might contain additional details.

For more information about stop code, see Stopped tasks error codes in the Amazon ECS User Guide.

The following are valid values:

" + "documentation":"

The stop code indicating why a task was stopped. The stoppedReason might contain additional details.

For more information about stop code, see Stopped tasks error codes in the Amazon ECS Developer Guide.

" }, "stoppedAt":{ "shape":"Timestamp", @@ -5875,6 +6033,76 @@ "type":"list", "member":{"shape":"TaskField"} }, + "TaskFilesystemType":{ + "type":"string", + "enum":[ + "ext3", + "ext4", + "xfs" + ] + }, + "TaskManagedEBSVolumeConfiguration":{ + "type":"structure", + "required":["roleArn"], + "members":{ + "encrypted":{ + "shape":"BoxedBoolean", + "documentation":"

Indicates whether the volume should be encrypted. If no value is specified, encryption is turned on by default. This parameter maps 1:1 with the Encrypted parameter of the CreateVolume API in the Amazon EC2 API Reference.

" + }, + "kmsKeyId":{ + "shape":"EBSKMSKeyId", + "documentation":"

The Amazon Resource Name (ARN) identifier of the Amazon Web Services Key Management Service key to use for Amazon EBS encryption. When encryption is turned on and no Amazon Web Services Key Management Service key is specified, the default Amazon Web Services managed key for Amazon EBS volumes is used. This parameter maps 1:1 with the KmsKeyId parameter of the CreateVolume API in the Amazon EC2 API Reference.

Amazon Web Services authenticates the Amazon Web Services Key Management Service key asynchronously. Therefore, if you specify an ID, alias, or ARN that is invalid, the action can appear to complete, but eventually fails.

" + }, + "volumeType":{ + "shape":"EBSVolumeType", + "documentation":"

The volume type. This parameter maps 1:1 with the VolumeType parameter of the CreateVolume API in the Amazon EC2 API Reference. For more information, see Amazon EBS volume types in the Amazon EC2 User Guide.

The following are the supported volume types.

" + }, + "sizeInGiB":{ + "shape":"BoxedInteger", + "documentation":"

The size of the volume in GiB. You must specify either a volume size or a snapshot ID. If you specify a snapshot ID, the snapshot size is used for the volume size by default. You can optionally specify a volume size greater than or equal to the snapshot size. This parameter maps 1:1 with the Size parameter of the CreateVolume API in the Amazon EC2 API Reference.

The following are the supported volume size values for each volume type.

" + }, + "snapshotId":{ + "shape":"EBSSnapshotId", + "documentation":"

The snapshot that Amazon ECS uses to create the volume. You must specify either a snapshot ID or a volume size. This parameter maps 1:1 with the SnapshotId parameter of the CreateVolume API in the Amazon EC2 API Reference.

" + }, + "iops":{ + "shape":"BoxedInteger", + "documentation":"

The number of I/O operations per second (IOPS). For gp3, io1, and io2 volumes, this represents the number of IOPS that are provisioned for the volume. For gp2 volumes, this represents the baseline performance of the volume and the rate at which the volume accumulates I/O credits for bursting.

The following are the supported values for each volume type.

This parameter is required for io1 and io2 volume types. The default for gp3 volumes is 3,000 IOPS. This parameter is not supported for st1, sc1, or standard volume types.

This parameter maps 1:1 with the Iops parameter of the CreateVolume API in the Amazon EC2 API Reference.

" + }, + "throughput":{ + "shape":"BoxedInteger", + "documentation":"

The throughput to provision for a volume, in MiB/s, with a maximum of 1,000 MiB/s. This parameter maps 1:1 with the Throughput parameter of the CreateVolume API in the Amazon EC2 API Reference.

This parameter is only supported for the gp3 volume type.

" + }, + "tagSpecifications":{ + "shape":"EBSTagSpecifications", + "documentation":"

The tags to apply to the volume. Amazon ECS applies service-managed tags by default. This parameter maps 1:1 with the TagSpecifications.N parameter of the CreateVolume API in the Amazon EC2 API Reference.

" + }, + "roleArn":{ + "shape":"IAMRoleArn", + "documentation":"

The ARN of the IAM role to associate with this volume. This is the Amazon ECS infrastructure IAM role that is used to manage your Amazon Web Services infrastructure. We recommend using the Amazon ECS-managed AmazonECSInfrastructureRolePolicyForVolumes IAM policy with this role. For more information, see Amazon ECS infrastructure IAM role in the Amazon ECS Developer Guide.

" + }, + "terminationPolicy":{ + "shape":"TaskManagedEBSVolumeTerminationPolicy", + "documentation":"

The termination policy for the volume when the task exits. This provides a way to control whether Amazon ECS terminates the Amazon EBS volume when the task stops.

" + }, + "filesystemType":{ + "shape":"TaskFilesystemType", + "documentation":"

The Linux filesystem type for the volume. For volumes created from a snapshot, you must specify the same filesystem type that the volume was using when the snapshot was created. If there is a filesystem type mismatch, the task will fail to start.

The available filesystem types are
 ext3, ext4, and xfs. If no value is specified, the xfs filesystem type is used by default.

" + } + }, + "documentation":"

The configuration for the Amazon EBS volume that Amazon ECS creates and manages on your behalf. These settings are used to create each Amazon EBS volume, with one volume created for each task.

" + }, + "TaskManagedEBSVolumeTerminationPolicy":{ + "type":"structure", + "required":["deleteOnTermination"], + "members":{ + "deleteOnTermination":{ + "shape":"BoxedBoolean", + "documentation":"

Indicates whether the volume should be deleted when the task stops. If a value of true is specified, 
Amazon ECS deletes the Amazon EBS volume on your behalf when the task goes into the STOPPED state. If no value is specified, the 
default value of true is used. When set to false, Amazon ECS leaves the volume in your 
account.

" + } + }, + "documentation":"

The termination policy for the Amazon EBS volume when the task exits. For more information, see Amazon ECS volume termination policy.

" + }, "TaskOverride":{ "type":"structure", "members":{ @@ -6041,10 +6269,43 @@ "TerminationNotice" ] }, + "TaskVolumeConfiguration":{ + "type":"structure", + "required":["name"], + "members":{ + "name":{ + "shape":"ECSVolumeName", + "documentation":"

The name of the volume. This value must match the volume name from the Volume object in the task definition.

" + }, + "managedEBSVolume":{ + "shape":"TaskManagedEBSVolumeConfiguration", + "documentation":"

The configuration for the Amazon EBS volume that Amazon ECS creates and manages on your behalf. These settings are used to create each Amazon EBS volume, with one volume created for each task. The Amazon EBS volumes are visible in your account in the Amazon EC2 console once they are created.

" + } + }, + "documentation":"

Configuration settings for the task volume that was configuredAtLaunch that weren't set during RegisterTaskDef.

" + }, + "TaskVolumeConfigurations":{ + "type":"list", + "member":{"shape":"TaskVolumeConfiguration"} + }, "Tasks":{ "type":"list", "member":{"shape":"Task"} }, + "TimeoutConfiguration":{ + "type":"structure", + "members":{ + "idleTimeoutSeconds":{ + "shape":"Duration", + "documentation":"

The amount of time in seconds a connection will stay active while idle. A value of 0 can be set to disable idleTimeout.

The idleTimeout default for HTTP/HTTP2/GRPC is 5 minutes.

The idleTimeout default for TCP is 1 hour.

" + }, + "perRequestTimeoutSeconds":{ + "shape":"Duration", + "documentation":"

The amount of time waiting for the upstream to respond with a complete response per request. A value of 0 can be set to disable perRequestTimeout. perRequestTimeout can only be set if Service Connect appProtocol isn't TCP. Only idleTimeout is allowed for TCP appProtocol.

" + } + }, + "documentation":"

An object that represents the timeout configurations for Service Connect.

If idleTimeout is set to a time that is less than perRequestTimeout, the connection will close when the idleTimeout is reached and not the perRequestTimeout.

" + }, "Timestamp":{"type":"timestamp"}, "Tmpfs":{ "type":"structure", @@ -6100,7 +6361,7 @@ "documentation":"

The hard limit for the ulimit type.

" } }, - "documentation":"

The ulimit settings to pass to the container.

Amazon ECS tasks hosted on Fargate use the default resource limit values set by the operating system with the exception of the nofile resource limit parameter which Fargate overrides. The nofile resource limit sets a restriction on the number of open files that a container can use. The default nofile soft limit is 1024 and the default hard limit is 4096.

You can specify the ulimit settings for a container in a task definition.

" + "documentation":"

The ulimit settings to pass to the container.

Amazon ECS tasks hosted on Fargate use the default resource limit values set by the operating system with the exception of the nofile resource limit parameter which Fargate overrides. The nofile resource limit sets a restriction on the number of open files that a container can use. The default nofile soft limit is 1024 and the default hard limit is 65535.

You can specify the ulimit settings for a container in a task definition.

" }, "UlimitList":{ "type":"list", @@ -6408,6 +6669,10 @@ "serviceConnectConfiguration":{ "shape":"ServiceConnectConfiguration", "documentation":"

The configuration for this service to discover and connect to services, and be discovered by, and connected from, other services within a namespace.

Tasks that run in a namespace can use short names to connect to services in the namespace. Tasks can connect to services across all of the clusters in the namespace. Tasks connect through a managed proxy container that collects logs and metrics for increased visibility. Only the tasks that Amazon ECS services create are supported with Service Connect. For more information, see Service Connect in the Amazon Elastic Container Service Developer Guide.

" + }, + "volumeConfigurations":{ + "shape":"ServiceVolumeConfigurations", + "documentation":"

The details of the volume that was configuredAtLaunch. You can configure the size, volumeType, IOPS, throughput, snapshot and encryption in ServiceManagedEBSVolumeConfiguration. The name of the volume must match the name from the task definition. If set to null, no new deployment is triggered. Otherwise, if this configuration differs from the existing one, it triggers a new deployment.

" } } }, @@ -6518,7 +6783,7 @@ "members":{ "name":{ "shape":"String", - "documentation":"

The name of the volume. Up to 255 letters (uppercase and lowercase), numbers, underscores, and hyphens are allowed. This name is referenced in the sourceVolume parameter of container definition mountPoints.

This is required wwhen you use an Amazon EFS volume.

" + "documentation":"

The name of the volume. Up to 255 letters (uppercase and lowercase), numbers, underscores, and hyphens are allowed.

When using a volume configured at launch, the name is required and must also be specified as the volume name in the ServiceVolumeConfiguration or TaskVolumeConfiguration parameter when creating your service or standalone task.

For all other types of volumes, this name is referenced in the sourceVolume parameter of the mountPoints object in the container definition.

When a volume is using the efsVolumeConfiguration, the name is required.

" }, "host":{ "shape":"HostVolumeProperties", @@ -6535,9 +6800,13 @@ "fsxWindowsFileServerVolumeConfiguration":{ "shape":"FSxWindowsFileServerVolumeConfiguration", "documentation":"

This parameter is specified when you use Amazon FSx for Windows File Server file system for task storage.

" + }, + "configuredAtLaunch":{ + "shape":"BoxedBoolean", + "documentation":"

Indicates whether the volume should be configured at launch time. This is used to create Amazon EBS volumes for standalone tasks or tasks created as part of a service. Each task definition revision may only have one volume configured at launch in the volume configuration.

To configure a volume at launch time, use this task definition revision and specify a volumeConfigurations object when calling the CreateService, UpdateService, RunTask or StartTask APIs.

" } }, - "documentation":"

A data volume that's used in a task definition. For tasks that use the Amazon Elastic File System (Amazon EFS), specify an efsVolumeConfiguration. For Windows tasks that use Amazon FSx for Windows File Server file system, specify a fsxWindowsFileServerVolumeConfiguration. For tasks that use a Docker volume, specify a DockerVolumeConfiguration. For tasks that use a bind mount host volume, specify a host and optional sourcePath. For more information, see Using Data Volumes in Tasks.

" + "documentation":"

The data volume configuration for tasks launched using this task definition. Specifying a volume configuration in a task definition is optional. The volume configuration may contain multiple volumes but only one volume configured at launch is supported. Each volume defined in the volume configuration may only specify a name and one of either configuredAtLaunch, dockerVolumeConfiguration, efsVolumeConfiguration, fsxWindowsFileServerVolumeConfiguration, or host. If an empty volume configuration is specified, by default Amazon ECS uses a host volume. For more information, see Using data volumes in tasks.

" }, "VolumeFrom":{ "type":"structure", diff -Nru awscli-2.15.9/awscli/botocore/data/elbv2/2015-12-01/service-2.json awscli-2.15.22/awscli/botocore/data/elbv2/2015-12-01/service-2.json --- awscli-2.15.9/awscli/botocore/data/elbv2/2015-12-01/service-2.json 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/awscli/botocore/data/elbv2/2015-12-01/service-2.json 2024-02-21 17:34:54.000000000 +0000 @@ -1352,11 +1352,11 @@ }, "Subnets":{ "shape":"Subnets", - "documentation":"

The IDs of the public subnets. You can specify only one subnet per Availability Zone. You must specify either subnets or subnet mappings, but not both. To specify an Elastic IP address, specify subnet mappings instead of subnets.

[Application Load Balancers] You must specify subnets from at least two Availability Zones.

[Application Load Balancers on Outposts] You must specify one Outpost subnet.

[Application Load Balancers on Local Zones] You can specify subnets from one or more Local Zones.

[Network Load Balancers] You can specify subnets from one or more Availability Zones.

[Gateway Load Balancers] You can specify subnets from one or more Availability Zones.

" + "documentation":"

The IDs of the subnets. You can specify only one subnet per Availability Zone. You must specify either subnets or subnet mappings, but not both. To specify an Elastic IP address, specify subnet mappings instead of subnets.

[Application Load Balancers] You must specify subnets from at least two Availability Zones.

[Application Load Balancers on Outposts] You must specify one Outpost subnet.

[Application Load Balancers on Local Zones] You can specify subnets from one or more Local Zones.

[Network Load Balancers] You can specify subnets from one or more Availability Zones.

[Gateway Load Balancers] You can specify subnets from one or more Availability Zones.

" }, "SubnetMappings":{ "shape":"SubnetMappings", - "documentation":"

The IDs of the public subnets. You can specify only one subnet per Availability Zone. You must specify either subnets or subnet mappings, but not both.

[Application Load Balancers] You must specify subnets from at least two Availability Zones. You cannot specify Elastic IP addresses for your subnets.

[Application Load Balancers on Outposts] You must specify one Outpost subnet.

[Application Load Balancers on Local Zones] You can specify subnets from one or more Local Zones.

[Network Load Balancers] You can specify subnets from one or more Availability Zones. You can specify one Elastic IP address per subnet if you need static IP addresses for your internet-facing load balancer. For internal load balancers, you can specify one private IP address per subnet from the IPv4 range of the subnet. For internet-facing load balancer, you can specify one IPv6 address per subnet.

[Gateway Load Balancers] You can specify subnets from one or more Availability Zones. You cannot specify Elastic IP addresses for your subnets.

" + "documentation":"

The IDs of the subnets. You can specify only one subnet per Availability Zone. You must specify either subnets or subnet mappings, but not both.

[Application Load Balancers] You must specify subnets from at least two Availability Zones. You cannot specify Elastic IP addresses for your subnets.

[Application Load Balancers on Outposts] You must specify one Outpost subnet.

[Application Load Balancers on Local Zones] You can specify subnets from one or more Local Zones.

[Network Load Balancers] You can specify subnets from one or more Availability Zones. You can specify one Elastic IP address per subnet if you need static IP addresses for your internet-facing load balancer. For internal load balancers, you can specify one private IP address per subnet from the IPv4 range of the subnet. For internet-facing load balancer, you can specify one IPv6 address per subnet.

[Gateway Load Balancers] You can specify subnets from one or more Availability Zones. You cannot specify Elastic IP addresses for your subnets.

" }, "SecurityGroups":{ "shape":"SecurityGroups", @@ -3549,7 +3549,7 @@ "members":{ "Values":{ "shape":"ListOfString", - "documentation":"

The source IP addresses, in CIDR format. You can use both IPv4 and IPv6 addresses. Wildcards are not supported.

If you specify multiple addresses, the condition is satisfied if the source IP address of the request matches one of the CIDR blocks. This condition is not satisfied by the addresses in the X-Forwarded-For header. To search for addresses in the X-Forwarded-For header, use HttpHeaderConditionConfig.

" + "documentation":"

The source IP addresses, in CIDR format. You can use both IPv4 and IPv6 addresses. Wildcards are not supported.

If you specify multiple addresses, the condition is satisfied if the source IP address of the request matches one of the CIDR blocks. This condition is not satisfied by the addresses in the X-Forwarded-For header. To search for addresses in the X-Forwarded-For header, use HttpHeaderConditionConfig.

The total number of values must be less than or equal to five.

" } }, "documentation":"

Information about a source IP condition.

You can use this condition to route based on the IP address of the source that connects to the load balancer. If a client is behind a proxy, this is the IP address of the proxy not the IP address of the client.

" @@ -3813,7 +3813,7 @@ "members":{ "Key":{ "shape":"TargetGroupAttributeKey", - "documentation":"

The name of the attribute.

The following attributes are supported by all load balancers:

The following attributes are supported by Application Load Balancers and Network Load Balancers:

The following attributes are supported only if the load balancer is an Application Load Balancer and the target is an instance or an IP address:

The following attribute is supported only if the load balancer is an Application Load Balancer and the target is a Lambda function:

The following attributes are supported only by Network Load Balancers:

The following attributes are supported only by Gateway Load Balancers:

" + "documentation":"

The name of the attribute.

The following attributes are supported by all load balancers:

The following attributes are supported by Application Load Balancers and Network Load Balancers:

The following attributes are supported only if the load balancer is an Application Load Balancer and the target is an instance or an IP address:

The following attribute is supported only if the load balancer is an Application Load Balancer and the target is a Lambda function:

The following attributes are supported only by Network Load Balancers:

The following attributes are supported only by Gateway Load Balancers:

" }, "Value":{ "shape":"TargetGroupAttributeValue", @@ -3962,6 +3962,7 @@ "initial", "healthy", "unhealthy", + "unhealthy.draining", "unused", "draining", "unavailable" diff -Nru awscli-2.15.9/awscli/botocore/data/emr/2009-03-31/service-2.json awscli-2.15.22/awscli/botocore/data/emr/2009-03-31/service-2.json --- awscli-2.15.9/awscli/botocore/data/emr/2009-03-31/service-2.json 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/awscli/botocore/data/emr/2009-03-31/service-2.json 2024-02-21 17:34:54.000000000 +0000 @@ -652,7 +652,19 @@ "errors":[ {"shape":"InternalServerError"} ], - "documentation":"

SetTerminationProtection locks a cluster (job flow) so the Amazon EC2 instances in the cluster cannot be terminated by user intervention, an API call, or in the event of a job-flow error. The cluster still terminates upon successful completion of the job flow. Calling SetTerminationProtection on a cluster is similar to calling the Amazon EC2 DisableAPITermination API on all Amazon EC2 instances in a cluster.

SetTerminationProtection is used to prevent accidental termination of a cluster and to ensure that in the event of an error, the instances persist so that you can recover any data stored in their ephemeral instance storage.

To terminate a cluster that has been locked by setting SetTerminationProtection to true, you must first unlock the job flow by a subsequent call to SetTerminationProtection in which you set the value to false.

For more information, seeManaging Cluster Termination in the Amazon EMR Management Guide.

" + "documentation":"

SetTerminationProtection locks a cluster (job flow) so the Amazon EC2 instances in the cluster cannot be terminated by user intervention, an API call, or in the event of a job-flow error. The cluster still terminates upon successful completion of the job flow. Calling SetTerminationProtection on a cluster is similar to calling the Amazon EC2 DisableAPITermination API on all Amazon EC2 instances in a cluster.

SetTerminationProtection is used to prevent accidental termination of a cluster and to ensure that in the event of an error, the instances persist so that you can recover any data stored in their ephemeral instance storage.

To terminate a cluster that has been locked by setting SetTerminationProtection to true, you must first unlock the job flow by a subsequent call to SetTerminationProtection in which you set the value to false.

For more information, see Managing Cluster Termination in the Amazon EMR Management Guide.

" + }, + "SetUnhealthyNodeReplacement":{ + "name":"SetUnhealthyNodeReplacement", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"SetUnhealthyNodeReplacementInput"}, + "errors":[ + {"shape":"InternalServerError"} + ], + "documentation":"

Specify whether to enable unhealthy node replacement, which lets Amazon EMR gracefully replace core nodes on a cluster if any nodes become unhealthy. For example, a node becomes unhealthy if disk usage is above 90%. If unhealthy node replacement is on and TerminationProtected is off, Amazon EMR immediately terminates the unhealthy core nodes. To use unhealthy node replacement and retain unhealthy core nodes, turn on termination protection. In such cases, Amazon EMR adds the unhealthy nodes to a denylist, reducing job interruptions and failures.

If unhealthy node replacement is on, Amazon EMR notifies YARN and other applications on the cluster to stop scheduling tasks with these nodes, moves the data, and then terminates the nodes.

For more information, see graceful node replacement in the Amazon EMR Management Guide.

" }, "SetVisibleToAllUsers":{ "name":"SetVisibleToAllUsers", @@ -1238,6 +1250,10 @@ "shape":"Boolean", "documentation":"

Indicates whether Amazon EMR will lock the cluster to prevent the Amazon EC2 instances from being terminated by an API call or user intervention, or in the event of a cluster error.

" }, + "UnhealthyNodeReplacement":{ + "shape":"BooleanObject", + "documentation":"

Indicates whether Amazon EMR should gracefully replace Amazon EC2 core instances that have degraded within the cluster.

" + }, "VisibleToAllUsers":{ "shape":"Boolean", "documentation":"

Indicates whether the cluster is visible to IAM principals in the Amazon Web Services account associated with the cluster. When true, IAM principals in the Amazon Web Services account can perform Amazon EMR cluster actions on the cluster that their IAM policies allow. When false, only the IAM principal that created the cluster and the Amazon Web Services account root user can perform Amazon EMR actions, regardless of IAM permissions policies attached to other IAM principals.

The default value is true if a value is not provided when creating a cluster using the Amazon EMR API RunJobFlow command, the CLI create-cluster command, or the Amazon Web Services Management Console.

" @@ -3359,6 +3375,10 @@ "shape":"Boolean", "documentation":"

Specifies whether to lock the cluster to prevent the Amazon EC2 instances from being terminated by API call, user intervention, or in the event of a job-flow error.

" }, + "UnhealthyNodeReplacement":{ + "shape":"BooleanObject", + "documentation":"

Indicates whether Amazon EMR should gracefully replace core nodes that have degraded within the cluster.

" + }, "HadoopVersion":{ "shape":"XmlStringMaxLen256", "documentation":"

Applies only to Amazon EMR release versions earlier than 4.0. The Hadoop version for the cluster. Valid inputs are \"0.18\" (no longer maintained), \"0.20\" (no longer maintained), \"0.20.205\" (no longer maintained), \"1.0.3\", \"2.2.0\", or \"2.4.0\". If you do not set this value, the default of 0.18 is used, unless the AmiVersion parameter is set in the RunJobFlow call, in which case the default version of Hadoop for that AMI version is used.

" @@ -3450,6 +3470,10 @@ "shape":"Boolean", "documentation":"

Specifies whether the Amazon EC2 instances in the cluster are protected from termination by API calls, user intervention, or in the event of a job-flow error.

" }, + "UnhealthyNodeReplacement":{ + "shape":"BooleanObject", + "documentation":"

Indicates whether Amazon EMR should gracefully replace core nodes that have degraded within the cluster.

" + }, "HadoopVersion":{ "shape":"XmlStringMaxLen256", "documentation":"

The Hadoop version for the cluster.

" @@ -4895,6 +4919,23 @@ }, "documentation":"

The input argument to the TerminationProtection operation.

" }, + "SetUnhealthyNodeReplacementInput":{ + "type":"structure", + "required":[ + "JobFlowIds", + "UnhealthyNodeReplacement" + ], + "members":{ + "JobFlowIds":{ + "shape":"XmlStringList", + "documentation":"

The list of strings that uniquely identify the clusters for which to turn on unhealthy node replacement. You can get these identifiers by running the RunJobFlow or the DescribeJobFlows operations.

" + }, + "UnhealthyNodeReplacement":{ + "shape":"BooleanObject", + "documentation":"

Indicates whether to turn on or turn off graceful unhealthy node replacement.

" + } + } + }, "SetVisibleToAllUsersInput":{ "type":"structure", "required":[ diff -Nru awscli-2.15.9/awscli/botocore/data/endpoints.json awscli-2.15.22/awscli/botocore/data/endpoints.json --- awscli-2.15.9/awscli/botocore/data/endpoints.json 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/awscli/botocore/data/endpoints.json 2024-02-21 17:34:54.000000000 +0000 @@ -349,6 +349,12 @@ "tags" : [ "fips" ] } ] }, + "ca-west-1" : { + "variants" : [ { + "hostname" : "acm-pca-fips.ca-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, "eu-central-1" : { }, "eu-central-2" : { }, "eu-north-1" : { }, @@ -364,6 +370,13 @@ "deprecated" : true, "hostname" : "acm-pca-fips.ca-central-1.amazonaws.com" }, + "fips-ca-west-1" : { + "credentialScope" : { + "region" : "ca-west-1" + }, + "deprecated" : true, + "hostname" : "acm-pca-fips.ca-west-1.amazonaws.com" + }, "fips-us-east-1" : { "credentialScope" : { "region" : "us-east-1" @@ -1938,6 +1951,7 @@ "ap-southeast-1" : { }, "ap-southeast-2" : { }, "ap-southeast-3" : { }, + "ap-southeast-4" : { }, "ca-central-1" : { }, "eu-central-1" : { }, "eu-central-2" : { }, @@ -1947,6 +1961,7 @@ "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "il-central-1" : { }, "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, @@ -2244,10 +2259,58 @@ "eu-central-1" : { }, "eu-west-1" : { }, "eu-west-2" : { }, - "us-east-1" : { }, - "us-east-2" : { }, - "us-west-1" : { }, - "us-west-2" : { } + "us-east-1" : { + "variants" : [ { + "hostname" : "auditmanager-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-1-fips" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "auditmanager-fips.us-east-1.amazonaws.com" + }, + "us-east-2" : { + "variants" : [ { + "hostname" : "auditmanager-fips.us-east-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-2-fips" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "deprecated" : true, + "hostname" : 
"auditmanager-fips.us-east-2.amazonaws.com" + }, + "us-west-1" : { + "variants" : [ { + "hostname" : "auditmanager-fips.us-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-1-fips" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "deprecated" : true, + "hostname" : "auditmanager-fips.us-west-1.amazonaws.com" + }, + "us-west-2" : { + "variants" : [ { + "hostname" : "auditmanager-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-2-fips" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "auditmanager-fips.us-west-2.amazonaws.com" + } } }, "autoscaling" : { @@ -3197,6 +3260,7 @@ "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, "us-east-1" : { }, @@ -5800,6 +5864,12 @@ "tags" : [ "fips" ] } ] }, + "ca-west-1" : { + "variants" : [ { + "hostname" : "ec2-fips.ca-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, "eu-central-1" : { }, "eu-central-2" : { }, "eu-north-1" : { }, @@ -5820,6 +5890,13 @@ "deprecated" : true, "hostname" : "ec2-fips.ca-central-1.amazonaws.com" }, + "fips-ca-west-1" : { + "credentialScope" : { + "region" : "ca-west-1" + }, + "deprecated" : true, + "hostname" : "ec2-fips.ca-west-1.amazonaws.com" + }, "fips-us-east-1" : { "credentialScope" : { "region" : "us-east-1" @@ -6407,6 +6484,12 @@ "tags" : [ "fips" ] } ] }, + "ca-west-1" : { + "variants" : [ { + "hostname" : "elasticfilesystem-fips.ca-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, "eu-central-1" : { "variants" : [ { "hostname" : "elasticfilesystem-fips.eu-central-1.amazonaws.com", @@ -6539,6 +6622,13 @@ "deprecated" : true, "hostname" : "elasticfilesystem-fips.ca-central-1.amazonaws.com" }, + "fips-ca-west-1" : { + "credentialScope" : { + "region" : "ca-west-1" + }, + "deprecated" : true, + "hostname" : "elasticfilesystem-fips.ca-west-1.amazonaws.com" + }, "fips-eu-central-1" : { "credentialScope" : { "region" : 
"eu-central-1" @@ -11279,6 +11369,7 @@ "eu-central-1" : { }, "eu-north-1" : { }, "eu-south-1" : { }, + "eu-south-2" : { }, "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, @@ -11297,6 +11388,7 @@ "fips-us-west-2" : { "deprecated" : true }, + "il-central-1" : { }, "sa-east-1" : { }, "us-east-1" : { "variants" : [ { @@ -11565,9 +11657,12 @@ "endpoints" : { "ap-northeast-1" : { }, "ap-northeast-2" : { }, + "ap-northeast-3" : { }, "ap-south-1" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, + "ap-southeast-4" : { }, + "ca-central-1" : { }, "eu-central-1" : { }, "eu-north-1" : { }, "eu-west-1" : { }, @@ -12585,6 +12680,12 @@ }, "hostname" : "oidc.il-central-1.amazonaws.com" }, + "me-central-1" : { + "credentialScope" : { + "region" : "me-central-1" + }, + "hostname" : "oidc.me-central-1.amazonaws.com" + }, "me-south-1" : { "credentialScope" : { "region" : "me-south-1" @@ -12755,8 +12856,10 @@ "ap-southeast-2" : { }, "ca-central-1" : { }, "eu-central-1" : { }, + "eu-north-1" : { }, "eu-west-1" : { }, "eu-west-2" : { }, + "sa-east-1" : { }, "us-east-1" : { }, "us-east-2" : { }, "us-west-1" : { }, @@ -13234,6 +13337,12 @@ }, "hostname" : "portal.sso.il-central-1.amazonaws.com" }, + "me-central-1" : { + "credentialScope" : { + "region" : "me-central-1" + }, + "hostname" : "portal.sso.me-central-1.amazonaws.com" + }, "me-south-1" : { "credentialScope" : { "region" : "me-south-1" @@ -13272,6 +13381,13 @@ } } }, + "private-networks" : { + "endpoints" : { + "us-east-1" : { }, + "us-east-2" : { }, + "us-west-2" : { } + } + }, "profile" : { "endpoints" : { "af-south-1" : { }, @@ -14125,16 +14241,76 @@ "ap-south-1" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, - "ca-central-1" : { }, + "ca-central-1" : { + "variants" : [ { + "hostname" : "redshift-serverless-fips.ca-central-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, "eu-central-1" : { }, "eu-north-1" : { }, "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, - "us-east-1" : { }, - "us-east-2" : 
{ }, - "us-west-1" : { }, - "us-west-2" : { } + "fips-ca-central-1" : { + "credentialScope" : { + "region" : "ca-central-1" + }, + "deprecated" : true, + "hostname" : "redshift-serverless-fips.ca-central-1.amazonaws.com" + }, + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "redshift-serverless-fips.us-east-1.amazonaws.com" + }, + "fips-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "deprecated" : true, + "hostname" : "redshift-serverless-fips.us-east-2.amazonaws.com" + }, + "fips-us-west-1" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "deprecated" : true, + "hostname" : "redshift-serverless-fips.us-west-1.amazonaws.com" + }, + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "redshift-serverless-fips.us-west-2.amazonaws.com" + }, + "us-east-1" : { + "variants" : [ { + "hostname" : "redshift-serverless-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-2" : { + "variants" : [ { + "hostname" : "redshift-serverless-fips.us-east-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-1" : { + "variants" : [ { + "hostname" : "redshift-serverless-fips.us-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-2" : { + "variants" : [ { + "hostname" : "redshift-serverless-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + } } }, "rekognition" : { @@ -14325,96 +14501,26 @@ } }, "resource-explorer-2" : { - "defaults" : { - "dnsSuffix" : "api.aws", - "variants" : [ { - "dnsSuffix" : "api.aws", - "hostname" : "{service}-fips.{region}.{dnsSuffix}", - "tags" : [ "fips" ] - } ] - }, "endpoints" : { - "af-south-1" : { - "hostname" : "resource-explorer-2.af-south-1.api.aws" - }, - "ap-east-1" : { - "hostname" : "resource-explorer-2.ap-east-1.api.aws" - }, - "ap-northeast-1" : { - "hostname" : "resource-explorer-2.ap-northeast-1.api.aws" - }, - "ap-northeast-2" : { 
- "hostname" : "resource-explorer-2.ap-northeast-2.api.aws" - }, - "ap-northeast-3" : { - "hostname" : "resource-explorer-2.ap-northeast-3.api.aws" - }, - "ap-south-1" : { - "hostname" : "resource-explorer-2.ap-south-1.api.aws" - }, - "ap-south-2" : { - "hostname" : "resource-explorer-2.ap-south-2.api.aws" - }, - "ap-southeast-1" : { - "hostname" : "resource-explorer-2.ap-southeast-1.api.aws" - }, - "ap-southeast-2" : { - "hostname" : "resource-explorer-2.ap-southeast-2.api.aws" - }, - "ap-southeast-3" : { - "hostname" : "resource-explorer-2.ap-southeast-3.api.aws" - }, - "ap-southeast-4" : { - "hostname" : "resource-explorer-2.ap-southeast-4.api.aws" - }, - "ca-central-1" : { - "hostname" : "resource-explorer-2.ca-central-1.api.aws" - }, - "eu-central-1" : { - "hostname" : "resource-explorer-2.eu-central-1.api.aws" - }, - "eu-central-2" : { - "hostname" : "resource-explorer-2.eu-central-2.api.aws" - }, - "eu-north-1" : { - "hostname" : "resource-explorer-2.eu-north-1.api.aws" - }, - "eu-south-1" : { - "hostname" : "resource-explorer-2.eu-south-1.api.aws" - }, - "eu-west-1" : { - "hostname" : "resource-explorer-2.eu-west-1.api.aws" - }, - "eu-west-2" : { - "hostname" : "resource-explorer-2.eu-west-2.api.aws" - }, - "eu-west-3" : { - "hostname" : "resource-explorer-2.eu-west-3.api.aws" - }, - "il-central-1" : { - "hostname" : "resource-explorer-2.il-central-1.api.aws" - }, - "me-central-1" : { - "hostname" : "resource-explorer-2.me-central-1.api.aws" - }, - "me-south-1" : { - "hostname" : "resource-explorer-2.me-south-1.api.aws" - }, - "sa-east-1" : { - "hostname" : "resource-explorer-2.sa-east-1.api.aws" - }, - "us-east-1" : { - "hostname" : "resource-explorer-2.us-east-1.api.aws" - }, - "us-east-2" : { - "hostname" : "resource-explorer-2.us-east-2.api.aws" - }, - "us-west-1" : { - "hostname" : "resource-explorer-2.us-west-1.api.aws" - }, - "us-west-2" : { - "hostname" : "resource-explorer-2.us-west-2.api.aws" - } + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, 
+ "ap-northeast-3" : { }, + "ap-south-1" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ap-southeast-3" : { }, + "ca-central-1" : { }, + "eu-central-1" : { }, + "eu-north-1" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "me-south-1" : { }, + "sa-east-1" : { }, + "us-east-1" : { }, + "us-east-2" : { }, + "us-west-1" : { }, + "us-west-2" : { } } }, "resource-groups" : { @@ -15934,11 +16040,59 @@ "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "deprecated" : true, + "hostname" : "securitylake-fips.us-east-1.amazonaws.com" + }, + "fips-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "deprecated" : true, + "hostname" : "securitylake-fips.us-east-2.amazonaws.com" + }, + "fips-us-west-1" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "deprecated" : true, + "hostname" : "securitylake-fips.us-west-1.amazonaws.com" + }, + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "deprecated" : true, + "hostname" : "securitylake-fips.us-west-2.amazonaws.com" + }, "sa-east-1" : { }, - "us-east-1" : { }, - "us-east-2" : { }, - "us-west-1" : { }, - "us-west-2" : { } + "us-east-1" : { + "variants" : [ { + "hostname" : "securitylake-fips.us-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-east-2" : { + "variants" : [ { + "hostname" : "securitylake-fips.us-east-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-1" : { + "variants" : [ { + "hostname" : "securitylake-fips.us-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-2" : { + "variants" : [ { + "hostname" : "securitylake-fips.us-west-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + } } }, "serverlessrepo" : { @@ -16773,10 +16927,16 @@ }, "sms-voice" : { "endpoints" : { + "af-south-1" : { }, "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-northeast-3" : { }, "ap-south-1" : { }, + "ap-south-2" : { }, 
"ap-southeast-1" : { }, "ap-southeast-2" : { }, + "ap-southeast-3" : { }, + "ap-southeast-4" : { }, "ca-central-1" : { "variants" : [ { "hostname" : "sms-voice-fips.ca-central-1.amazonaws.com", @@ -16784,8 +16944,13 @@ } ] }, "eu-central-1" : { }, + "eu-central-2" : { }, + "eu-north-1" : { }, + "eu-south-1" : { }, + "eu-south-2" : { }, "eu-west-1" : { }, "eu-west-2" : { }, + "eu-west-3" : { }, "fips-ca-central-1" : { "credentialScope" : { "region" : "ca-central-1" @@ -16800,6 +16965,20 @@ "deprecated" : true, "hostname" : "sms-voice-fips.us-east-1.amazonaws.com" }, + "fips-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "deprecated" : true, + "hostname" : "sms-voice-fips.us-east-2.amazonaws.com" + }, + "fips-us-west-1" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "deprecated" : true, + "hostname" : "sms-voice-fips.us-west-1.amazonaws.com" + }, "fips-us-west-2" : { "credentialScope" : { "region" : "us-west-2" @@ -16807,12 +16986,28 @@ "deprecated" : true, "hostname" : "sms-voice-fips.us-west-2.amazonaws.com" }, + "il-central-1" : { }, + "me-central-1" : { }, + "me-south-1" : { }, + "sa-east-1" : { }, "us-east-1" : { "variants" : [ { "hostname" : "sms-voice-fips.us-east-1.amazonaws.com", "tags" : [ "fips" ] } ] }, + "us-east-2" : { + "variants" : [ { + "hostname" : "sms-voice-fips.us-east-2.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, + "us-west-1" : { + "variants" : [ { + "hostname" : "sms-voice-fips.us-west-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, "us-west-2" : { "variants" : [ { "hostname" : "sms-voice-fips.us-west-2.amazonaws.com", @@ -17580,6 +17775,7 @@ "eu-west-2" : { }, "eu-west-3" : { }, "il-central-1" : { }, + "me-central-1" : { }, "me-south-1" : { }, "sa-east-1" : { }, "us-east-1" : { }, @@ -19706,7 +19902,10 @@ "wisdom" : { "endpoints" : { "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-southeast-1" : { }, "ap-southeast-2" : { }, + "ca-central-1" : { }, "eu-central-1" : { }, "eu-west-2" : { }, 
"fips-us-east-1" : { @@ -19716,7 +19915,10 @@ "deprecated" : true }, "ui-ap-northeast-1" : { }, + "ui-ap-northeast-2" : { }, + "ui-ap-southeast-1" : { }, "ui-ap-southeast-2" : { }, + "ui-ca-central-1" : { }, "ui-eu-central-1" : { }, "ui-eu-west-2" : { }, "ui-us-east-1" : { }, @@ -20614,6 +20816,12 @@ "cn-northwest-1" : { } } }, + "inspector2" : { + "endpoints" : { + "cn-north-1" : { }, + "cn-northwest-1" : { } + } + }, "internetmonitor" : { "defaults" : { "dnsSuffix" : "api.amazonwebservices.com.cn", @@ -20669,6 +20877,23 @@ "cn-north-1" : { } } }, + "iottwinmaker" : { + "endpoints" : { + "api-cn-north-1" : { + "credentialScope" : { + "region" : "cn-north-1" + }, + "hostname" : "api.iottwinmaker.cn-north-1.amazonaws.com.cn" + }, + "cn-north-1" : { }, + "data-cn-north-1" : { + "credentialScope" : { + "region" : "cn-north-1" + }, + "hostname" : "data.iottwinmaker.cn-north-1.amazonaws.com.cn" + } + } + }, "kafka" : { "endpoints" : { "cn-north-1" : { }, @@ -20899,6 +21124,11 @@ } } }, + "quicksight" : { + "endpoints" : { + "cn-north-1" : { } + } + }, "ram" : { "endpoints" : { "cn-north-1" : { }, @@ -20928,24 +21158,6 @@ "cn-north-1" : { } } }, - "resource-explorer-2" : { - "defaults" : { - "dnsSuffix" : "api.amazonwebservices.com.cn", - "variants" : [ { - "dnsSuffix" : "api.amazonwebservices.com.cn", - "hostname" : "{service}-fips.{region}.{dnsSuffix}", - "tags" : [ "fips" ] - } ] - }, - "endpoints" : { - "cn-north-1" : { - "hostname" : "resource-explorer-2.cn-north-1.api.amazonwebservices.com.cn" - }, - "cn-northwest-1" : { - "hostname" : "resource-explorer-2.cn-northwest-1.api.amazonwebservices.com.cn" - } - } - }, "resource-groups" : { "endpoints" : { "cn-north-1" : { }, @@ -23163,6 +23375,12 @@ "us-gov-west-1" : { } } }, + "emr-serverless" : { + "endpoints" : { + "us-gov-east-1" : { }, + "us-gov-west-1" : { } + } + }, "es" : { "endpoints" : { "fips" : { @@ -24846,24 +25064,6 @@ } } }, - "resource-explorer-2" : { - "defaults" : { - "dnsSuffix" : "api.aws", - 
"variants" : [ { - "dnsSuffix" : "api.aws", - "hostname" : "{service}-fips.{region}.{dnsSuffix}", - "tags" : [ "fips" ] - } ] - }, - "endpoints" : { - "us-gov-east-1" : { - "hostname" : "resource-explorer-2.us-gov-east-1.api.aws" - }, - "us-gov-west-1" : { - "hostname" : "resource-explorer-2.us-gov-west-1.api.aws" - } - } - }, "resource-groups" : { "defaults" : { "variants" : [ { @@ -25459,6 +25659,13 @@ }, "sms-voice" : { "endpoints" : { + "fips-us-gov-east-1" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "deprecated" : true, + "hostname" : "sms-voice-fips.us-gov-east-1.amazonaws.com" + }, "fips-us-gov-west-1" : { "credentialScope" : { "region" : "us-gov-west-1" @@ -25466,6 +25673,12 @@ "deprecated" : true, "hostname" : "sms-voice-fips.us-gov-west-1.amazonaws.com" }, + "us-gov-east-1" : { + "variants" : [ { + "hostname" : "sms-voice-fips.us-gov-east-1.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, "us-gov-west-1" : { "variants" : [ { "hostname" : "sms-voice-fips.us-gov-west-1.amazonaws.com", @@ -26183,6 +26396,16 @@ } } }, + "api.pricing" : { + "defaults" : { + "credentialScope" : { + "service" : "pricing" + } + }, + "endpoints" : { + "us-iso-east-1" : { } + } + }, "api.sagemaker" : { "endpoints" : { "us-iso-east-1" : { } @@ -26214,6 +26437,11 @@ "us-iso-west-1" : { } } }, + "arc-zonal-shift" : { + "endpoints" : { + "us-iso-east-1" : { } + } + }, "athena" : { "endpoints" : { "us-iso-east-1" : { } @@ -26272,6 +26500,13 @@ }, "datasync" : { "endpoints" : { + "fips-us-iso-east-1" : { + "credentialScope" : { + "region" : "us-iso-east-1" + }, + "deprecated" : true, + "hostname" : "datasync-fips.us-iso-east-1.c2s.ic.gov" + }, "fips-us-iso-west-1" : { "credentialScope" : { "region" : "us-iso-west-1" @@ -26279,6 +26514,12 @@ "deprecated" : true, "hostname" : "datasync-fips.us-iso-west-1.c2s.ic.gov" }, + "us-iso-east-1" : { + "variants" : [ { + "hostname" : "datasync-fips.us-iso-east-1.c2s.ic.gov", + "tags" : [ "fips" ] + } ] + }, "us-iso-west-1" : { 
"variants" : [ { "hostname" : "datasync-fips.us-iso-west-1.c2s.ic.gov", @@ -26499,6 +26740,15 @@ "us-iso-east-1" : { } } }, + "guardduty" : { + "defaults" : { + "protocols" : [ "https" ] + }, + "endpoints" : { + "us-iso-east-1" : { } + }, + "isRegionalized" : true + }, "health" : { "endpoints" : { "us-iso-east-1" : { } @@ -26971,6 +27221,16 @@ } } }, + "api.pricing" : { + "defaults" : { + "credentialScope" : { + "service" : "pricing" + } + }, + "endpoints" : { + "us-isob-east-1" : { } + } + }, "api.sagemaker" : { "endpoints" : { "us-isob-east-1" : { } @@ -26993,6 +27253,11 @@ "endpoints" : { "us-isob-east-1" : { } } + }, + "arc-zonal-shift" : { + "endpoints" : { + "us-isob-east-1" : { } + } }, "autoscaling" : { "defaults" : { diff -Nru awscli-2.15.9/awscli/botocore/data/es/2015-01-01/service-2.json awscli-2.15.22/awscli/botocore/data/es/2015-01-01/service-2.json --- awscli-2.15.9/awscli/botocore/data/es/2015-01-01/service-2.json 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/awscli/botocore/data/es/2015-01-01/service-2.json 2024-02-21 17:34:54.000000000 +0000 @@ -76,6 +76,23 @@ ], "documentation":"

Provides access to an Amazon OpenSearch Service domain through the use of an interface VPC endpoint.

" }, + "CancelDomainConfigChange":{ + "name":"CancelDomainConfigChange", + "http":{ + "method":"POST", + "requestUri":"/2015-01-01/es/domain/{DomainName}/config/cancel" + }, + "input":{"shape":"CancelDomainConfigChangeRequest"}, + "output":{"shape":"CancelDomainConfigChangeResponse"}, + "errors":[ + {"shape":"BaseException"}, + {"shape":"InternalException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"DisabledOperationException"} + ], + "documentation":"

Cancels a pending configuration change on an Amazon OpenSearch Service domain.

" + }, "CancelElasticsearchServiceSoftwareUpdate":{ "name":"CancelElasticsearchServiceSoftwareUpdate", "http":{ @@ -1287,6 +1304,41 @@ "exception":true }, "Boolean":{"type":"boolean"}, + "CancelDomainConfigChangeRequest":{ + "type":"structure", + "required":["DomainName"], + "members":{ + "DomainName":{ + "shape":"DomainName", + "documentation":"

Name of the OpenSearch Service domain configuration request to cancel.

", + "location":"uri", + "locationName":"DomainName" + }, + "DryRun":{ + "shape":"DryRun", + "documentation":"

When set to True, returns the list of change IDs and properties that will be cancelled without actually cancelling the change.

" + } + }, + "documentation":"

Container for parameters of the CancelDomainConfigChange operation.

" + }, + "CancelDomainConfigChangeResponse":{ + "type":"structure", + "members":{ + "DryRun":{ + "shape":"DryRun", + "documentation":"

Whether or not the request was a dry run. If True, the changes were not actually cancelled.

" + }, + "CancelledChangeIds":{ + "shape":"GUIDList", + "documentation":"

The unique identifiers of the changes that were cancelled.

" + }, + "CancelledChangeProperties":{ + "shape":"CancelledChangePropertyList", + "documentation":"

The domain change properties that were cancelled.

" + } + }, + "documentation":"

Contains the details of the cancelled domain config change.

" + }, "CancelElasticsearchServiceSoftwareUpdateRequest":{ "type":"structure", "required":["DomainName"], @@ -1308,6 +1360,28 @@ }, "documentation":"

The result of a CancelElasticsearchServiceSoftwareUpdate operation. Contains the status of the update.

" }, + "CancelledChangeProperty":{ + "type":"structure", + "members":{ + "PropertyName":{ + "shape":"String", + "documentation":"

The name of the property whose change was cancelled.

" + }, + "CancelledValue":{ + "shape":"String", + "documentation":"

The pending value of the property that was cancelled. This would have been the eventual value of the property if the change had not been cancelled.

" + }, + "ActiveValue":{ + "shape":"String", + "documentation":"

The current value of the property, after the change was cancelled.

" + } + }, + "documentation":"

A property change that was cancelled for an Amazon OpenSearch Service domain.

" + }, + "CancelledChangePropertyList":{ + "type":"list", + "member":{"shape":"CancelledChangeProperty"} + }, "ChangeProgressDetails":{ "type":"structure", "members":{ @@ -1318,6 +1392,22 @@ "Message":{ "shape":"Message", "documentation":"

Contains an optional message associated with the domain configuration change.

" + }, + "ConfigChangeStatus":{ + "shape":"ConfigChangeStatus", + "documentation":"

The current status of the configuration change.

" + }, + "StartTime":{ + "shape":"UpdateTimestamp", + "documentation":"

The time that the configuration change was initiated, in Coordinated Universal Time (UTC).

" + }, + "LastUpdatedTime":{ + "shape":"UpdateTimestamp", + "documentation":"

The last time that the configuration change was updated.

" + }, + "InitiatedBy":{ + "shape":"InitiatedBy", + "documentation":"

The IAM principal who initiated the configuration change.

" } }, "documentation":"

Specifies change details of the domain configuration change.

" @@ -1389,6 +1479,18 @@ "ChangeProgressStages":{ "shape":"ChangeProgressStageList", "documentation":"

The specific stages that the domain is going through to perform the configuration change.

" + }, + "ConfigChangeStatus":{ + "shape":"ConfigChangeStatus", + "documentation":"

The current status of the configuration change.

" + }, + "LastUpdatedTime":{ + "shape":"UpdateTimestamp", + "documentation":"

The last time that the status of the configuration change was updated.

" + }, + "InitiatedBy":{ + "shape":"InitiatedBy", + "documentation":"

The IAM principal who initiated the configuration change.

" } }, "documentation":"

The progress details of a specific domain configuration change.

" @@ -1472,6 +1574,19 @@ }, "documentation":"

A map from an ElasticsearchVersion to a list of compatible ElasticsearchVersion s to which the domain can be upgraded.

" }, + "ConfigChangeStatus":{ + "type":"string", + "enum":[ + "Pending", + "Initializing", + "Validating", + "ValidationFailed", + "ApplyingChanges", + "Completed", + "PendingUserInput", + "Cancelled" + ] + }, "ConflictException":{ "type":"structure", "members":{ @@ -2417,6 +2532,18 @@ "DISSOCIATION_FAILED" ] }, + "DomainProcessingStatusType":{ + "type":"string", + "enum":[ + "Creating", + "Active", + "Modifying", + "UpgradingEngineVersion", + "UpdatingServiceSoftware", + "Isolated", + "Deleting" + ] + }, "Double":{"type":"double"}, "DryRun":{"type":"boolean"}, "DryRunResults":{ @@ -2696,6 +2823,10 @@ "ChangeProgressDetails":{ "shape":"ChangeProgressDetails", "documentation":"

Specifies change details of the domain configuration change.

" + }, + "ModifyingProperties":{ + "shape":"ModifyingPropertiesList", + "documentation":"

Information about the domain properties that are currently being modified.

" } }, "documentation":"

The configuration of an Elasticsearch domain.

" @@ -2805,6 +2936,14 @@ "ChangeProgressDetails":{ "shape":"ChangeProgressDetails", "documentation":"

Specifies change details of the domain configuration change.

" + }, + "DomainProcessingStatus":{ + "shape":"DomainProcessingStatusType", + "documentation":"

The status of any changes that are currently in progress for the domain.

" + }, + "ModifyingProperties":{ + "shape":"ModifyingPropertiesList", + "documentation":"

Information about the domain properties that are currently being modified.

" } }, "documentation":"

The current status of an Elasticsearch domain.

" @@ -2925,6 +3064,10 @@ "type":"string", "pattern":"\\p{XDigit}{8}-\\p{XDigit}{4}-\\p{XDigit}{4}-\\p{XDigit}{4}-\\p{XDigit}{12}" }, + "GUIDList":{ + "type":"list", + "member":{"shape":"GUID"} + }, "GetCompatibleElasticsearchVersionsRequest":{ "type":"structure", "members":{ @@ -3106,6 +3249,13 @@ "type":"list", "member":{"shape":"InboundCrossClusterSearchConnection"} }, + "InitiatedBy":{ + "type":"string", + "enum":[ + "CUSTOMER", + "SERVICE" + ] + }, "InstanceCount":{ "type":"integer", "documentation":"

Specifies the number of EC2 instances in the Elasticsearch domain.

", @@ -3569,6 +3719,32 @@ "type":"integer", "documentation":"

Minimum number of Instances that can be instantiated for given InstanceType.

" }, + "ModifyingProperties":{ + "type":"structure", + "members":{ + "Name":{ + "shape":"String", + "documentation":"

The name of the property that is currently being modified.

" + }, + "ActiveValue":{ + "shape":"String", + "documentation":"

The current value of the domain property that is being modified.

" + }, + "PendingValue":{ + "shape":"String", + "documentation":"

The value that the property being modified will eventually have.

" + }, + "ValueType":{ + "shape":"PropertyValueType", + "documentation":"

The type of value that is currently being modified. Properties can have one of two value types: PLAIN_TEXT or STRINGIFIED_JSON.

" + } + }, + "documentation":"

Information about the domain properties that are currently being modified.

" + }, + "ModifyingPropertiesList":{ + "type":"list", + "member":{"shape":"ModifyingProperties"} + }, "NextToken":{ "type":"string", "documentation":"

Paginated APIs accepts NextToken input to returns next page results and provides a NextToken output in the response which can be used by the client to retrieve more results.

" @@ -3839,6 +4015,13 @@ "AWS_SERVICE" ] }, + "PropertyValueType":{ + "type":"string", + "enum":[ + "PLAIN_TEXT", + "STRINGIFIED_JSON" + ] + }, "PurchaseReservedElasticsearchInstanceOfferingRequest":{ "type":"structure", "required":[ diff -Nru awscli-2.15.9/awscli/botocore/data/events/2015-10-07/service-2.json awscli-2.15.22/awscli/botocore/data/events/2015-10-07/service-2.json --- awscli-2.15.9/awscli/botocore/data/events/2015-10-07/service-2.json 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/awscli/botocore/data/events/2015-10-07/service-2.json 2024-02-21 17:34:54.000000000 +0000 @@ -960,6 +960,16 @@ "INACTIVE" ] }, + "AppSyncParameters":{ + "type":"structure", + "members":{ + "GraphQLOperation":{ + "shape":"GraphQLOperation", + "documentation":"

The GraphQL operation; that is, the query, mutation, or subscription to be parsed and executed by the GraphQL service.

For more information, see Operations in the AppSync User Guide.

" + } + }, + "documentation":"

Contains the GraphQL operation to be parsed and executed, if the event target is an AppSync API.

" + }, "Archive":{ "type":"structure", "members":{ @@ -2847,6 +2857,12 @@ }, "documentation":"

The failover configuration for an endpoint. This includes what triggers failover and what happens when it's triggered.

" }, + "GraphQLOperation":{ + "type":"string", + "max":1048576, + "min":1, + "sensitive":true + }, "HeaderKey":{ "type":"string", "max":512, @@ -3847,7 +3863,7 @@ }, "State":{ "shape":"RuleState", - "documentation":"

The state of the rule.

Valid values include:

" + "documentation":"

Indicates whether the rule is enabled or disabled.

" }, "Description":{ "shape":"RuleDescription", @@ -4267,7 +4283,7 @@ }, "State":{ "shape":"RuleState", - "documentation":"

The state of the rule.

Valid values include:

" + "documentation":"

The state of the rule.

" }, "Description":{ "shape":"RuleDescription", @@ -4671,6 +4687,10 @@ "RetryPolicy":{ "shape":"RetryPolicy", "documentation":"

The RetryPolicy object that contains the retry policy configuration to use for the dead-letter queue.

" + }, + "AppSyncParameters":{ + "shape":"AppSyncParameters", + "documentation":"

Contains the GraphQL operation to be parsed and executed, if the event target is an AppSync API.

" } }, "documentation":"

Targets are the resources to be invoked when a rule is triggered. For a complete list of services and resources that can be set as a target, see PutTargets.

If you are setting the event bus of another account as the target, and that account granted permission to your account through an organization instead of directly by the account ID, then you must specify a RoleArn with proper permissions in the Target structure. For more information, see Sending and Receiving Events Between Amazon Web Services Accounts in the Amazon EventBridge User Guide.

" diff -Nru awscli-2.15.9/awscli/botocore/data/finspace/2021-03-12/service-2.json awscli-2.15.22/awscli/botocore/data/finspace/2021-03-12/service-2.json --- awscli-2.15.9/awscli/botocore/data/finspace/2021-03-12/service-2.json 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/awscli/botocore/data/finspace/2021-03-12/service-2.json 2024-02-21 17:34:54.000000000 +0000 @@ -3347,13 +3347,13 @@ "type":"string", "max":50, "min":1, - "pattern":"^(?![Aa][Ww][Ss])(s|([a-zA-Z][a-zA-Z0-9_]+))" + "pattern":"^(?![Aa][Ww][Ss])(s|([a-zA-Z][a-zA-Z0-9_]+))|(AWS_ZIP_DEFAULT)" }, "KxCommandLineArgumentValue":{ "type":"string", "max":50, "min":1, - "pattern":"^[a-zA-Z0-9_:./]+$" + "pattern":"^[a-zA-Z0-9_:./,]+$" }, "KxCommandLineArguments":{ "type":"list", diff -Nru awscli-2.15.9/awscli/botocore/data/firehose/2015-08-04/service-2.json awscli-2.15.22/awscli/botocore/data/firehose/2015-08-04/service-2.json --- awscli-2.15.9/awscli/botocore/data/firehose/2015-08-04/service-2.json 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/awscli/botocore/data/firehose/2015-08-04/service-2.json 2024-02-21 17:34:54.000000000 +0000 @@ -27,7 +27,7 @@ {"shape":"ResourceInUseException"}, {"shape":"InvalidKMSResourceException"} ], - "documentation":"

Creates a Kinesis Data Firehose delivery stream.

By default, you can create up to 50 delivery streams per Amazon Web Services Region.

This is an asynchronous operation that immediately returns. The initial status of the delivery stream is CREATING. After the delivery stream is created, its status is ACTIVE and it now accepts data. If the delivery stream creation fails, the status transitions to CREATING_FAILED. Attempts to send data to a delivery stream that is not in the ACTIVE state cause an exception. To check the state of a delivery stream, use DescribeDeliveryStream.

If the status of a delivery stream is CREATING_FAILED, this status doesn't change, and you can't invoke CreateDeliveryStream again on it. However, you can invoke the DeleteDeliveryStream operation to delete it.

A Kinesis Data Firehose delivery stream can be configured to receive records directly from providers using PutRecord or PutRecordBatch, or it can be configured to use an existing Kinesis stream as its source. To specify a Kinesis data stream as input, set the DeliveryStreamType parameter to KinesisStreamAsSource, and provide the Kinesis stream Amazon Resource Name (ARN) and role ARN in the KinesisStreamSourceConfiguration parameter.

To create a delivery stream with server-side encryption (SSE) enabled, include DeliveryStreamEncryptionConfigurationInput in your request. This is optional. You can also invoke StartDeliveryStreamEncryption to turn on SSE for an existing delivery stream that doesn't have SSE enabled.

A delivery stream is configured with a single destination, such as Amazon Simple Storage Service (Amazon S3), Amazon Redshift, Amazon OpenSearch Service, Amazon OpenSearch Serverless, Splunk, and any custom HTTP endpoint or HTTP endpoints owned by or supported by third-party service providers, including Datadog, Dynatrace, LogicMonitor, MongoDB, New Relic, and Sumo Logic. You must specify only one of the following destination configuration parameters: ExtendedS3DestinationConfiguration, S3DestinationConfiguration, ElasticsearchDestinationConfiguration, RedshiftDestinationConfiguration, or SplunkDestinationConfiguration.

When you specify S3DestinationConfiguration, you can also provide the following optional values: BufferingHints, EncryptionConfiguration, and CompressionFormat. By default, if no BufferingHints value is provided, Kinesis Data Firehose buffers data up to 5 MB or for 5 minutes, whichever condition is satisfied first. BufferingHints is a hint, so there are some cases where the service cannot adhere to these conditions strictly. For example, record boundaries might be such that the size is a little over or under the configured buffering size. By default, no encryption is performed. We strongly recommend that you enable encryption to ensure secure data storage in Amazon S3.

A few notes about Amazon Redshift as a destination:

Kinesis Data Firehose assumes the IAM role that is configured as part of the destination. The role should allow the Kinesis Data Firehose principal to assume the role, and the role should have permissions that allow the service to deliver the data. For more information, see Grant Kinesis Data Firehose Access to an Amazon S3 Destination in the Amazon Kinesis Data Firehose Developer Guide.

" + "documentation":"

Creates a Firehose delivery stream.

By default, you can create up to 50 delivery streams per Amazon Web Services Region.

This is an asynchronous operation that immediately returns. The initial status of the delivery stream is CREATING. After the delivery stream is created, its status is ACTIVE and it now accepts data. If the delivery stream creation fails, the status transitions to CREATING_FAILED. Attempts to send data to a delivery stream that is not in the ACTIVE state cause an exception. To check the state of a delivery stream, use DescribeDeliveryStream.

If the status of a delivery stream is CREATING_FAILED, this status doesn't change, and you can't invoke CreateDeliveryStream again on it. However, you can invoke the DeleteDeliveryStream operation to delete it.

A Firehose delivery stream can be configured to receive records directly from providers using PutRecord or PutRecordBatch, or it can be configured to use an existing Kinesis stream as its source. To specify a Kinesis data stream as input, set the DeliveryStreamType parameter to KinesisStreamAsSource, and provide the Kinesis stream Amazon Resource Name (ARN) and role ARN in the KinesisStreamSourceConfiguration parameter.

To create a delivery stream with server-side encryption (SSE) enabled, include DeliveryStreamEncryptionConfigurationInput in your request. This is optional. You can also invoke StartDeliveryStreamEncryption to turn on SSE for an existing delivery stream that doesn't have SSE enabled.

A delivery stream is configured with a single destination, such as Amazon Simple Storage Service (Amazon S3), Amazon Redshift, Amazon OpenSearch Service, Amazon OpenSearch Serverless, Splunk, and any custom HTTP endpoint or HTTP endpoints owned by or supported by third-party service providers, including Datadog, Dynatrace, LogicMonitor, MongoDB, New Relic, and Sumo Logic. You must specify only one of the following destination configuration parameters: ExtendedS3DestinationConfiguration, S3DestinationConfiguration, ElasticsearchDestinationConfiguration, RedshiftDestinationConfiguration, or SplunkDestinationConfiguration.

When you specify S3DestinationConfiguration, you can also provide the following optional values: BufferingHints, EncryptionConfiguration, and CompressionFormat. By default, if no BufferingHints value is provided, Firehose buffers data up to 5 MB or for 5 minutes, whichever condition is satisfied first. BufferingHints is a hint, so there are some cases where the service cannot adhere to these conditions strictly. For example, record boundaries might be such that the size is a little over or under the configured buffering size. By default, no encryption is performed. We strongly recommend that you enable encryption to ensure secure data storage in Amazon S3.

A few notes about Amazon Redshift as a destination:

Firehose assumes the IAM role that is configured as part of the destination. The role should allow the Firehose principal to assume the role, and the role should have permissions that allow the service to deliver the data. For more information, see Grant Firehose Access to an Amazon S3 Destination in the Amazon Firehose Developer Guide.

" }, "DeleteDeliveryStream":{ "name":"DeleteDeliveryStream", @@ -41,7 +41,7 @@ {"shape":"ResourceInUseException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Deletes a delivery stream and its data.

To check the state of a delivery stream, use DescribeDeliveryStream. You can delete a delivery stream only if it is in one of the following states: ACTIVE, DELETING, CREATING_FAILED, or DELETING_FAILED. You can't delete a delivery stream that is in the CREATING state. While the deletion request is in process, the delivery stream is in the DELETING state.

While the delivery stream is in the DELETING state, the service might continue to accept records, but it doesn't make any guarantees with respect to delivering the data. Therefore, as a best practice, first stop any applications that are sending records before you delete a delivery stream.

" + "documentation":"

Deletes a delivery stream and its data.

You can delete a delivery stream only if it is in one of the following states: ACTIVE, DELETING, CREATING_FAILED, or DELETING_FAILED. You can't delete a delivery stream that is in the CREATING state. To check the state of a delivery stream, use DescribeDeliveryStream.

DeleteDeliveryStream is an asynchronous API. When an API request to DeleteDeliveryStream succeeds, the delivery stream is marked for deletion, and it goes into the DELETING state. While the delivery stream is in the DELETING state, the service might continue to accept records, but it doesn't make any guarantees with respect to delivering the data. Therefore, as a best practice, first stop any applications that are sending records before you delete a delivery stream.

Removal of a delivery stream that is in the DELETING state is a low priority operation for the service. A stream may remain in the DELETING state for several minutes. Therefore, as a best practice, applications should not wait for streams in the DELETING state to be removed.

" }, "DescribeDeliveryStream":{ "name":"DescribeDeliveryStream", @@ -96,7 +96,7 @@ {"shape":"InvalidSourceException"}, {"shape":"ServiceUnavailableException"} ], - "documentation":"

Writes a single data record into an Amazon Kinesis Data Firehose delivery stream. To write multiple data records into a delivery stream, use PutRecordBatch. Applications using these operations are referred to as producers.

By default, each delivery stream can take in up to 2,000 transactions per second, 5,000 records per second, or 5 MB per second. If you use PutRecord and PutRecordBatch, the limits are an aggregate across these two operations for each delivery stream. For more information about limits and how to request an increase, see Amazon Kinesis Data Firehose Limits.

Kinesis Data Firehose accumulates and publishes a particular metric for a customer account in one minute intervals. It is possible that the bursts of incoming bytes/records ingested to a delivery stream last only for a few seconds. Due to this, the actual spikes in the traffic might not be fully visible in the customer's 1 minute CloudWatch metrics.

You must specify the name of the delivery stream and the data record when using PutRecord. The data record consists of a data blob that can be up to 1,000 KiB in size, and any kind of data. For example, it can be a segment from a log file, geographic location data, website clickstream data, and so on.

Kinesis Data Firehose buffers records before delivering them to the destination. To disambiguate the data blobs at the destination, a common solution is to use delimiters in the data, such as a newline (\\n) or some other character unique within the data. This allows the consumer application to parse individual data items when reading the data from the destination.

The PutRecord operation returns a RecordId, which is a unique string assigned to each record. Producer applications can use this ID for purposes such as auditability and investigation.

If the PutRecord operation throws a ServiceUnavailableException, the API is automatically reinvoked (retried) 3 times. If the exception persists, it is possible that the throughput limits have been exceeded for the delivery stream.

Re-invoking the Put API operations (for example, PutRecord and PutRecordBatch) can result in data duplicates. For larger data assets, allow for a longer time out before retrying Put API operations.

Data records sent to Kinesis Data Firehose are stored for 24 hours from the time they are added to a delivery stream as it tries to send the records to the destination. If the destination is unreachable for more than 24 hours, the data is no longer available.

Don't concatenate two or more base64 strings to form the data fields of your records. Instead, concatenate the raw data, then perform base64 encoding.

" + "documentation":"

Writes a single data record into an Amazon Firehose delivery stream. To write multiple data records into a delivery stream, use PutRecordBatch. Applications using these operations are referred to as producers.

By default, each delivery stream can take in up to 2,000 transactions per second, 5,000 records per second, or 5 MB per second. If you use PutRecord and PutRecordBatch, the limits are an aggregate across these two operations for each delivery stream. For more information about limits and how to request an increase, see Amazon Firehose Limits.

Firehose accumulates and publishes a particular metric for a customer account in one minute intervals. It is possible that the bursts of incoming bytes/records ingested to a delivery stream last only for a few seconds. Due to this, the actual spikes in the traffic might not be fully visible in the customer's 1 minute CloudWatch metrics.

You must specify the name of the delivery stream and the data record when using PutRecord. The data record consists of a data blob that can be up to 1,000 KiB in size, and any kind of data. For example, it can be a segment from a log file, geographic location data, website clickstream data, and so on.

Firehose buffers records before delivering them to the destination. To disambiguate the data blobs at the destination, a common solution is to use delimiters in the data, such as a newline (\\n) or some other character unique within the data. This allows the consumer application to parse individual data items when reading the data from the destination.

The PutRecord operation returns a RecordId, which is a unique string assigned to each record. Producer applications can use this ID for purposes such as auditability and investigation.

If the PutRecord operation throws a ServiceUnavailableException, the API is automatically reinvoked (retried) 3 times. If the exception persists, it is possible that the throughput limits have been exceeded for the delivery stream.

Re-invoking the Put API operations (for example, PutRecord and PutRecordBatch) can result in data duplicates. For larger data assets, allow for a longer time out before retrying Put API operations.

Data records sent to Firehose are stored for 24 hours from the time they are added to a delivery stream as it tries to send the records to the destination. If the destination is unreachable for more than 24 hours, the data is no longer available.

Don't concatenate two or more base64 strings to form the data fields of your records. Instead, concatenate the raw data, then perform base64 encoding.

" }, "PutRecordBatch":{ "name":"PutRecordBatch", @@ -113,7 +113,7 @@ {"shape":"InvalidSourceException"}, {"shape":"ServiceUnavailableException"} ], - "documentation":"

Writes multiple data records into a delivery stream in a single call, which can achieve higher throughput per producer than when writing single records. To write single data records into a delivery stream, use PutRecord. Applications using these operations are referred to as producers.

Kinesis Data Firehose accumulates and publishes a particular metric for a customer account in one minute intervals. It is possible that the bursts of incoming bytes/records ingested to a delivery stream last only for a few seconds. Due to this, the actual spikes in the traffic might not be fully visible in the customer's 1 minute CloudWatch metrics.

For information about service quota, see Amazon Kinesis Data Firehose Quota.

Each PutRecordBatch request supports up to 500 records. Each record in the request can be as large as 1,000 KB (before base64 encoding), up to a limit of 4 MB for the entire request. These limits cannot be changed.

You must specify the name of the delivery stream and the data record when using PutRecord. The data record consists of a data blob that can be up to 1,000 KB in size, and any kind of data. For example, it could be a segment from a log file, geographic location data, website clickstream data, and so on.

Kinesis Data Firehose buffers records before delivering them to the destination. To disambiguate the data blobs at the destination, a common solution is to use delimiters in the data, such as a newline (\\n) or some other character unique within the data. This allows the consumer application to parse individual data items when reading the data from the destination.

The PutRecordBatch response includes a count of failed records, FailedPutCount, and an array of responses, RequestResponses. Even if the PutRecordBatch call succeeds, the value of FailedPutCount may be greater than 0, indicating that there are records for which the operation didn't succeed. Each entry in the RequestResponses array provides additional information about the processed record. It directly correlates with a record in the request array using the same ordering, from the top to the bottom. The response array always includes the same number of records as the request array. RequestResponses includes both successfully and unsuccessfully processed records. Kinesis Data Firehose tries to process all records in each PutRecordBatch request. A single record failure does not stop the processing of subsequent records.

A successfully processed record includes a RecordId value, which is unique for the record. An unsuccessfully processed record includes ErrorCode and ErrorMessage values. ErrorCode reflects the type of error, and is one of the following values: ServiceUnavailableException or InternalFailure. ErrorMessage provides more detailed information about the error.

If there is an internal server error or a timeout, the write might have completed or it might have failed. If FailedPutCount is greater than 0, retry the request, resending only those records that might have failed processing. This minimizes the possible duplicate records and also reduces the total bytes sent (and corresponding charges). We recommend that you handle any duplicates at the destination.

If PutRecordBatch throws ServiceUnavailableException, the API is automatically reinvoked (retried) 3 times. If the exception persists, it is possible that the throughput limits have been exceeded for the delivery stream.

Re-invoking the Put API operations (for example, PutRecord and PutRecordBatch) can result in data duplicates. For larger data assets, allow for a longer time out before retrying Put API operations.

Data records sent to Kinesis Data Firehose are stored for 24 hours from the time they are added to a delivery stream as it attempts to send the records to the destination. If the destination is unreachable for more than 24 hours, the data is no longer available.

Don't concatenate two or more base64 strings to form the data fields of your records. Instead, concatenate the raw data, then perform base64 encoding.

" + "documentation":"

Writes multiple data records into a delivery stream in a single call, which can achieve higher throughput per producer than when writing single records. To write single data records into a delivery stream, use PutRecord. Applications using these operations are referred to as producers.

Firehose accumulates and publishes a particular metric for a customer account in one minute intervals. It is possible that the bursts of incoming bytes/records ingested to a delivery stream last only for a few seconds. Due to this, the actual spikes in the traffic might not be fully visible in the customer's 1 minute CloudWatch metrics.

For information about service quota, see Amazon Firehose Quota.

Each PutRecordBatch request supports up to 500 records. Each record in the request can be as large as 1,000 KB (before base64 encoding), up to a limit of 4 MB for the entire request. These limits cannot be changed.

You must specify the name of the delivery stream and the data record when using PutRecord. The data record consists of a data blob that can be up to 1,000 KB in size, and any kind of data. For example, it could be a segment from a log file, geographic location data, website clickstream data, and so on.

Firehose buffers records before delivering them to the destination. To disambiguate the data blobs at the destination, a common solution is to use delimiters in the data, such as a newline (\\n) or some other character unique within the data. This allows the consumer application to parse individual data items when reading the data from the destination.

The PutRecordBatch response includes a count of failed records, FailedPutCount, and an array of responses, RequestResponses. Even if the PutRecordBatch call succeeds, the value of FailedPutCount may be greater than 0, indicating that there are records for which the operation didn't succeed. Each entry in the RequestResponses array provides additional information about the processed record. It directly correlates with a record in the request array using the same ordering, from the top to the bottom. The response array always includes the same number of records as the request array. RequestResponses includes both successfully and unsuccessfully processed records. Firehose tries to process all records in each PutRecordBatch request. A single record failure does not stop the processing of subsequent records.

A successfully processed record includes a RecordId value, which is unique for the record. An unsuccessfully processed record includes ErrorCode and ErrorMessage values. ErrorCode reflects the type of error, and is one of the following values: ServiceUnavailableException or InternalFailure. ErrorMessage provides more detailed information about the error.

If there is an internal server error or a timeout, the write might have completed or it might have failed. If FailedPutCount is greater than 0, retry the request, resending only those records that might have failed processing. This minimizes the possible duplicate records and also reduces the total bytes sent (and corresponding charges). We recommend that you handle any duplicates at the destination.

If PutRecordBatch throws ServiceUnavailableException, the API is automatically reinvoked (retried) 3 times. If the exception persists, it is possible that the throughput limits have been exceeded for the delivery stream.

Re-invoking the Put API operations (for example, PutRecord and PutRecordBatch) can result in data duplicates. For larger data assets, allow for a longer time out before retrying Put API operations.

Data records sent to Firehose are stored for 24 hours from the time they are added to a delivery stream as it attempts to send the records to the destination. If the destination is unreachable for more than 24 hours, the data is no longer available.

Don't concatenate two or more base64 strings to form the data fields of your records. Instead, concatenate the raw data, then perform base64 encoding.

" }, "StartDeliveryStreamEncryption":{ "name":"StartDeliveryStreamEncryption", @@ -130,7 +130,7 @@ {"shape":"LimitExceededException"}, {"shape":"InvalidKMSResourceException"} ], - "documentation":"

Enables server-side encryption (SSE) for the delivery stream.

This operation is asynchronous. It returns immediately. When you invoke it, Kinesis Data Firehose first sets the encryption status of the stream to ENABLING, and then to ENABLED. The encryption status of a delivery stream is the Status property in DeliveryStreamEncryptionConfiguration. If the operation fails, the encryption status changes to ENABLING_FAILED. You can continue to read and write data to your delivery stream while the encryption status is ENABLING, but the data is not encrypted. It can take up to 5 seconds after the encryption status changes to ENABLED before all records written to the delivery stream are encrypted. To find out whether a record or a batch of records was encrypted, check the response elements PutRecordOutput$Encrypted and PutRecordBatchOutput$Encrypted, respectively.

To check the encryption status of a delivery stream, use DescribeDeliveryStream.

Even if encryption is currently enabled for a delivery stream, you can still invoke this operation on it to change the ARN of the CMK or both its type and ARN. If you invoke this method to change the CMK, and the old CMK is of type CUSTOMER_MANAGED_CMK, Kinesis Data Firehose schedules the grant it had on the old CMK for retirement. If the new CMK is of type CUSTOMER_MANAGED_CMK, Kinesis Data Firehose creates a grant that enables it to use the new CMK to encrypt and decrypt data and to manage the grant.

For the KMS grant creation to be successful, Kinesis Data Firehose APIs StartDeliveryStreamEncryption and CreateDeliveryStream should not be called with session credentials that are more than 6 hours old.

If a delivery stream already has encryption enabled and then you invoke this operation to change the ARN of the CMK or both its type and ARN and you get ENABLING_FAILED, this only means that the attempt to change the CMK failed. In this case, encryption remains enabled with the old CMK.

If the encryption status of your delivery stream is ENABLING_FAILED, you can invoke this operation again with a valid CMK. The CMK must be enabled and the key policy mustn't explicitly deny the permission for Kinesis Data Firehose to invoke KMS encrypt and decrypt operations.

You can enable SSE for a delivery stream only if it's a delivery stream that uses DirectPut as its source.

The StartDeliveryStreamEncryption and StopDeliveryStreamEncryption operations have a combined limit of 25 calls per delivery stream per 24 hours. For example, you reach the limit if you call StartDeliveryStreamEncryption 13 times and StopDeliveryStreamEncryption 12 times for the same delivery stream in a 24-hour period.

" + "documentation":"

Enables server-side encryption (SSE) for the delivery stream.

This operation is asynchronous. It returns immediately. When you invoke it, Firehose first sets the encryption status of the stream to ENABLING, and then to ENABLED. The encryption status of a delivery stream is the Status property in DeliveryStreamEncryptionConfiguration. If the operation fails, the encryption status changes to ENABLING_FAILED. You can continue to read and write data to your delivery stream while the encryption status is ENABLING, but the data is not encrypted. It can take up to 5 seconds after the encryption status changes to ENABLED before all records written to the delivery stream are encrypted. To find out whether a record or a batch of records was encrypted, check the response elements PutRecordOutput$Encrypted and PutRecordBatchOutput$Encrypted, respectively.

To check the encryption status of a delivery stream, use DescribeDeliveryStream.

Even if encryption is currently enabled for a delivery stream, you can still invoke this operation on it to change the ARN of the CMK or both its type and ARN. If you invoke this method to change the CMK, and the old CMK is of type CUSTOMER_MANAGED_CMK, Firehose schedules the grant it had on the old CMK for retirement. If the new CMK is of type CUSTOMER_MANAGED_CMK, Firehose creates a grant that enables it to use the new CMK to encrypt and decrypt data and to manage the grant.

For the KMS grant creation to be successful, Firehose APIs StartDeliveryStreamEncryption and CreateDeliveryStream should not be called with session credentials that are more than 6 hours old.

If a delivery stream already has encryption enabled and then you invoke this operation to change the ARN of the CMK or both its type and ARN and you get ENABLING_FAILED, this only means that the attempt to change the CMK failed. In this case, encryption remains enabled with the old CMK.

If the encryption status of your delivery stream is ENABLING_FAILED, you can invoke this operation again with a valid CMK. The CMK must be enabled and the key policy mustn't explicitly deny the permission for Firehose to invoke KMS encrypt and decrypt operations.

You can enable SSE for a delivery stream only if it's a delivery stream that uses DirectPut as its source.

The StartDeliveryStreamEncryption and StopDeliveryStreamEncryption operations have a combined limit of 25 calls per delivery stream per 24 hours. For example, you reach the limit if you call StartDeliveryStreamEncryption 13 times and StopDeliveryStreamEncryption 12 times for the same delivery stream in a 24-hour period.

" }, "StopDeliveryStreamEncryption":{ "name":"StopDeliveryStreamEncryption", @@ -146,7 +146,7 @@ {"shape":"InvalidArgumentException"}, {"shape":"LimitExceededException"} ], - "documentation":"

Disables server-side encryption (SSE) for the delivery stream.

This operation is asynchronous. It returns immediately. When you invoke it, Kinesis Data Firehose first sets the encryption status of the stream to DISABLING, and then to DISABLED. You can continue to read and write data to your stream while its status is DISABLING. It can take up to 5 seconds after the encryption status changes to DISABLED before all records written to the delivery stream are no longer subject to encryption. To find out whether a record or a batch of records was encrypted, check the response elements PutRecordOutput$Encrypted and PutRecordBatchOutput$Encrypted, respectively.

To check the encryption state of a delivery stream, use DescribeDeliveryStream.

If SSE is enabled using a customer managed CMK and then you invoke StopDeliveryStreamEncryption, Kinesis Data Firehose schedules the related KMS grant for retirement and then retires it after it ensures that it is finished delivering records to the destination.

The StartDeliveryStreamEncryption and StopDeliveryStreamEncryption operations have a combined limit of 25 calls per delivery stream per 24 hours. For example, you reach the limit if you call StartDeliveryStreamEncryption 13 times and StopDeliveryStreamEncryption 12 times for the same delivery stream in a 24-hour period.

" + "documentation":"

Disables server-side encryption (SSE) for the delivery stream.

This operation is asynchronous. It returns immediately. When you invoke it, Firehose first sets the encryption status of the stream to DISABLING, and then to DISABLED. You can continue to read and write data to your stream while its status is DISABLING. It can take up to 5 seconds after the encryption status changes to DISABLED before all records written to the delivery stream are no longer subject to encryption. To find out whether a record or a batch of records was encrypted, check the response elements PutRecordOutput$Encrypted and PutRecordBatchOutput$Encrypted, respectively.

To check the encryption state of a delivery stream, use DescribeDeliveryStream.

If SSE is enabled using a customer managed CMK and then you invoke StopDeliveryStreamEncryption, Firehose schedules the related KMS grant for retirement and then retires it after it ensures that it is finished delivering records to the destination.

The StartDeliveryStreamEncryption and StopDeliveryStreamEncryption operations have a combined limit of 25 calls per delivery stream per 24 hours. For example, you reach the limit if you call StartDeliveryStreamEncryption 13 times and StopDeliveryStreamEncryption 12 times for the same delivery stream in a 24-hour period.

" }, "TagDeliveryStream":{ "name":"TagDeliveryStream", @@ -194,7 +194,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"ConcurrentModificationException"} ], - "documentation":"

Updates the specified destination of the specified delivery stream.

Use this operation to change the destination type (for example, to replace the Amazon S3 destination with Amazon Redshift) or change the parameters associated with a destination (for example, to change the bucket name of the Amazon S3 destination). The update might not occur immediately. The target delivery stream remains active while the configurations are updated, so data writes to the delivery stream can continue during this process. The updated configurations are usually effective within a few minutes.

Switching between Amazon OpenSearch Service and other services is not supported. For an Amazon OpenSearch Service destination, you can only update to another Amazon OpenSearch Service destination.

If the destination type is the same, Kinesis Data Firehose merges the configuration parameters specified with the destination configuration that already exists on the delivery stream. If any of the parameters are not specified in the call, the existing values are retained. For example, in the Amazon S3 destination, if EncryptionConfiguration is not specified, then the existing EncryptionConfiguration is maintained on the destination.

If the destination type is not the same, for example, changing the destination from Amazon S3 to Amazon Redshift, Kinesis Data Firehose does not merge any parameters. In this case, all parameters must be specified.

Kinesis Data Firehose uses CurrentDeliveryStreamVersionId to avoid race conditions and conflicting merges. This is a required field, and the service updates the configuration only if the existing configuration has a version ID that matches. After the update is applied successfully, the version ID is updated, and can be retrieved using DescribeDeliveryStream. Use the new version ID to set CurrentDeliveryStreamVersionId in the next call.

" + "documentation":"

Updates the specified destination of the specified delivery stream.

Use this operation to change the destination type (for example, to replace the Amazon S3 destination with Amazon Redshift) or change the parameters associated with a destination (for example, to change the bucket name of the Amazon S3 destination). The update might not occur immediately. The target delivery stream remains active while the configurations are updated, so data writes to the delivery stream can continue during this process. The updated configurations are usually effective within a few minutes.

Switching between Amazon OpenSearch Service and other services is not supported. For an Amazon OpenSearch Service destination, you can only update to another Amazon OpenSearch Service destination.

If the destination type is the same, Firehose merges the configuration parameters specified with the destination configuration that already exists on the delivery stream. If any of the parameters are not specified in the call, the existing values are retained. For example, in the Amazon S3 destination, if EncryptionConfiguration is not specified, then the existing EncryptionConfiguration is maintained on the destination.

If the destination type is not the same, for example, changing the destination from Amazon S3 to Amazon Redshift, Firehose does not merge any parameters. In this case, all parameters must be specified.

Firehose uses CurrentDeliveryStreamVersionId to avoid race conditions and conflicting merges. This is a required field, and the service updates the configuration only if the existing configuration has a version ID that matches. After the update is applied successfully, the version ID is updated, and can be retrieved using DescribeDeliveryStream. Use the new version ID to set CurrentDeliveryStreamVersionId in the next call.

" } }, "shapes":{ @@ -244,7 +244,7 @@ "members":{ "RoleARN":{ "shape":"RoleARN", - "documentation":"

The Amazon Resource Name (ARN) of the IAM role to be assumed by Kinesis Data Firehose for calling the Serverless offering for Amazon OpenSearch Service Configuration API and for indexing documents.

" + "documentation":"

The Amazon Resource Name (ARN) of the IAM role to be assumed by Firehose for calling the Serverless offering for Amazon OpenSearch Service Configuration API and for indexing documents.

" }, "CollectionEndpoint":{ "shape":"AmazonOpenSearchServerlessCollectionEndpoint", @@ -260,11 +260,11 @@ }, "RetryOptions":{ "shape":"AmazonOpenSearchServerlessRetryOptions", - "documentation":"

The retry behavior in case Kinesis Data Firehose is unable to deliver documents to the Serverless offering for Amazon OpenSearch Service. The default value is 300 (5 minutes).

" + "documentation":"

The retry behavior in case Firehose is unable to deliver documents to the Serverless offering for Amazon OpenSearch Service. The default value is 300 (5 minutes).

" }, "S3BackupMode":{ "shape":"AmazonOpenSearchServerlessS3BackupMode", - "documentation":"

Defines how documents should be delivered to Amazon S3. When it is set to FailedDocumentsOnly, Kinesis Data Firehose writes any documents that could not be indexed to the configured Amazon S3 destination, with AmazonOpenSearchService-failed/ appended to the key prefix. When set to AllDocuments, Kinesis Data Firehose delivers all incoming records to Amazon S3, and also writes failed documents with AmazonOpenSearchService-failed/ appended to the prefix.

" + "documentation":"

Defines how documents should be delivered to Amazon S3. When it is set to FailedDocumentsOnly, Firehose writes any documents that could not be indexed to the configured Amazon S3 destination, with AmazonOpenSearchService-failed/ appended to the key prefix. When set to AllDocuments, Firehose delivers all incoming records to Amazon S3, and also writes failed documents with AmazonOpenSearchService-failed/ appended to the prefix.

" }, "S3Configuration":{"shape":"S3DestinationConfiguration"}, "ProcessingConfiguration":{"shape":"ProcessingConfiguration"}, @@ -312,7 +312,7 @@ "members":{ "RoleARN":{ "shape":"RoleARN", - "documentation":"

The Amazon Resource Name (ARN) of the IAM role to be assumed by Kinesis Data Firehose for calling the Serverless offering for Amazon OpenSearch Service Configuration API and for indexing documents.

" + "documentation":"

The Amazon Resource Name (ARN) of the IAM role to be assumed by Firehose for calling the Serverless offering for Amazon OpenSearch Service Configuration API and for indexing documents.

" }, "CollectionEndpoint":{ "shape":"AmazonOpenSearchServerlessCollectionEndpoint", @@ -328,7 +328,7 @@ }, "RetryOptions":{ "shape":"AmazonOpenSearchServerlessRetryOptions", - "documentation":"

The retry behavior in case Kinesis Data Firehose is unable to deliver documents to the Serverless offering for Amazon OpenSearch Service. The default value is 300 (5 minutes).

" + "documentation":"

The retry behavior in case Firehose is unable to deliver documents to the Serverless offering for Amazon OpenSearch Service. The default value is 300 (5 minutes).

" }, "S3Update":{"shape":"S3DestinationUpdate"}, "ProcessingConfiguration":{"shape":"ProcessingConfiguration"}, @@ -352,10 +352,10 @@ "members":{ "DurationInSeconds":{ "shape":"AmazonOpenSearchServerlessRetryDurationInSeconds", - "documentation":"

After an initial failure to deliver to the Serverless offering for Amazon OpenSearch Service, the total amount of time during which Kinesis Data Firehose retries delivery (including the first attempt). After this time has elapsed, the failed documents are written to Amazon S3. Default value is 300 seconds (5 minutes). A value of 0 (zero) results in no retries.

" + "documentation":"

After an initial failure to deliver to the Serverless offering for Amazon OpenSearch Service, the total amount of time during which Firehose retries delivery (including the first attempt). After this time has elapsed, the failed documents are written to Amazon S3. Default value is 300 seconds (5 minutes). A value of 0 (zero) results in no retries.

" } }, - "documentation":"

Configures retry behavior in case Kinesis Data Firehose is unable to deliver documents to the Serverless offering for Amazon OpenSearch Service.

" + "documentation":"

Configures retry behavior in case Firehose is unable to deliver documents to the Serverless offering for Amazon OpenSearch Service.

" }, "AmazonOpenSearchServerlessS3BackupMode":{ "type":"string", @@ -404,7 +404,7 @@ "members":{ "RoleARN":{ "shape":"RoleARN", - "documentation":"

The Amazon Resource Name (ARN) of the IAM role to be assumed by Kinesis Data Firehose for calling the Amazon OpenSearch Service Configuration API and for indexing documents.

" + "documentation":"

The Amazon Resource Name (ARN) of the IAM role to be assumed by Firehose for calling the Amazon OpenSearch Service Configuration API and for indexing documents.

" }, "DomainARN":{ "shape":"AmazonopensearchserviceDomainARN", @@ -420,7 +420,7 @@ }, "TypeName":{ "shape":"AmazonopensearchserviceTypeName", - "documentation":"

The Amazon OpenSearch Service type name. For Elasticsearch 6.x, there can be only one type per index. If you try to specify a new type for an existing index that already has another type, Kinesis Data Firehose returns an error during run time.

" + "documentation":"

The Amazon OpenSearch Service type name. For Elasticsearch 6.x, there can be only one type per index. If you try to specify a new type for an existing index that already has another type, Firehose returns an error during run time.

" }, "IndexRotationPeriod":{ "shape":"AmazonopensearchserviceIndexRotationPeriod", @@ -432,11 +432,11 @@ }, "RetryOptions":{ "shape":"AmazonopensearchserviceRetryOptions", - "documentation":"

The retry behavior in case Kinesis Data Firehose is unable to deliver documents to Amazon OpenSearch Service. The default value is 300 (5 minutes).

" + "documentation":"

The retry behavior in case Firehose is unable to deliver documents to Amazon OpenSearch Service. The default value is 300 (5 minutes).

" }, "S3BackupMode":{ "shape":"AmazonopensearchserviceS3BackupMode", - "documentation":"

Defines how documents should be delivered to Amazon S3. When it is set to FailedDocumentsOnly, Kinesis Data Firehose writes any documents that could not be indexed to the configured Amazon S3 destination, with AmazonOpenSearchService-failed/ appended to the key prefix. When set to AllDocuments, Kinesis Data Firehose delivers all incoming records to Amazon S3, and also writes failed documents with AmazonOpenSearchService-failed/ appended to the prefix.

" + "documentation":"

Defines how documents should be delivered to Amazon S3. When it is set to FailedDocumentsOnly, Firehose writes any documents that could not be indexed to the configured Amazon S3 destination, with AmazonOpenSearchService-failed/ appended to the key prefix. When set to AllDocuments, Firehose delivers all incoming records to Amazon S3, and also writes failed documents with AmazonOpenSearchService-failed/ appended to the prefix.

" }, "S3Configuration":{"shape":"S3DestinationConfiguration"}, "ProcessingConfiguration":{"shape":"ProcessingConfiguration"}, @@ -444,7 +444,7 @@ "VpcConfiguration":{"shape":"VpcConfiguration"}, "DocumentIdOptions":{ "shape":"DocumentIdOptions", - "documentation":"

Indicates the method for setting up document ID. The supported methods are Kinesis Data Firehose generated document ID and OpenSearch Service generated document ID.

" + "documentation":"

Indicates the method for setting up document ID. The supported methods are Firehose generated document ID and OpenSearch Service generated document ID.

" } }, "documentation":"

Describes the configuration of a destination in Amazon OpenSearch Service

" @@ -462,7 +462,7 @@ }, "ClusterEndpoint":{ "shape":"AmazonopensearchserviceClusterEndpoint", - "documentation":"

The endpoint to use when communicating with the cluster. Kinesis Data Firehose uses either this ClusterEndpoint or the DomainARN field to send data to Amazon OpenSearch Service.

" + "documentation":"

The endpoint to use when communicating with the cluster. Firehose uses either this ClusterEndpoint or the DomainARN field to send data to Amazon OpenSearch Service.

" }, "IndexName":{ "shape":"AmazonopensearchserviceIndexName", @@ -494,7 +494,7 @@ "VpcConfigurationDescription":{"shape":"VpcConfigurationDescription"}, "DocumentIdOptions":{ "shape":"DocumentIdOptions", - "documentation":"

Indicates the method for setting up document ID. The supported methods are Kinesis Data Firehose generated document ID and OpenSearch Service generated document ID.

" + "documentation":"

Indicates the method for setting up document ID. The supported methods are Firehose generated document ID and OpenSearch Service generated document ID.

" } }, "documentation":"

The destination description in Amazon OpenSearch Service.

" @@ -504,7 +504,7 @@ "members":{ "RoleARN":{ "shape":"RoleARN", - "documentation":"

The Amazon Resource Name (ARN) of the IAM role to be assumed by Kinesis Data Firehose for calling the Amazon OpenSearch Service Configuration API and for indexing documents.

" + "documentation":"

The Amazon Resource Name (ARN) of the IAM role to be assumed by Firehose for calling the Amazon OpenSearch Service Configuration API and for indexing documents.

" }, "DomainARN":{ "shape":"AmazonopensearchserviceDomainARN", @@ -520,7 +520,7 @@ }, "TypeName":{ "shape":"AmazonopensearchserviceTypeName", - "documentation":"

The Amazon OpenSearch Service type name. For Elasticsearch 6.x, there can be only one type per index. If you try to specify a new type for an existing index that already has another type, Kinesis Data Firehose returns an error during runtime.

If you upgrade Elasticsearch from 6.x to 7.x and don’t update your delivery stream, Kinesis Data Firehose still delivers data to Elasticsearch with the old index name and type name. If you want to update your delivery stream with a new index name, provide an empty string for TypeName.

" + "documentation":"

The Amazon OpenSearch Service type name. For Elasticsearch 6.x, there can be only one type per index. If you try to specify a new type for an existing index that already has another type, Firehose returns an error during runtime.

If you upgrade Elasticsearch from 6.x to 7.x and don’t update your delivery stream, Firehose still delivers data to Elasticsearch with the old index name and type name. If you want to update your delivery stream with a new index name, provide an empty string for TypeName.

" }, "IndexRotationPeriod":{ "shape":"AmazonopensearchserviceIndexRotationPeriod", @@ -532,14 +532,14 @@ }, "RetryOptions":{ "shape":"AmazonopensearchserviceRetryOptions", - "documentation":"

The retry behavior in case Kinesis Data Firehose is unable to deliver documents to Amazon OpenSearch Service. The default value is 300 (5 minutes).

" + "documentation":"

The retry behavior in case Firehose is unable to deliver documents to Amazon OpenSearch Service. The default value is 300 (5 minutes).

" }, "S3Update":{"shape":"S3DestinationUpdate"}, "ProcessingConfiguration":{"shape":"ProcessingConfiguration"}, "CloudWatchLoggingOptions":{"shape":"CloudWatchLoggingOptions"}, "DocumentIdOptions":{ "shape":"DocumentIdOptions", - "documentation":"

Indicates the method for setting up document ID. The supported methods are Kinesis Data Firehose generated document ID and OpenSearch Service generated document ID.

" + "documentation":"

Indicates the method for setting up document ID. The supported methods are Firehose generated document ID and OpenSearch Service generated document ID.

" } }, "documentation":"

Describes an update for a destination in Amazon OpenSearch Service.

" @@ -576,10 +576,10 @@ "members":{ "DurationInSeconds":{ "shape":"AmazonopensearchserviceRetryDurationInSeconds", - "documentation":"

After an initial failure to deliver to Amazon OpenSearch Service, the total amount of time during which Kinesis Data Firehose retries delivery (including the first attempt). After this time has elapsed, the failed documents are written to Amazon S3. Default value is 300 seconds (5 minutes). A value of 0 (zero) results in no retries.

" + "documentation":"

After an initial failure to deliver to Amazon OpenSearch Service, the total amount of time during which Firehose retries delivery (including the first attempt). After this time has elapsed, the failed documents are written to Amazon S3. Default value is 300 seconds (5 minutes). A value of 0 (zero) results in no retries.

" } }, - "documentation":"

Configures retry behavior in case Kinesis Data Firehose is unable to deliver documents to Amazon OpenSearch Service.

" + "documentation":"

Configures retry behavior in case Firehose is unable to deliver documents to Amazon OpenSearch Service.

" }, "AmazonopensearchserviceS3BackupMode":{ "type":"string", @@ -635,7 +635,7 @@ "documentation":"

Buffer incoming data for the specified period of time, in seconds, before delivering it to the destination. The default value is 300. This parameter is optional but if you specify a value for it, you must also specify a value for SizeInMBs, and vice versa.

" } }, - "documentation":"

Describes hints for the buffering to perform before delivering data to the destination. These options are treated as hints, and therefore Kinesis Data Firehose might choose to use different values when it is optimal. The SizeInMBs and IntervalInSeconds parameters are optional. However, if specify a value for one of them, you must also provide a value for the other.

" + "documentation":"

Describes hints for the buffering to perform before delivering data to the destination. These options are treated as hints, and therefore Firehose might choose to use different values when it is optimal. The SizeInMBs and IntervalInSeconds parameters are optional. However, if you specify a value for one of them, you must also provide a value for the other.

" }, "CloudWatchLoggingOptions":{ "type":"structure", @@ -715,7 +715,7 @@ }, "CopyOptions":{ "shape":"CopyOptions", - "documentation":"

Optional parameters to use with the Amazon Redshift COPY command. For more information, see the \"Optional Parameters\" section of Amazon Redshift COPY command. Some possible examples that would apply to Kinesis Data Firehose are as follows:

delimiter '\\t' lzop; - fields are delimited with \"\\t\" (TAB character) and compressed using lzop.

delimiter '|' - fields are delimited with \"|\" (this is the default delimiter).

delimiter '|' escape - the delimiter should be escaped.

fixedwidth 'venueid:3,venuename:25,venuecity:12,venuestate:2,venueseats:6' - fields are fixed width in the source, with each width specified after every column in the table.

JSON 's3://mybucket/jsonpaths.txt' - data is in JSON format, and the path specified is the format of the data.

For more examples, see Amazon Redshift COPY command examples.

" + "documentation":"

Optional parameters to use with the Amazon Redshift COPY command. For more information, see the \"Optional Parameters\" section of Amazon Redshift COPY command. Some possible examples that would apply to Firehose are as follows:

delimiter '\\t' lzop; - fields are delimited with \"\\t\" (TAB character) and compressed using lzop.

delimiter '|' - fields are delimited with \"|\" (this is the default delimiter).

delimiter '|' escape - the delimiter should be escaped.

fixedwidth 'venueid:3,venuename:25,venuecity:12,venuestate:2,venueseats:6' - fields are fixed width in the source, with each width specified after every column in the table.

JSON 's3://mybucket/jsonpaths.txt' - data is in JSON format, and the path specified is the format of the data.

For more examples, see Amazon Redshift COPY command examples.

" } }, "documentation":"

Describes a COPY command for Amazon Redshift.

" @@ -783,7 +783,11 @@ "shape":"AmazonOpenSearchServerlessDestinationConfiguration", "documentation":"

The destination in the Serverless offering for Amazon OpenSearch Service. You can specify only one destination.

" }, - "MSKSourceConfiguration":{"shape":"MSKSourceConfiguration"} + "MSKSourceConfiguration":{"shape":"MSKSourceConfiguration"}, + "SnowflakeDestinationConfiguration":{ + "shape":"SnowflakeDestinationConfiguration", + "documentation":"

Configure Snowflake destination

" + } } }, "CreateDeliveryStreamOutput":{ @@ -795,6 +799,11 @@ } } }, + "CustomTimeZone":{ + "type":"string", + "max":50, + "min":0 + }, "Data":{ "type":"blob", "max":1024000, @@ -809,18 +818,18 @@ }, "InputFormatConfiguration":{ "shape":"InputFormatConfiguration", - "documentation":"

Specifies the deserializer that you want Kinesis Data Firehose to use to convert the format of your data from JSON. This parameter is required if Enabled is set to true.

" + "documentation":"

Specifies the deserializer that you want Firehose to use to convert the format of your data from JSON. This parameter is required if Enabled is set to true.

" }, "OutputFormatConfiguration":{ "shape":"OutputFormatConfiguration", - "documentation":"

Specifies the serializer that you want Kinesis Data Firehose to use to convert the format of your data to the Parquet or ORC format. This parameter is required if Enabled is set to true.

" + "documentation":"

Specifies the serializer that you want Firehose to use to convert the format of your data to the Parquet or ORC format. This parameter is required if Enabled is set to true.

" }, "Enabled":{ "shape":"BooleanObject", "documentation":"

Defaults to true. Set it to false if you want to disable format conversion while preserving the configuration details.

" } }, - "documentation":"

Specifies that you want Kinesis Data Firehose to convert data from the JSON format to the Parquet or ORC format before writing it to Amazon S3. Kinesis Data Firehose uses the serializer and deserializer that you specify, in addition to the column information from the Amazon Web Services Glue table, to deserialize your input data from JSON and then serialize it to the Parquet or ORC format. For more information, see Kinesis Data Firehose Record Format Conversion.

" + "documentation":"

Specifies that you want Firehose to convert data from the JSON format to the Parquet or ORC format before writing it to Amazon S3. Firehose uses the serializer and deserializer that you specify, in addition to the column information from the Amazon Web Services Glue table, to deserialize your input data from JSON and then serialize it to the Parquet or ORC format. For more information, see Firehose Record Format Conversion.

" }, "DataTableColumns":{ "type":"string", @@ -851,7 +860,7 @@ }, "AllowForceDelete":{ "shape":"BooleanObject", - "documentation":"

Set this to true if you want to delete the delivery stream even if Kinesis Data Firehose is unable to retire the grant for the CMK. Kinesis Data Firehose might be unable to retire the grant due to a customer error, such as when the CMK or the grant are in an invalid state. If you force deletion, you can then use the RevokeGrant operation to revoke the grant you gave to Kinesis Data Firehose. If a failure to retire the grant happens due to an Amazon Web Services KMS issue, Kinesis Data Firehose keeps retrying the delete operation.

The default value is false.

" + "documentation":"

Set this to true if you want to delete the delivery stream even if Firehose is unable to retire the grant for the CMK. Firehose might be unable to retire the grant due to a customer error, such as when the CMK or the grant are in an invalid state. If you force deletion, you can then use the RevokeGrant operation to revoke the grant you gave to Firehose. If a failure to retire the grant happens due to an Amazon Web Services KMS issue, Firehose keeps retrying the delete operation.

The default value is false.

" } } }, @@ -958,11 +967,11 @@ "members":{ "KeyARN":{ "shape":"AWSKMSKeyARN", - "documentation":"

If you set KeyType to CUSTOMER_MANAGED_CMK, you must specify the Amazon Resource Name (ARN) of the CMK. If you set KeyType to Amazon Web Services_OWNED_CMK, Kinesis Data Firehose uses a service-account CMK.

" + "documentation":"

If you set KeyType to CUSTOMER_MANAGED_CMK, you must specify the Amazon Resource Name (ARN) of the CMK. If you set KeyType to AWS_OWNED_CMK, Firehose uses a service-account CMK.

" }, "KeyType":{ "shape":"KeyType", - "documentation":"

Indicates the type of customer master key (CMK) to use for encryption. The default setting is Amazon Web Services_OWNED_CMK. For more information about CMKs, see Customer Master Keys (CMKs). When you invoke CreateDeliveryStream or StartDeliveryStreamEncryption with KeyType set to CUSTOMER_MANAGED_CMK, Kinesis Data Firehose invokes the Amazon KMS operation CreateGrant to create a grant that allows the Kinesis Data Firehose service to use the customer managed CMK to perform encryption and decryption. Kinesis Data Firehose manages that grant.

When you invoke StartDeliveryStreamEncryption to change the CMK for a delivery stream that is encrypted with a customer managed CMK, Kinesis Data Firehose schedules the grant it had on the old CMK for retirement.

You can use a CMK of type CUSTOMER_MANAGED_CMK to encrypt up to 500 delivery streams. If a CreateDeliveryStream or StartDeliveryStreamEncryption operation exceeds this limit, Kinesis Data Firehose throws a LimitExceededException.

To encrypt your delivery stream, use symmetric CMKs. Kinesis Data Firehose doesn't support asymmetric CMKs. For information about symmetric and asymmetric CMKs, see About Symmetric and Asymmetric CMKs in the Amazon Web Services Key Management Service developer guide.

" + "documentation":"

Indicates the type of customer master key (CMK) to use for encryption. The default setting is AWS_OWNED_CMK. For more information about CMKs, see Customer Master Keys (CMKs). When you invoke CreateDeliveryStream or StartDeliveryStreamEncryption with KeyType set to CUSTOMER_MANAGED_CMK, Firehose invokes the Amazon KMS operation CreateGrant to create a grant that allows the Firehose service to use the customer managed CMK to perform encryption and decryption. Firehose manages that grant.

When you invoke StartDeliveryStreamEncryption to change the CMK for a delivery stream that is encrypted with a customer managed CMK, Firehose schedules the grant it had on the old CMK for retirement.

You can use a CMK of type CUSTOMER_MANAGED_CMK to encrypt up to 500 delivery streams. If a CreateDeliveryStream or StartDeliveryStreamEncryption operation exceeds this limit, Firehose throws a LimitExceededException.

To encrypt your delivery stream, use symmetric CMKs. Firehose doesn't support asymmetric CMKs. For information about symmetric and asymmetric CMKs, see About Symmetric and Asymmetric CMKs in the Amazon Web Services Key Management Service developer guide.

" } }, "documentation":"

Specifies the type and Amazon Resource Name (ARN) of the CMK to use for Server-Side Encryption (SSE).

" @@ -1046,7 +1055,7 @@ }, "ExclusiveStartDestinationId":{ "shape":"DestinationId", - "documentation":"

The ID of the destination to start returning the destination information. Kinesis Data Firehose supports one destination per delivery stream.

" + "documentation":"

The ID of the destination to start returning the destination information. Firehose supports one destination per delivery stream.

" } } }, @@ -1070,14 +1079,14 @@ "members":{ "OpenXJsonSerDe":{ "shape":"OpenXJsonSerDe", - "documentation":"

The OpenX SerDe. Used by Kinesis Data Firehose for deserializing data, which means converting it from the JSON format in preparation for serializing it to the Parquet or ORC format. This is one of two deserializers you can choose, depending on which one offers the functionality you need. The other option is the native Hive / HCatalog JsonSerDe.

" + "documentation":"

The OpenX SerDe. Used by Firehose for deserializing data, which means converting it from the JSON format in preparation for serializing it to the Parquet or ORC format. This is one of two deserializers you can choose, depending on which one offers the functionality you need. The other option is the native Hive / HCatalog JsonSerDe.

" }, "HiveJsonSerDe":{ "shape":"HiveJsonSerDe", - "documentation":"

The native Hive / HCatalog JsonSerDe. Used by Kinesis Data Firehose for deserializing data, which means converting it from the JSON format in preparation for serializing it to the Parquet or ORC format. This is one of two deserializers you can choose, depending on which one offers the functionality you need. The other option is the OpenX SerDe.

" + "documentation":"

The native Hive / HCatalog JsonSerDe. Used by Firehose for deserializing data, which means converting it from the JSON format in preparation for serializing it to the Parquet or ORC format. This is one of two deserializers you can choose, depending on which one offers the functionality you need. The other option is the OpenX SerDe.

" } }, - "documentation":"

The deserializer you want Kinesis Data Firehose to use for converting the input data from JSON. Kinesis Data Firehose then serializes the data to its final format using the Serializer. Kinesis Data Firehose supports two types of deserializers: the Apache Hive JSON SerDe and the OpenX JSON SerDe.

" + "documentation":"

The deserializer you want Firehose to use for converting the input data from JSON. Firehose then serializes the data to its final format using the Serializer. Firehose supports two types of deserializers: the Apache Hive JSON SerDe and the OpenX JSON SerDe.

" }, "DestinationDescription":{ "type":"structure", @@ -1115,6 +1124,10 @@ "shape":"HttpEndpointDestinationDescription", "documentation":"

Describes the specified HTTP endpoint destination.

" }, + "SnowflakeDestinationDescription":{ + "shape":"SnowflakeDestinationDescription", + "documentation":"

Optional description for the destination

" + }, "AmazonOpenSearchServerlessDestinationDescription":{ "shape":"AmazonOpenSearchServerlessDestinationDescription", "documentation":"

The destination in the Serverless offering for Amazon OpenSearch Service.

" @@ -1138,21 +1151,21 @@ "members":{ "DefaultDocumentIdFormat":{ "shape":"DefaultDocumentIdFormat", - "documentation":"

When the FIREHOSE_DEFAULT option is chosen, Kinesis Data Firehose generates a unique document ID for each record based on a unique internal identifier. The generated document ID is stable across multiple delivery attempts, which helps prevent the same record from being indexed multiple times with different document IDs.

When the NO_DOCUMENT_ID option is chosen, Kinesis Data Firehose does not include any document IDs in the requests it sends to the Amazon OpenSearch Service. This causes the Amazon OpenSearch Service domain to generate document IDs. In case of multiple delivery attempts, this may cause the same record to be indexed more than once with different document IDs. This option enables write-heavy operations, such as the ingestion of logs and observability data, to consume less resources in the Amazon OpenSearch Service domain, resulting in improved performance.

" + "documentation":"

When the FIREHOSE_DEFAULT option is chosen, Firehose generates a unique document ID for each record based on a unique internal identifier. The generated document ID is stable across multiple delivery attempts, which helps prevent the same record from being indexed multiple times with different document IDs.

When the NO_DOCUMENT_ID option is chosen, Firehose does not include any document IDs in the requests it sends to the Amazon OpenSearch Service. This causes the Amazon OpenSearch Service domain to generate document IDs. In case of multiple delivery attempts, this may cause the same record to be indexed more than once with different document IDs. This option enables write-heavy operations, such as the ingestion of logs and observability data, to consume less resources in the Amazon OpenSearch Service domain, resulting in improved performance.

" } }, - "documentation":"

Indicates the method for setting up document ID. The supported methods are Kinesis Data Firehose generated document ID and OpenSearch Service generated document ID.

" + "documentation":"

Indicates the method for setting up document ID. The supported methods are Firehose generated document ID and OpenSearch Service generated document ID.

" }, "DynamicPartitioningConfiguration":{ "type":"structure", "members":{ "RetryOptions":{ "shape":"RetryOptions", - "documentation":"

The retry behavior in case Kinesis Data Firehose is unable to deliver data to an Amazon S3 prefix.

" + "documentation":"

The retry behavior in case Firehose is unable to deliver data to an Amazon S3 prefix.

" }, "Enabled":{ "shape":"BooleanObject", - "documentation":"

Specifies that the dynamic partitioning is enabled for this Kinesis Data Firehose delivery stream.

" + "documentation":"

Specifies that the dynamic partitioning is enabled for this Firehose delivery stream.

" } }, "documentation":"

The configuration of the dynamic partitioning mechanism that creates smaller data sets from the streaming data by partitioning it based on partition keys. Currently, dynamic partitioning is only supported for Amazon S3 destinations.

" @@ -1197,7 +1210,7 @@ "members":{ "RoleARN":{ "shape":"RoleARN", - "documentation":"

The Amazon Resource Name (ARN) of the IAM role to be assumed by Kinesis Data Firehose for calling the Amazon ES Configuration API and for indexing documents. For more information, see Grant Kinesis Data Firehose Access to an Amazon S3 Destination and Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces.

" + "documentation":"

The Amazon Resource Name (ARN) of the IAM role to be assumed by Firehose for calling the Amazon ES Configuration API and for indexing documents. For more information, see Grant Firehose Access to an Amazon S3 Destination and Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces.

" }, "DomainARN":{ "shape":"ElasticsearchDomainARN", @@ -1213,7 +1226,7 @@ }, "TypeName":{ "shape":"ElasticsearchTypeName", - "documentation":"

The Elasticsearch type name. For Elasticsearch 6.x, there can be only one type per index. If you try to specify a new type for an existing index that already has another type, Kinesis Data Firehose returns an error during run time.

For Elasticsearch 7.x, don't specify a TypeName.

" + "documentation":"

The Elasticsearch type name. For Elasticsearch 6.x, there can be only one type per index. If you try to specify a new type for an existing index that already has another type, Firehose returns an error during run time.

For Elasticsearch 7.x, don't specify a TypeName.

" }, "IndexRotationPeriod":{ "shape":"ElasticsearchIndexRotationPeriod", @@ -1225,11 +1238,11 @@ }, "RetryOptions":{ "shape":"ElasticsearchRetryOptions", - "documentation":"

The retry behavior in case Kinesis Data Firehose is unable to deliver documents to Amazon ES. The default value is 300 (5 minutes).

" + "documentation":"

The retry behavior in case Firehose is unable to deliver documents to Amazon ES. The default value is 300 (5 minutes).

" }, "S3BackupMode":{ "shape":"ElasticsearchS3BackupMode", - "documentation":"

Defines how documents should be delivered to Amazon S3. When it is set to FailedDocumentsOnly, Kinesis Data Firehose writes any documents that could not be indexed to the configured Amazon S3 destination, with AmazonOpenSearchService-failed/ appended to the key prefix. When set to AllDocuments, Kinesis Data Firehose delivers all incoming records to Amazon S3, and also writes failed documents with AmazonOpenSearchService-failed/ appended to the prefix. For more information, see Amazon S3 Backup for the Amazon ES Destination. Default value is FailedDocumentsOnly.

You can't change this backup mode after you create the delivery stream.

" + "documentation":"

Defines how documents should be delivered to Amazon S3. When it is set to FailedDocumentsOnly, Firehose writes any documents that could not be indexed to the configured Amazon S3 destination, with AmazonOpenSearchService-failed/ appended to the key prefix. When set to AllDocuments, Firehose delivers all incoming records to Amazon S3, and also writes failed documents with AmazonOpenSearchService-failed/ appended to the prefix. For more information, see Amazon S3 Backup for the Amazon ES Destination. Default value is FailedDocumentsOnly.

You can't change this backup mode after you create the delivery stream.

" }, "S3Configuration":{ "shape":"S3DestinationConfiguration", @@ -1249,7 +1262,7 @@ }, "DocumentIdOptions":{ "shape":"DocumentIdOptions", - "documentation":"

Indicates the method for setting up document ID. The supported methods are Kinesis Data Firehose generated document ID and OpenSearch Service generated document ID.

" + "documentation":"

Indicates the method for setting up document ID. The supported methods are Firehose generated document ID and OpenSearch Service generated document ID.

" } }, "documentation":"

Describes the configuration of a destination in Amazon ES.

" @@ -1263,11 +1276,11 @@ }, "DomainARN":{ "shape":"ElasticsearchDomainARN", - "documentation":"

The ARN of the Amazon ES domain. For more information, see Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces.

Kinesis Data Firehose uses either ClusterEndpoint or DomainARN to send data to Amazon ES.

" + "documentation":"

The ARN of the Amazon ES domain. For more information, see Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces.

Firehose uses either ClusterEndpoint or DomainARN to send data to Amazon ES.

" }, "ClusterEndpoint":{ "shape":"ElasticsearchClusterEndpoint", - "documentation":"

The endpoint to use when communicating with the cluster. Kinesis Data Firehose uses either this ClusterEndpoint or the DomainARN field to send data to Amazon ES.

" + "documentation":"

The endpoint to use when communicating with the cluster. Firehose uses either this ClusterEndpoint or the DomainARN field to send data to Amazon ES.

" }, "IndexName":{ "shape":"ElasticsearchIndexName", @@ -1311,7 +1324,7 @@ }, "DocumentIdOptions":{ "shape":"DocumentIdOptions", - "documentation":"

Indicates the method for setting up document ID. The supported methods are Kinesis Data Firehose generated document ID and OpenSearch Service generated document ID.

" + "documentation":"

Indicates the method for setting up document ID. The supported methods are Firehose generated document ID and OpenSearch Service generated document ID.

" } }, "documentation":"

The destination description in Amazon ES.

" @@ -1321,7 +1334,7 @@ "members":{ "RoleARN":{ "shape":"RoleARN", - "documentation":"

The Amazon Resource Name (ARN) of the IAM role to be assumed by Kinesis Data Firehose for calling the Amazon ES Configuration API and for indexing documents. For more information, see Grant Kinesis Data Firehose Access to an Amazon S3 Destination and Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces.

" + "documentation":"

The Amazon Resource Name (ARN) of the IAM role to be assumed by Firehose for calling the Amazon ES Configuration API and for indexing documents. For more information, see Grant Firehose Access to an Amazon S3 Destination and Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces.

" }, "DomainARN":{ "shape":"ElasticsearchDomainARN", @@ -1337,7 +1350,7 @@ }, "TypeName":{ "shape":"ElasticsearchTypeName", - "documentation":"

The Elasticsearch type name. For Elasticsearch 6.x, there can be only one type per index. If you try to specify a new type for an existing index that already has another type, Kinesis Data Firehose returns an error during runtime.

If you upgrade Elasticsearch from 6.x to 7.x and don’t update your delivery stream, Kinesis Data Firehose still delivers data to Elasticsearch with the old index name and type name. If you want to update your delivery stream with a new index name, provide an empty string for TypeName.

" + "documentation":"

The Elasticsearch type name. For Elasticsearch 6.x, there can be only one type per index. If you try to specify a new type for an existing index that already has another type, Firehose returns an error during runtime.

If you upgrade Elasticsearch from 6.x to 7.x and don’t update your delivery stream, Firehose still delivers data to Elasticsearch with the old index name and type name. If you want to update your delivery stream with a new index name, provide an empty string for TypeName.

" }, "IndexRotationPeriod":{ "shape":"ElasticsearchIndexRotationPeriod", @@ -1349,7 +1362,7 @@ }, "RetryOptions":{ "shape":"ElasticsearchRetryOptions", - "documentation":"

The retry behavior in case Kinesis Data Firehose is unable to deliver documents to Amazon ES. The default value is 300 (5 minutes).

" + "documentation":"

The retry behavior in case Firehose is unable to deliver documents to Amazon ES. The default value is 300 (5 minutes).

" }, "S3Update":{ "shape":"S3DestinationUpdate", @@ -1365,7 +1378,7 @@ }, "DocumentIdOptions":{ "shape":"DocumentIdOptions", - "documentation":"

Indicates the method for setting up document ID. The supported methods are Kinesis Data Firehose generated document ID and OpenSearch Service generated document ID.

" + "documentation":"

Indicates the method for setting up document ID. The supported methods are Firehose generated document ID and OpenSearch Service generated document ID.

" } }, "documentation":"

Describes an update for a destination in Amazon ES.

" @@ -1402,10 +1415,10 @@ "members":{ "DurationInSeconds":{ "shape":"ElasticsearchRetryDurationInSeconds", - "documentation":"

After an initial failure to deliver to Amazon ES, the total amount of time during which Kinesis Data Firehose retries delivery (including the first attempt). After this time has elapsed, the failed documents are written to Amazon S3. Default value is 300 seconds (5 minutes). A value of 0 (zero) results in no retries.

" + "documentation":"

After an initial failure to deliver to Amazon ES, the total amount of time during which Firehose retries delivery (including the first attempt). After this time has elapsed, the failed documents are written to Amazon S3. Default value is 300 seconds (5 minutes). A value of 0 (zero) results in no retries.

" } }, - "documentation":"

Configures retry behavior in case Kinesis Data Firehose is unable to deliver documents to Amazon ES.

" + "documentation":"

Configures retry behavior in case Firehose is unable to deliver documents to Amazon ES.

" }, "ElasticsearchS3BackupMode":{ "type":"string", @@ -1463,7 +1476,7 @@ }, "ErrorOutputPrefix":{ "shape":"ErrorOutputPrefix", - "documentation":"

A prefix that Kinesis Data Firehose evaluates and adds to failed records before writing them to S3. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects.

" + "documentation":"

A prefix that Firehose evaluates and adds to failed records before writing them to S3. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects.

" }, "BufferingHints":{ "shape":"BufferingHints", @@ -1500,6 +1513,14 @@ "DynamicPartitioningConfiguration":{ "shape":"DynamicPartitioningConfiguration", "documentation":"

The configuration of the dynamic partitioning mechanism that creates smaller data sets from the streaming data by partitioning it based on partition keys. Currently, dynamic partitioning is only supported for Amazon S3 destinations.

" + }, + "FileExtension":{ + "shape":"FileExtension", + "documentation":"

Specify a file extension. It will override the default file extension

" + }, + "CustomTimeZone":{ + "shape":"CustomTimeZone", + "documentation":"

The time zone you prefer. UTC is the default.

" } }, "documentation":"

Describes the configuration of a destination in Amazon S3.

" @@ -1528,7 +1549,7 @@ }, "ErrorOutputPrefix":{ "shape":"ErrorOutputPrefix", - "documentation":"

A prefix that Kinesis Data Firehose evaluates and adds to failed records before writing them to S3. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects.

" + "documentation":"

A prefix that Firehose evaluates and adds to failed records before writing them to S3. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects.

" }, "BufferingHints":{ "shape":"BufferingHints", @@ -1565,6 +1586,14 @@ "DynamicPartitioningConfiguration":{ "shape":"DynamicPartitioningConfiguration", "documentation":"

The configuration of the dynamic partitioning mechanism that creates smaller data sets from the streaming data by partitioning it based on partition keys. Currently, dynamic partitioning is only supported for Amazon S3 destinations.

" + }, + "FileExtension":{ + "shape":"FileExtension", + "documentation":"

Specify a file extension. It will override the default file extension

" + }, + "CustomTimeZone":{ + "shape":"CustomTimeZone", + "documentation":"

The time zone you prefer. UTC is the default.

" } }, "documentation":"

Describes a destination in Amazon S3.

" @@ -1586,7 +1615,7 @@ }, "ErrorOutputPrefix":{ "shape":"ErrorOutputPrefix", - "documentation":"

A prefix that Kinesis Data Firehose evaluates and adds to failed records before writing them to S3. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects.

" + "documentation":"

A prefix that Firehose evaluates and adds to failed records before writing them to S3. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects.

" }, "BufferingHints":{ "shape":"BufferingHints", @@ -1623,6 +1652,14 @@ "DynamicPartitioningConfiguration":{ "shape":"DynamicPartitioningConfiguration", "documentation":"

The configuration of the dynamic partitioning mechanism that creates smaller data sets from the streaming data by partitioning it based on partition keys. Currently, dynamic partitioning is only supported for Amazon S3 destinations.

" + }, + "FileExtension":{ + "shape":"FileExtension", + "documentation":"

Specify a file extension. It will override the default file extension

" + }, + "CustomTimeZone":{ + "shape":"CustomTimeZone", + "documentation":"

The time zone you prefer. UTC is the default.

" } }, "documentation":"

Describes an update for a destination in Amazon S3.

" @@ -1645,6 +1682,12 @@ }, "documentation":"

Provides details in case one of the following operations fails due to an error related to KMS: CreateDeliveryStream, DeleteDeliveryStream, StartDeliveryStreamEncryption, StopDeliveryStreamEncryption.

" }, + "FileExtension":{ + "type":"string", + "max":128, + "min":0, + "pattern":"^$|\\.[0-9a-z!\\-_.*'()]+" + }, "HECAcknowledgmentTimeoutInSeconds":{ "type":"integer", "max":600, @@ -1674,10 +1717,10 @@ "members":{ "TimestampFormats":{ "shape":"ListOfNonEmptyStrings", - "documentation":"

Indicates how you want Kinesis Data Firehose to parse the date and timestamps that may be present in your input data JSON. To specify these format strings, follow the pattern syntax of JodaTime's DateTimeFormat format strings. For more information, see Class DateTimeFormat. You can also use the special value millis to parse timestamps in epoch milliseconds. If you don't specify a format, Kinesis Data Firehose uses java.sql.Timestamp::valueOf by default.

" + "documentation":"

Indicates how you want Firehose to parse the date and timestamps that may be present in your input data JSON. To specify these format strings, follow the pattern syntax of JodaTime's DateTimeFormat format strings. For more information, see Class DateTimeFormat. You can also use the special value millis to parse timestamps in epoch milliseconds. If you don't specify a format, Firehose uses java.sql.Timestamp::valueOf by default.

" } }, - "documentation":"

The native Hive / HCatalog JsonSerDe. Used by Kinesis Data Firehose for deserializing data, which means converting it from the JSON format in preparation for serializing it to the Parquet or ORC format. This is one of two deserializers you can choose, depending on which one offers the functionality you need. The other option is the OpenX SerDe.

" + "documentation":"

The native Hive / HCatalog JsonSerDe. Used by Firehose for deserializing data, which means converting it from the JSON format in preparation for serializing it to the Parquet or ORC format. This is one of two deserializers you can choose, depending on which one offers the functionality you need. The other option is the OpenX SerDe.

" }, "HttpEndpointAccessKey":{ "type":"string", @@ -1712,7 +1755,7 @@ "documentation":"

Buffer incoming data for the specified period of time, in seconds, before delivering it to the destination. The default value is 300 (5 minutes).

" } }, - "documentation":"

Describes the buffering options that can be applied before data is delivered to the HTTP endpoint destination. Kinesis Data Firehose treats these options as hints, and it might choose to use more optimal values. The SizeInMBs and IntervalInSeconds parameters are optional. However, if specify a value for one of them, you must also provide a value for the other.

" + "documentation":"

Describes the buffering options that can be applied before data is delivered to the HTTP endpoint destination. Firehose treats these options as hints, and it might choose to use more optimal values. The SizeInMBs and IntervalInSeconds parameters are optional. However, if you specify a value for one of them, you must also provide a value for the other.

" }, "HttpEndpointBufferingIntervalInSeconds":{ "type":"integer", @@ -1794,7 +1837,7 @@ }, "BufferingHints":{ "shape":"HttpEndpointBufferingHints", - "documentation":"

The buffering options that can be used before data is delivered to the specified destination. Kinesis Data Firehose treats these options as hints, and it might choose to use more optimal values. The SizeInMBs and IntervalInSeconds parameters are optional. However, if you specify a value for one of them, you must also provide a value for the other.

" + "documentation":"

The buffering options that can be used before data is delivered to the specified destination. Firehose treats these options as hints, and it might choose to use more optimal values. The SizeInMBs and IntervalInSeconds parameters are optional. However, if you specify a value for one of them, you must also provide a value for the other.

" }, "CloudWatchLoggingOptions":{"shape":"CloudWatchLoggingOptions"}, "RequestConfiguration":{ @@ -1804,15 +1847,15 @@ "ProcessingConfiguration":{"shape":"ProcessingConfiguration"}, "RoleARN":{ "shape":"RoleARN", - "documentation":"

Kinesis Data Firehose uses this IAM role for all the permissions that the delivery stream needs.

" + "documentation":"

Firehose uses this IAM role for all the permissions that the delivery stream needs.

" }, "RetryOptions":{ "shape":"HttpEndpointRetryOptions", - "documentation":"

Describes the retry behavior in case Kinesis Data Firehose is unable to deliver data to the specified HTTP endpoint destination, or if it doesn't receive a valid acknowledgment of receipt from the specified HTTP endpoint destination.

" + "documentation":"

Describes the retry behavior in case Firehose is unable to deliver data to the specified HTTP endpoint destination, or if it doesn't receive a valid acknowledgment of receipt from the specified HTTP endpoint destination.

" }, "S3BackupMode":{ "shape":"HttpEndpointS3BackupMode", - "documentation":"

Describes the S3 bucket backup options for the data that Kinesis Data Firehose delivers to the HTTP endpoint destination. You can back up all documents (AllData) or only the documents that Kinesis Data Firehose could not deliver to the specified HTTP endpoint destination (FailedDataOnly).

" + "documentation":"

Describes the S3 bucket backup options for the data that Firehose delivers to the HTTP endpoint destination. You can back up all documents (AllData) or only the documents that Firehose could not deliver to the specified HTTP endpoint destination (FailedDataOnly).

" }, "S3Configuration":{"shape":"S3DestinationConfiguration"} }, @@ -1827,7 +1870,7 @@ }, "BufferingHints":{ "shape":"HttpEndpointBufferingHints", - "documentation":"

Describes buffering options that can be applied to the data before it is delivered to the HTTPS endpoint destination. Kinesis Data Firehose teats these options as hints, and it might choose to use more optimal values. The SizeInMBs and IntervalInSeconds parameters are optional. However, if specify a value for one of them, you must also provide a value for the other.

" + "documentation":"

Describes buffering options that can be applied to the data before it is delivered to the HTTPS endpoint destination. Firehose treats these options as hints, and it might choose to use more optimal values. The SizeInMBs and IntervalInSeconds parameters are optional. However, if you specify a value for one of them, you must also provide a value for the other.

" }, "CloudWatchLoggingOptions":{"shape":"CloudWatchLoggingOptions"}, "RequestConfiguration":{ @@ -1837,15 +1880,15 @@ "ProcessingConfiguration":{"shape":"ProcessingConfiguration"}, "RoleARN":{ "shape":"RoleARN", - "documentation":"

Kinesis Data Firehose uses this IAM role for all the permissions that the delivery stream needs.

" + "documentation":"

Firehose uses this IAM role for all the permissions that the delivery stream needs.

" }, "RetryOptions":{ "shape":"HttpEndpointRetryOptions", - "documentation":"

Describes the retry behavior in case Kinesis Data Firehose is unable to deliver data to the specified HTTP endpoint destination, or if it doesn't receive a valid acknowledgment of receipt from the specified HTTP endpoint destination.

" + "documentation":"

Describes the retry behavior in case Firehose is unable to deliver data to the specified HTTP endpoint destination, or if it doesn't receive a valid acknowledgment of receipt from the specified HTTP endpoint destination.

" }, "S3BackupMode":{ "shape":"HttpEndpointS3BackupMode", - "documentation":"

Describes the S3 bucket backup options for the data that Kinesis Firehose delivers to the HTTP endpoint destination. You can back up all documents (AllData) or only the documents that Kinesis Data Firehose could not deliver to the specified HTTP endpoint destination (FailedDataOnly).

" + "documentation":"

Describes the S3 bucket backup options for the data that Kinesis Firehose delivers to the HTTP endpoint destination. You can back up all documents (AllData) or only the documents that Firehose could not deliver to the specified HTTP endpoint destination (FailedDataOnly).

" }, "S3DestinationDescription":{"shape":"S3DestinationDescription"} }, @@ -1860,7 +1903,7 @@ }, "BufferingHints":{ "shape":"HttpEndpointBufferingHints", - "documentation":"

Describes buffering options that can be applied to the data before it is delivered to the HTTPS endpoint destination. Kinesis Data Firehose teats these options as hints, and it might choose to use more optimal values. The SizeInMBs and IntervalInSeconds parameters are optional. However, if specify a value for one of them, you must also provide a value for the other.

" + "documentation":"

Describes buffering options that can be applied to the data before it is delivered to the HTTPS endpoint destination. Firehose treats these options as hints, and it might choose to use more optimal values. The SizeInMBs and IntervalInSeconds parameters are optional. However, if you specify a value for one of them, you must also provide a value for the other.

" }, "CloudWatchLoggingOptions":{"shape":"CloudWatchLoggingOptions"}, "RequestConfiguration":{ @@ -1870,15 +1913,15 @@ "ProcessingConfiguration":{"shape":"ProcessingConfiguration"}, "RoleARN":{ "shape":"RoleARN", - "documentation":"

Kinesis Data Firehose uses this IAM role for all the permissions that the delivery stream needs.

" + "documentation":"

Firehose uses this IAM role for all the permissions that the delivery stream needs.

" }, "RetryOptions":{ "shape":"HttpEndpointRetryOptions", - "documentation":"

Describes the retry behavior in case Kinesis Data Firehose is unable to deliver data to the specified HTTP endpoint destination, or if it doesn't receive a valid acknowledgment of receipt from the specified HTTP endpoint destination.

" + "documentation":"

Describes the retry behavior in case Firehose is unable to deliver data to the specified HTTP endpoint destination, or if it doesn't receive a valid acknowledgment of receipt from the specified HTTP endpoint destination.

" }, "S3BackupMode":{ "shape":"HttpEndpointS3BackupMode", - "documentation":"

Describes the S3 bucket backup options for the data that Kinesis Firehose delivers to the HTTP endpoint destination. You can back up all documents (AllData) or only the documents that Kinesis Data Firehose could not deliver to the specified HTTP endpoint destination (FailedDataOnly).

" + "documentation":"

Describes the S3 bucket backup options for the data that Kinesis Firehose delivers to the HTTP endpoint destination. You can back up all documents (AllData) or only the documents that Firehose could not deliver to the specified HTTP endpoint destination (FailedDataOnly).

" }, "S3Update":{"shape":"S3DestinationUpdate"} }, @@ -1895,7 +1938,7 @@ "members":{ "ContentEncoding":{ "shape":"ContentEncoding", - "documentation":"

Kinesis Data Firehose uses the content encoding to compress the body of a request before sending the request to the destination. For more information, see Content-Encoding in MDN Web Docs, the official Mozilla documentation.

" + "documentation":"

Firehose uses the content encoding to compress the body of a request before sending the request to the destination. For more information, see Content-Encoding in MDN Web Docs, the official Mozilla documentation.

" }, "CommonAttributes":{ "shape":"HttpEndpointCommonAttributesList", @@ -1914,10 +1957,10 @@ "members":{ "DurationInSeconds":{ "shape":"HttpEndpointRetryDurationInSeconds", - "documentation":"

The total amount of time that Kinesis Data Firehose spends on retries. This duration starts after the initial attempt to send data to the custom destination via HTTPS endpoint fails. It doesn't include the periods during which Kinesis Data Firehose waits for acknowledgment from the specified destination after each attempt.

" + "documentation":"

The total amount of time that Firehose spends on retries. This duration starts after the initial attempt to send data to the custom destination via HTTPS endpoint fails. It doesn't include the periods during which Firehose waits for acknowledgment from the specified destination after each attempt.

" } }, - "documentation":"

Describes the retry behavior in case Kinesis Data Firehose is unable to deliver data to the specified HTTP endpoint destination, or if it doesn't receive a valid acknowledgment of receipt from the specified HTTP endpoint destination.

" + "documentation":"

Describes the retry behavior in case Firehose is unable to deliver data to the specified HTTP endpoint destination, or if it doesn't receive a valid acknowledgment of receipt from the specified HTTP endpoint destination.

" }, "HttpEndpointS3BackupMode":{ "type":"string", @@ -1965,7 +2008,7 @@ "code":{"shape":"ErrorCode"}, "message":{"shape":"ErrorMessage"} }, - "documentation":"

Kinesis Data Firehose throws this exception when an attempt to put records or to start or stop delivery stream encryption fails. This happens when the KMS service throws one of the following exception types: AccessDeniedException, InvalidStateException, DisabledException, or NotFoundException.

", + "documentation":"

Firehose throws this exception when an attempt to put records or to start or stop delivery stream encryption fails. This happens when the KMS service throws one of the following exception types: AccessDeniedException, InvalidStateException, DisabledException, or NotFoundException.

", "exception":true }, "InvalidSourceException":{ @@ -2032,10 +2075,10 @@ }, "DeliveryStartTimestamp":{ "shape":"DeliveryStartTimestamp", - "documentation":"

Kinesis Data Firehose starts retrieving records from the Kinesis data stream starting with this timestamp.

" + "documentation":"

Firehose starts retrieving records from the Kinesis data stream starting with this timestamp.

" } }, - "documentation":"

Details about a Kinesis data stream used as the source for a Kinesis Data Firehose delivery stream.

" + "documentation":"

Details about a Kinesis data stream used as the source for a Firehose delivery stream.

" }, "LimitExceededException":{ "type":"structure", @@ -2199,10 +2242,10 @@ }, "DeliveryStartTimestamp":{ "shape":"DeliveryStartTimestamp", - "documentation":"

Kinesis Data Firehose starts retrieving records from the topic within the Amazon MSK cluster starting with this timestamp.

" + "documentation":"

Firehose starts retrieving records from the topic within the Amazon MSK cluster starting with this timestamp.

" } }, - "documentation":"

Details about the Amazon MSK cluster used as the source for a Kinesis Data Firehose delivery stream.

" + "documentation":"

Details about the Amazon MSK cluster used as the source for a Firehose delivery stream.

" }, "NoEncryptionConfig":{ "type":"string", @@ -2229,18 +2272,18 @@ "members":{ "ConvertDotsInJsonKeysToUnderscores":{ "shape":"BooleanObject", - "documentation":"

When set to true, specifies that the names of the keys include dots and that you want Kinesis Data Firehose to replace them with underscores. This is useful because Apache Hive does not allow dots in column names. For example, if the JSON contains a key whose name is \"a.b\", you can define the column name to be \"a_b\" when using this option.

The default is false.

" + "documentation":"

When set to true, specifies that the names of the keys include dots and that you want Firehose to replace them with underscores. This is useful because Apache Hive does not allow dots in column names. For example, if the JSON contains a key whose name is \"a.b\", you can define the column name to be \"a_b\" when using this option.

The default is false.

" }, "CaseInsensitive":{ "shape":"BooleanObject", - "documentation":"

When set to true, which is the default, Kinesis Data Firehose converts JSON keys to lowercase before deserializing them.

" + "documentation":"

When set to true, which is the default, Firehose converts JSON keys to lowercase before deserializing them.

" }, "ColumnToJsonKeyMappings":{ "shape":"ColumnToJsonKeyMappings", "documentation":"

Maps column names to JSON keys that aren't identical to the column names. This is useful when the JSON contains keys that are Hive keywords. For example, timestamp is a Hive keyword. If you have a JSON key named timestamp, set this parameter to {\"ts\": \"timestamp\"} to map this key to a column named ts.

" } }, - "documentation":"

The OpenX SerDe. Used by Kinesis Data Firehose for deserializing data, which means converting it from the JSON format in preparation for serializing it to the Parquet or ORC format. This is one of two deserializers you can choose, depending on which one offers the functionality you need. The other option is the native Hive / HCatalog JsonSerDe.

" + "documentation":"

The OpenX SerDe. Used by Firehose for deserializing data, which means converting it from the JSON format in preparation for serializing it to the Parquet or ORC format. This is one of two deserializers you can choose, depending on which one offers the functionality you need. The other option is the native Hive / HCatalog JsonSerDe.

" }, "OrcCompression":{ "type":"string", @@ -2270,7 +2313,7 @@ }, "BlockSizeBytes":{ "shape":"BlockSizeBytes", - "documentation":"

The Hadoop Distributed File System (HDFS) block size. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is 256 MiB and the minimum is 64 MiB. Kinesis Data Firehose uses this value for padding calculations.

" + "documentation":"

The Hadoop Distributed File System (HDFS) block size. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is 256 MiB and the minimum is 64 MiB. Firehose uses this value for padding calculations.

" }, "RowIndexStride":{ "shape":"OrcRowIndexStride", @@ -2282,7 +2325,7 @@ }, "PaddingTolerance":{ "shape":"Proportion", - "documentation":"

A number between 0 and 1 that defines the tolerance for block padding as a decimal fraction of stripe size. The default value is 0.05, which means 5 percent of stripe size.

For the default values of 64 MiB ORC stripes and 256 MiB HDFS blocks, the default block padding tolerance of 5 percent reserves a maximum of 3.2 MiB for padding within the 256 MiB block. In such a case, if the available size within the block is more than 3.2 MiB, a new, smaller stripe is inserted to fit within that space. This ensures that no stripe crosses block boundaries and causes remote reads within a node-local task.

Kinesis Data Firehose ignores this parameter when OrcSerDe$EnablePadding is false.

" + "documentation":"

A number between 0 and 1 that defines the tolerance for block padding as a decimal fraction of stripe size. The default value is 0.05, which means 5 percent of stripe size.

For the default values of 64 MiB ORC stripes and 256 MiB HDFS blocks, the default block padding tolerance of 5 percent reserves a maximum of 3.2 MiB for padding within the 256 MiB block. In such a case, if the available size within the block is more than 3.2 MiB, a new, smaller stripe is inserted to fit within that space. This ensures that no stripe crosses block boundaries and causes remote reads within a node-local task.

Firehose ignores this parameter when OrcSerDe$EnablePadding is false.

" }, "Compression":{ "shape":"OrcCompression", @@ -2290,7 +2333,7 @@ }, "BloomFilterColumns":{ "shape":"ListOfNonEmptyStringsWithoutWhitespace", - "documentation":"

The column names for which you want Kinesis Data Firehose to create bloom filters. The default is null.

" + "documentation":"

The column names for which you want Firehose to create bloom filters. The default is null.

" }, "BloomFilterFalsePositiveProbability":{ "shape":"Proportion", @@ -2319,7 +2362,7 @@ "documentation":"

Specifies which serializer to use. You can choose either the ORC SerDe or the Parquet SerDe. If both are non-null, the server rejects the request.

" } }, - "documentation":"

Specifies the serializer that you want Kinesis Data Firehose to use to convert the format of your data before it writes it to Amazon S3. This parameter is required if Enabled is set to true.

" + "documentation":"

Specifies the serializer that you want Firehose to use to convert the format of your data before it writes it to Amazon S3. This parameter is required if Enabled is set to true.

" }, "ParquetCompression":{ "type":"string", @@ -2338,7 +2381,7 @@ "members":{ "BlockSizeBytes":{ "shape":"BlockSizeBytes", - "documentation":"

The Hadoop Distributed File System (HDFS) block size. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is 256 MiB and the minimum is 64 MiB. Kinesis Data Firehose uses this value for padding calculations.

" + "documentation":"

The Hadoop Distributed File System (HDFS) block size. This is useful if you intend to copy the data from Amazon S3 to HDFS before querying. The default is 256 MiB and the minimum is 64 MiB. Firehose uses this value for padding calculations.

" }, "PageSizeBytes":{ "shape":"ParquetPageSizeBytes", @@ -2410,7 +2453,7 @@ "documentation":"

The processor parameters.

" } }, - "documentation":"

Describes a data processor.

" + "documentation":"

Describes a data processor.

If you want to add a new line delimiter between records in objects that are delivered to Amazon S3, choose AppendDelimiterToRecord as a processor type. You don’t have to put a processor parameter when you select AppendDelimiterToRecord.

" }, "ProcessorList":{ "type":"list", @@ -2450,7 +2493,8 @@ "BufferIntervalInSeconds", "SubRecordType", "Delimiter", - "CompressionFormat" + "CompressionFormat", + "DataMessageExtraction" ] }, "ProcessorParameterValue":{ @@ -2464,6 +2508,7 @@ "enum":[ "RecordDeAggregation", "Decompression", + "CloudWatchLogProcessing", "Lambda", "MetadataExtraction", "AppendDelimiterToRecord" @@ -2621,7 +2666,7 @@ }, "RetryOptions":{ "shape":"RedshiftRetryOptions", - "documentation":"

The retry behavior in case Kinesis Data Firehose is unable to deliver documents to Amazon Redshift. Default value is 3600 (60 minutes).

" + "documentation":"

The retry behavior in case Firehose is unable to deliver documents to Amazon Redshift. Default value is 3600 (60 minutes).

" }, "S3Configuration":{ "shape":"S3DestinationConfiguration", @@ -2674,7 +2719,7 @@ }, "RetryOptions":{ "shape":"RedshiftRetryOptions", - "documentation":"

The retry behavior in case Kinesis Data Firehose is unable to deliver documents to Amazon Redshift. Default value is 3600 (60 minutes).

" + "documentation":"

The retry behavior in case Firehose is unable to deliver documents to Amazon Redshift. Default value is 3600 (60 minutes).

" }, "S3DestinationDescription":{ "shape":"S3DestinationDescription", @@ -2724,7 +2769,7 @@ }, "RetryOptions":{ "shape":"RedshiftRetryOptions", - "documentation":"

The retry behavior in case Kinesis Data Firehose is unable to deliver documents to Amazon Redshift. Default value is 3600 (60 minutes).

" + "documentation":"

The retry behavior in case Firehose is unable to deliver documents to Amazon Redshift. Default value is 3600 (60 minutes).

" }, "S3Update":{ "shape":"S3DestinationUpdate", @@ -2759,10 +2804,10 @@ "members":{ "DurationInSeconds":{ "shape":"RedshiftRetryDurationInSeconds", - "documentation":"

The length of time during which Kinesis Data Firehose retries delivery after a failure, starting from the initial request and including the first attempt. The default value is 3600 seconds (60 minutes). Kinesis Data Firehose does not retry if the value of DurationInSeconds is 0 (zero) or if the first delivery attempt takes longer than the current value.

" + "documentation":"

The length of time during which Firehose retries delivery after a failure, starting from the initial request and including the first attempt. The default value is 3600 seconds (60 minutes). Firehose does not retry if the value of DurationInSeconds is 0 (zero) or if the first delivery attempt takes longer than the current value.

" } }, - "documentation":"

Configures retry behavior in case Kinesis Data Firehose is unable to deliver documents to Amazon Redshift.

" + "documentation":"

Configures retry behavior in case Firehose is unable to deliver documents to Amazon Redshift.

" }, "RedshiftS3BackupMode":{ "type":"string", @@ -2803,10 +2848,10 @@ "members":{ "DurationInSeconds":{ "shape":"RetryDurationInSeconds", - "documentation":"

The period of time during which Kinesis Data Firehose retries to deliver data to the specified Amazon S3 prefix.

" + "documentation":"

The period of time during which Firehose retries to deliver data to the specified Amazon S3 prefix.

" } }, - "documentation":"

The retry behavior in case Kinesis Data Firehose is unable to deliver data to an Amazon S3 prefix.

" + "documentation":"

The retry behavior in case Firehose is unable to deliver data to an Amazon S3 prefix.

" }, "RoleARN":{ "type":"string", @@ -2842,7 +2887,7 @@ }, "ErrorOutputPrefix":{ "shape":"ErrorOutputPrefix", - "documentation":"

A prefix that Kinesis Data Firehose evaluates and adds to failed records before writing them to S3. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects.

" + "documentation":"

A prefix that Firehose evaluates and adds to failed records before writing them to S3. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects.

" }, "BufferingHints":{ "shape":"BufferingHints", @@ -2887,7 +2932,7 @@ }, "ErrorOutputPrefix":{ "shape":"ErrorOutputPrefix", - "documentation":"

A prefix that Kinesis Data Firehose evaluates and adds to failed records before writing them to S3. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects.

" + "documentation":"

A prefix that Firehose evaluates and adds to failed records before writing them to S3. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects.

" }, "BufferingHints":{ "shape":"BufferingHints", @@ -2925,7 +2970,7 @@ }, "ErrorOutputPrefix":{ "shape":"ErrorOutputPrefix", - "documentation":"

A prefix that Kinesis Data Firehose evaluates and adds to failed records before writing them to S3. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects.

" + "documentation":"

A prefix that Firehose evaluates and adds to failed records before writing them to S3. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see Custom Prefixes for Amazon S3 Objects.

" }, "BufferingHints":{ "shape":"BufferingHints", @@ -2951,7 +2996,7 @@ "members":{ "RoleARN":{ "shape":"NonEmptyStringWithoutWhitespace", - "documentation":"

The role that Kinesis Data Firehose can use to access Amazon Web Services Glue. This role must be in the same account you use for Kinesis Data Firehose. Cross-account roles aren't allowed.

If the SchemaConfiguration request parameter is used as part of invoking the CreateDeliveryStream API, then the RoleARN property is required and its value must be specified.

" + "documentation":"

The role that Firehose can use to access Amazon Web Services Glue. This role must be in the same account you use for Firehose. Cross-account roles aren't allowed.

If the SchemaConfiguration request parameter is used as part of invoking the CreateDeliveryStream API, then the RoleARN property is required and its value must be specified.

" }, "CatalogId":{ "shape":"NonEmptyStringWithoutWhitespace", @@ -2971,10 +3016,10 @@ }, "VersionId":{ "shape":"NonEmptyStringWithoutWhitespace", - "documentation":"

Specifies the table version for the output data schema. If you don't specify this version ID, or if you set it to LATEST, Kinesis Data Firehose uses the most recent version. This means that any updates to the table are automatically picked up.

" + "documentation":"

Specifies the table version for the output data schema. If you don't specify this version ID, or if you set it to LATEST, Firehose uses the most recent version. This means that any updates to the table are automatically picked up.

" } }, - "documentation":"

Specifies the schema to which you want Kinesis Data Firehose to configure your data before it writes it to Amazon S3. This parameter is required if Enabled is set to true.

" + "documentation":"

Specifies the schema to which you want Firehose to configure your data before it writes it to Amazon S3. This parameter is required if Enabled is set to true.

" }, "SecurityGroupIdList":{ "type":"list", @@ -2994,7 +3039,7 @@ "documentation":"

A serializer to use for converting data to the ORC format before storing it in Amazon S3. For more information, see Apache ORC.

" } }, - "documentation":"

The serializer that you want Kinesis Data Firehose to use to convert data to the target format before writing it to Amazon S3. Kinesis Data Firehose supports two types of serializers: the ORC SerDe and the Parquet SerDe.

" + "documentation":"

The serializer that you want Firehose to use to convert data to the target format before writing it to Amazon S3. Firehose supports two types of serializers: the ORC SerDe and the Parquet SerDe.

" }, "ServiceUnavailableException":{ "type":"structure", @@ -3004,7 +3049,7 @@ "documentation":"

A message that provides information about the error.

" } }, - "documentation":"

The service is unavailable. Back off and retry the operation. If you continue to see the exception, throughput limits for the delivery stream may have been exceeded. For more information about limits and how to request an increase, see Amazon Kinesis Data Firehose Limits.

", + "documentation":"

The service is unavailable. Back off and retry the operation. If you continue to see the exception, throughput limits for the delivery stream may have been exceeded. For more information about limits and how to request an increase, see Amazon Firehose Limits.

", "exception":true, "fault":true }, @@ -3013,6 +3058,335 @@ "max":128, "min":1 }, + "SnowflakeAccountUrl":{ + "type":"string", + "max":2048, + "min":24, + "pattern":".+?\\.snowflakecomputing\\.com", + "sensitive":true + }, + "SnowflakeContentColumnName":{ + "type":"string", + "max":255, + "min":1, + "sensitive":true + }, + "SnowflakeDataLoadingOption":{ + "type":"string", + "enum":[ + "JSON_MAPPING", + "VARIANT_CONTENT_MAPPING", + "VARIANT_CONTENT_AND_METADATA_MAPPING" + ] + }, + "SnowflakeDatabase":{ + "type":"string", + "max":255, + "min":1, + "sensitive":true + }, + "SnowflakeDestinationConfiguration":{ + "type":"structure", + "required":[ + "AccountUrl", + "PrivateKey", + "User", + "Database", + "Schema", + "Table", + "RoleARN", + "S3Configuration" + ], + "members":{ + "AccountUrl":{ + "shape":"SnowflakeAccountUrl", + "documentation":"

URL for accessing your Snowflake account. This URL must include your account identifier. Note that the protocol (https://) and port number are optional.

" + }, + "PrivateKey":{ + "shape":"SnowflakePrivateKey", + "documentation":"

The private key used to encrypt your Snowflake client. For information, see Using Key Pair Authentication & Key Rotation.

" + }, + "KeyPassphrase":{ + "shape":"SnowflakeKeyPassphrase", + "documentation":"

Passphrase to decrypt the private key when the key is encrypted. For information, see Using Key Pair Authentication & Key Rotation.

" + }, + "User":{ + "shape":"SnowflakeUser", + "documentation":"

User login name for the Snowflake account.

" + }, + "Database":{ + "shape":"SnowflakeDatabase", + "documentation":"

All data in Snowflake is maintained in databases.

" + }, + "Schema":{ + "shape":"SnowflakeSchema", + "documentation":"

Each database consists of one or more schemas, which are logical groupings of database objects, such as tables and views

" + }, + "Table":{ + "shape":"SnowflakeTable", + "documentation":"

All data in Snowflake is stored in database tables, logically structured as collections of columns and rows.

" + }, + "SnowflakeRoleConfiguration":{ + "shape":"SnowflakeRoleConfiguration", + "documentation":"

Optionally configure a Snowflake role. Otherwise the default user role will be used.

" + }, + "DataLoadingOption":{ + "shape":"SnowflakeDataLoadingOption", + "documentation":"

Choose to load JSON keys mapped to table column names or choose to split the JSON payload where content is mapped to a record content column and source metadata is mapped to a record metadata column.

" + }, + "MetaDataColumnName":{ + "shape":"SnowflakeMetaDataColumnName", + "documentation":"

The name of the record metadata column

" + }, + "ContentColumnName":{ + "shape":"SnowflakeContentColumnName", + "documentation":"

The name of the record content column

" + }, + "SnowflakeVpcConfiguration":{ + "shape":"SnowflakeVpcConfiguration", + "documentation":"

The VPCE ID for Firehose to privately connect with Snowflake. The ID format is com.amazonaws.vpce.[region].vpce-svc-<[id]>. For more information, see Amazon PrivateLink & Snowflake

" + }, + "CloudWatchLoggingOptions":{"shape":"CloudWatchLoggingOptions"}, + "ProcessingConfiguration":{"shape":"ProcessingConfiguration"}, + "RoleARN":{ + "shape":"RoleARN", + "documentation":"

The Amazon Resource Name (ARN) of the Snowflake role

" + }, + "RetryOptions":{ + "shape":"SnowflakeRetryOptions", + "documentation":"

The time period where Firehose will retry sending data to the chosen HTTP endpoint.

" + }, + "S3BackupMode":{ + "shape":"SnowflakeS3BackupMode", + "documentation":"

Choose an S3 backup mode

" + }, + "S3Configuration":{"shape":"S3DestinationConfiguration"} + }, + "documentation":"

Configure Snowflake destination

" + }, + "SnowflakeDestinationDescription":{ + "type":"structure", + "members":{ + "AccountUrl":{ + "shape":"SnowflakeAccountUrl", + "documentation":"

URL for accessing your Snowflake account. This URL must include your account identifier. Note that the protocol (https://) and port number are optional.

" + }, + "User":{ + "shape":"SnowflakeUser", + "documentation":"

User login name for the Snowflake account.

" + }, + "Database":{ + "shape":"SnowflakeDatabase", + "documentation":"

All data in Snowflake is maintained in databases.

" + }, + "Schema":{ + "shape":"SnowflakeSchema", + "documentation":"

Each database consists of one or more schemas, which are logical groupings of database objects, such as tables and views

" + }, + "Table":{ + "shape":"SnowflakeTable", + "documentation":"

All data in Snowflake is stored in database tables, logically structured as collections of columns and rows.

" + }, + "SnowflakeRoleConfiguration":{ + "shape":"SnowflakeRoleConfiguration", + "documentation":"

Optionally configure a Snowflake role. Otherwise the default user role will be used.

" + }, + "DataLoadingOption":{ + "shape":"SnowflakeDataLoadingOption", + "documentation":"

Choose to load JSON keys mapped to table column names or choose to split the JSON payload where content is mapped to a record content column and source metadata is mapped to a record metadata column.

" + }, + "MetaDataColumnName":{ + "shape":"SnowflakeMetaDataColumnName", + "documentation":"

The name of the record metadata column

" + }, + "ContentColumnName":{ + "shape":"SnowflakeContentColumnName", + "documentation":"

The name of the record content column

" + }, + "SnowflakeVpcConfiguration":{ + "shape":"SnowflakeVpcConfiguration", + "documentation":"

The VPCE ID for Firehose to privately connect with Snowflake. The ID format is com.amazonaws.vpce.[region].vpce-svc-<[id]>. For more information, see Amazon PrivateLink & Snowflake

" + }, + "CloudWatchLoggingOptions":{"shape":"CloudWatchLoggingOptions"}, + "ProcessingConfiguration":{"shape":"ProcessingConfiguration"}, + "RoleARN":{ + "shape":"RoleARN", + "documentation":"

The Amazon Resource Name (ARN) of the Snowflake role

" + }, + "RetryOptions":{ + "shape":"SnowflakeRetryOptions", + "documentation":"

The time period where Firehose will retry sending data to the chosen HTTP endpoint.

" + }, + "S3BackupMode":{ + "shape":"SnowflakeS3BackupMode", + "documentation":"

Choose an S3 backup mode

" + }, + "S3DestinationDescription":{"shape":"S3DestinationDescription"} + }, + "documentation":"

Optional Snowflake destination description

" + }, + "SnowflakeDestinationUpdate":{ + "type":"structure", + "members":{ + "AccountUrl":{ + "shape":"SnowflakeAccountUrl", + "documentation":"

URL for accessing your Snowflake account. This URL must include your account identifier. Note that the protocol (https://) and port number are optional.

" + }, + "PrivateKey":{ + "shape":"SnowflakePrivateKey", + "documentation":"

The private key used to encrypt your Snowflake client. For information, see Using Key Pair Authentication & Key Rotation.

" + }, + "KeyPassphrase":{ + "shape":"SnowflakeKeyPassphrase", + "documentation":"

Passphrase to decrypt the private key when the key is encrypted. For information, see Using Key Pair Authentication & Key Rotation.

" + }, + "User":{ + "shape":"SnowflakeUser", + "documentation":"

User login name for the Snowflake account.

" + }, + "Database":{ + "shape":"SnowflakeDatabase", + "documentation":"

All data in Snowflake is maintained in databases.

" + }, + "Schema":{ + "shape":"SnowflakeSchema", + "documentation":"

Each database consists of one or more schemas, which are logical groupings of database objects, such as tables and views

" + }, + "Table":{ + "shape":"SnowflakeTable", + "documentation":"

All data in Snowflake is stored in database tables, logically structured as collections of columns and rows.

" + }, + "SnowflakeRoleConfiguration":{ + "shape":"SnowflakeRoleConfiguration", + "documentation":"

Optionally configure a Snowflake role. Otherwise the default user role will be used.

" + }, + "DataLoadingOption":{ + "shape":"SnowflakeDataLoadingOption", + "documentation":"

JSON keys mapped to table column names or choose to split the JSON payload where content is mapped to a record content column and source metadata is mapped to a record metadata column.

" + }, + "MetaDataColumnName":{ + "shape":"SnowflakeMetaDataColumnName", + "documentation":"

The name of the record metadata column

" + }, + "ContentColumnName":{ + "shape":"SnowflakeContentColumnName", + "documentation":"

The name of the content metadata column

" + }, + "CloudWatchLoggingOptions":{"shape":"CloudWatchLoggingOptions"}, + "ProcessingConfiguration":{"shape":"ProcessingConfiguration"}, + "RoleARN":{ + "shape":"RoleARN", + "documentation":"

The Amazon Resource Name (ARN) of the Snowflake role

" + }, + "RetryOptions":{ + "shape":"SnowflakeRetryOptions", + "documentation":"

Specify how long Firehose retries sending data to the New Relic HTTP endpoint. After sending data, Firehose first waits for an acknowledgment from the HTTP endpoint. If an error occurs or the acknowledgment doesn’t arrive within the acknowledgment timeout period, Firehose starts the retry duration counter. It keeps retrying until the retry duration expires. After that, Firehose considers it a data delivery failure and backs up the data to your Amazon S3 bucket. Every time that Firehose sends data to the HTTP endpoint (either the initial attempt or a retry), it restarts the acknowledgement timeout counter and waits for an acknowledgement from the HTTP endpoint. Even if the retry duration expires, Firehose still waits for the acknowledgment until it receives it or the acknowledgement timeout period is reached. If the acknowledgment times out, Firehose determines whether there's time left in the retry counter. If there is time left, it retries again and repeats the logic until it receives an acknowledgment or determines that the retry time has expired. If you don't want Firehose to retry sending data, set this value to 0.

" + }, + "S3BackupMode":{ + "shape":"SnowflakeS3BackupMode", + "documentation":"

Choose an S3 backup mode

" + }, + "S3Update":{"shape":"S3DestinationUpdate"} + }, + "documentation":"

Update to configuration settings

" + }, + "SnowflakeKeyPassphrase":{ + "type":"string", + "max":255, + "min":7, + "sensitive":true + }, + "SnowflakeMetaDataColumnName":{ + "type":"string", + "max":255, + "min":1, + "sensitive":true + }, + "SnowflakePrivateKey":{ + "type":"string", + "max":4096, + "min":256, + "pattern":"^(?:[A-Za-z0-9+\\/]{4})*(?:[A-Za-z0-9+\\/]{2}==|[A-Za-z0-9+\\/]{3}=)?$", + "sensitive":true + }, + "SnowflakePrivateLinkVpceId":{ + "type":"string", + "max":255, + "min":47, + "pattern":"([a-zA-Z0-9\\-\\_]+\\.){2,3}vpce\\.[a-zA-Z0-9\\-]*\\.vpce-svc\\-[a-zA-Z0-9\\-]{17}$", + "sensitive":true + }, + "SnowflakeRetryDurationInSeconds":{ + "type":"integer", + "max":7200, + "min":0 + }, + "SnowflakeRetryOptions":{ + "type":"structure", + "members":{ + "DurationInSeconds":{ + "shape":"SnowflakeRetryDurationInSeconds", + "documentation":"

the time period where Firehose will retry sending data to the chosen HTTP endpoint.

" + } + }, + "documentation":"

Specify how long Firehose retries sending data to the New Relic HTTP endpoint. After sending data, Firehose first waits for an acknowledgment from the HTTP endpoint. If an error occurs or the acknowledgment doesn’t arrive within the acknowledgment timeout period, Firehose starts the retry duration counter. It keeps retrying until the retry duration expires. After that, Firehose considers it a data delivery failure and backs up the data to your Amazon S3 bucket. Every time that Firehose sends data to the HTTP endpoint (either the initial attempt or a retry), it restarts the acknowledgement timeout counter and waits for an acknowledgement from the HTTP endpoint. Even if the retry duration expires, Firehose still waits for the acknowledgment until it receives it or the acknowledgement timeout period is reached. If the acknowledgment times out, Firehose determines whether there's time left in the retry counter. If there is time left, it retries again and repeats the logic until it receives an acknowledgment or determines that the retry time has expired. If you don't want Firehose to retry sending data, set this value to 0.

" + }, + "SnowflakeRole":{ + "type":"string", + "max":255, + "min":1, + "sensitive":true + }, + "SnowflakeRoleConfiguration":{ + "type":"structure", + "members":{ + "Enabled":{ + "shape":"BooleanObject", + "documentation":"

Enable Snowflake role

" + }, + "SnowflakeRole":{ + "shape":"SnowflakeRole", + "documentation":"

The Snowflake role you wish to configure

" + } + }, + "documentation":"

Optionally configure a Snowflake role. Otherwise the default user role will be used.

" + }, + "SnowflakeS3BackupMode":{ + "type":"string", + "enum":[ + "FailedDataOnly", + "AllData" + ] + }, + "SnowflakeSchema":{ + "type":"string", + "max":255, + "min":1, + "sensitive":true + }, + "SnowflakeTable":{ + "type":"string", + "max":255, + "min":1, + "sensitive":true + }, + "SnowflakeUser":{ + "type":"string", + "max":255, + "min":1, + "sensitive":true + }, + "SnowflakeVpcConfiguration":{ + "type":"structure", + "required":["PrivateLinkVpceId"], + "members":{ + "PrivateLinkVpceId":{ + "shape":"SnowflakePrivateLinkVpceId", + "documentation":"

The VPCE ID for Firehose to privately connect with Snowflake. The ID format is com.amazonaws.vpce.[region].vpce-svc-<[id]>. For more information, see Amazon PrivateLink & Snowflake

" + } + }, + "documentation":"

Configure a Snowflake VPC

" + }, "SourceDescription":{ "type":"structure", "members":{ @@ -3025,7 +3399,7 @@ "documentation":"

The configuration description for the Amazon MSK cluster to be used as the source for a delivery stream.

" } }, - "documentation":"

Details about a Kinesis data stream used as the source for a Kinesis Data Firehose delivery stream.

" + "documentation":"

Details about a Kinesis data stream used as the source for a Firehose delivery stream.

" }, "SplunkBufferingHints":{ "type":"structure", @@ -3062,7 +3436,7 @@ "members":{ "HECEndpoint":{ "shape":"HECEndpoint", - "documentation":"

The HTTP Event Collector (HEC) endpoint to which Kinesis Data Firehose sends your data.

" + "documentation":"

The HTTP Event Collector (HEC) endpoint to which Firehose sends your data.

" }, "HECEndpointType":{ "shape":"HECEndpointType", @@ -3074,15 +3448,15 @@ }, "HECAcknowledgmentTimeoutInSeconds":{ "shape":"HECAcknowledgmentTimeoutInSeconds", - "documentation":"

The amount of time that Kinesis Data Firehose waits to receive an acknowledgment from Splunk after it sends it data. At the end of the timeout period, Kinesis Data Firehose either tries to send the data again or considers it an error, based on your retry settings.

" + "documentation":"

The amount of time that Firehose waits to receive an acknowledgment from Splunk after it sends it data. At the end of the timeout period, Firehose either tries to send the data again or considers it an error, based on your retry settings.

" }, "RetryOptions":{ "shape":"SplunkRetryOptions", - "documentation":"

The retry behavior in case Kinesis Data Firehose is unable to deliver data to Splunk, or if it doesn't receive an acknowledgment of receipt from Splunk.

" + "documentation":"

The retry behavior in case Firehose is unable to deliver data to Splunk, or if it doesn't receive an acknowledgment of receipt from Splunk.

" }, "S3BackupMode":{ "shape":"SplunkS3BackupMode", - "documentation":"

Defines how documents should be delivered to Amazon S3. When set to FailedEventsOnly, Kinesis Data Firehose writes any data that could not be indexed to the configured Amazon S3 destination. When set to AllEvents, Kinesis Data Firehose delivers all incoming records to Amazon S3, and also writes failed documents to Amazon S3. The default value is FailedEventsOnly.

You can update this backup mode from FailedEventsOnly to AllEvents. You can't update it from AllEvents to FailedEventsOnly.

" + "documentation":"

Defines how documents should be delivered to Amazon S3. When set to FailedEventsOnly, Firehose writes any data that could not be indexed to the configured Amazon S3 destination. When set to AllEvents, Firehose delivers all incoming records to Amazon S3, and also writes failed documents to Amazon S3. The default value is FailedEventsOnly.

You can update this backup mode from FailedEventsOnly to AllEvents. You can't update it from AllEvents to FailedEventsOnly.

" }, "S3Configuration":{ "shape":"S3DestinationConfiguration", @@ -3108,7 +3482,7 @@ "members":{ "HECEndpoint":{ "shape":"HECEndpoint", - "documentation":"

The HTTP Event Collector (HEC) endpoint to which Kinesis Data Firehose sends your data.

" + "documentation":"

The HTTP Event Collector (HEC) endpoint to which Firehose sends your data.

" }, "HECEndpointType":{ "shape":"HECEndpointType", @@ -3120,15 +3494,15 @@ }, "HECAcknowledgmentTimeoutInSeconds":{ "shape":"HECAcknowledgmentTimeoutInSeconds", - "documentation":"

The amount of time that Kinesis Data Firehose waits to receive an acknowledgment from Splunk after it sends it data. At the end of the timeout period, Kinesis Data Firehose either tries to send the data again or considers it an error, based on your retry settings.

" + "documentation":"

The amount of time that Firehose waits to receive an acknowledgment from Splunk after it sends it data. At the end of the timeout period, Firehose either tries to send the data again or considers it an error, based on your retry settings.

" }, "RetryOptions":{ "shape":"SplunkRetryOptions", - "documentation":"

The retry behavior in case Kinesis Data Firehose is unable to deliver data to Splunk or if it doesn't receive an acknowledgment of receipt from Splunk.

" + "documentation":"

The retry behavior in case Firehose is unable to deliver data to Splunk or if it doesn't receive an acknowledgment of receipt from Splunk.

" }, "S3BackupMode":{ "shape":"SplunkS3BackupMode", - "documentation":"

Defines how documents should be delivered to Amazon S3. When set to FailedDocumentsOnly, Kinesis Data Firehose writes any data that could not be indexed to the configured Amazon S3 destination. When set to AllDocuments, Kinesis Data Firehose delivers all incoming records to Amazon S3, and also writes failed documents to Amazon S3. Default value is FailedDocumentsOnly.

" + "documentation":"

Defines how documents should be delivered to Amazon S3. When set to FailedDocumentsOnly, Firehose writes any data that could not be indexed to the configured Amazon S3 destination. When set to AllDocuments, Firehose delivers all incoming records to Amazon S3, and also writes failed documents to Amazon S3. Default value is FailedDocumentsOnly.

" }, "S3DestinationDescription":{ "shape":"S3DestinationDescription", @@ -3154,7 +3528,7 @@ "members":{ "HECEndpoint":{ "shape":"HECEndpoint", - "documentation":"

The HTTP Event Collector (HEC) endpoint to which Kinesis Data Firehose sends your data.

" + "documentation":"

The HTTP Event Collector (HEC) endpoint to which Firehose sends your data.

" }, "HECEndpointType":{ "shape":"HECEndpointType", @@ -3166,15 +3540,15 @@ }, "HECAcknowledgmentTimeoutInSeconds":{ "shape":"HECAcknowledgmentTimeoutInSeconds", - "documentation":"

The amount of time that Kinesis Data Firehose waits to receive an acknowledgment from Splunk after it sends data. At the end of the timeout period, Kinesis Data Firehose either tries to send the data again or considers it an error, based on your retry settings.

" + "documentation":"

The amount of time that Firehose waits to receive an acknowledgment from Splunk after it sends data. At the end of the timeout period, Firehose either tries to send the data again or considers it an error, based on your retry settings.

" }, "RetryOptions":{ "shape":"SplunkRetryOptions", - "documentation":"

The retry behavior in case Kinesis Data Firehose is unable to deliver data to Splunk or if it doesn't receive an acknowledgment of receipt from Splunk.

" + "documentation":"

The retry behavior in case Firehose is unable to deliver data to Splunk or if it doesn't receive an acknowledgment of receipt from Splunk.

" }, "S3BackupMode":{ "shape":"SplunkS3BackupMode", - "documentation":"

Specifies how you want Kinesis Data Firehose to back up documents to Amazon S3. When set to FailedDocumentsOnly, Kinesis Data Firehose writes any data that could not be indexed to the configured Amazon S3 destination. When set to AllEvents, Kinesis Data Firehose delivers all incoming records to Amazon S3, and also writes failed documents to Amazon S3. The default value is FailedEventsOnly.

You can update this backup mode from FailedEventsOnly to AllEvents. You can't update it from AllEvents to FailedEventsOnly.

" + "documentation":"

Specifies how you want Firehose to back up documents to Amazon S3. When set to FailedDocumentsOnly, Firehose writes any data that could not be indexed to the configured Amazon S3 destination. When set to AllEvents, Firehose delivers all incoming records to Amazon S3, and also writes failed documents to Amazon S3. The default value is FailedEventsOnly.

You can update this backup mode from FailedEventsOnly to AllEvents. You can't update it from AllEvents to FailedEventsOnly.

" }, "S3Update":{ "shape":"S3DestinationUpdate", @@ -3205,10 +3579,10 @@ "members":{ "DurationInSeconds":{ "shape":"SplunkRetryDurationInSeconds", - "documentation":"

The total amount of time that Kinesis Data Firehose spends on retries. This duration starts after the initial attempt to send data to Splunk fails. It doesn't include the periods during which Kinesis Data Firehose waits for acknowledgment from Splunk after each attempt.

" + "documentation":"

The total amount of time that Firehose spends on retries. This duration starts after the initial attempt to send data to Splunk fails. It doesn't include the periods during which Firehose waits for acknowledgment from Splunk after each attempt.

" } }, - "documentation":"

Configures retry behavior in case Kinesis Data Firehose is unable to deliver documents to Splunk, or if it doesn't receive an acknowledgment from Splunk.

" + "documentation":"

Configures retry behavior in case Firehose is unable to deliver documents to Splunk, or if it doesn't receive an acknowledgment from Splunk.

" }, "SplunkS3BackupMode":{ "type":"string", @@ -3399,6 +3773,10 @@ "AmazonOpenSearchServerlessDestinationUpdate":{ "shape":"AmazonOpenSearchServerlessDestinationUpdate", "documentation":"

Describes an update for a destination in the Serverless offering for Amazon OpenSearch Service.

" + }, + "SnowflakeDestinationUpdate":{ + "shape":"SnowflakeDestinationUpdate", + "documentation":"

Update to the Snowflake destination configuration settings

" } } }, @@ -3424,15 +3802,15 @@ "members":{ "SubnetIds":{ "shape":"SubnetIdList", - "documentation":"

The IDs of the subnets that you want Kinesis Data Firehose to use to create ENIs in the VPC of the Amazon ES destination. Make sure that the routing tables and inbound and outbound rules allow traffic to flow from the subnets whose IDs are specified here to the subnets that have the destination Amazon ES endpoints. Kinesis Data Firehose creates at least one ENI in each of the subnets that are specified here. Do not delete or modify these ENIs.

The number of ENIs that Kinesis Data Firehose creates in the subnets specified here scales up and down automatically based on throughput. To enable Kinesis Data Firehose to scale up the number of ENIs to match throughput, ensure that you have sufficient quota. To help you calculate the quota you need, assume that Kinesis Data Firehose can create up to three ENIs for this delivery stream for each of the subnets specified here. For more information about ENI quota, see Network Interfaces in the Amazon VPC Quotas topic.

" + "documentation":"

The IDs of the subnets that you want Firehose to use to create ENIs in the VPC of the Amazon ES destination. Make sure that the routing tables and inbound and outbound rules allow traffic to flow from the subnets whose IDs are specified here to the subnets that have the destination Amazon ES endpoints. Firehose creates at least one ENI in each of the subnets that are specified here. Do not delete or modify these ENIs.

The number of ENIs that Firehose creates in the subnets specified here scales up and down automatically based on throughput. To enable Firehose to scale up the number of ENIs to match throughput, ensure that you have sufficient quota. To help you calculate the quota you need, assume that Firehose can create up to three ENIs for this delivery stream for each of the subnets specified here. For more information about ENI quota, see Network Interfaces in the Amazon VPC Quotas topic.

" }, "RoleARN":{ "shape":"RoleARN", - "documentation":"

The ARN of the IAM role that you want the delivery stream to use to create endpoints in the destination VPC. You can use your existing Kinesis Data Firehose delivery role or you can specify a new role. In either case, make sure that the role trusts the Kinesis Data Firehose service principal and that it grants the following permissions:

If you revoke these permissions after you create the delivery stream, Kinesis Data Firehose can't scale out by creating more ENIs when necessary. You might therefore see a degradation in performance.

" + "documentation":"

The ARN of the IAM role that you want the delivery stream to use to create endpoints in the destination VPC. You can use your existing Firehose delivery role or you can specify a new role. In either case, make sure that the role trusts the Firehose service principal and that it grants the following permissions:

When you specify subnets for delivering data to the destination in a private VPC, make sure you have a sufficient number of free IP addresses in the chosen subnets. If there is no available free IP address in a specified subnet, Firehose cannot create or add ENIs for the data delivery in the private VPC, and the delivery will be degraded or fail.

" }, "SecurityGroupIds":{ "shape":"SecurityGroupIdList", - "documentation":"

The IDs of the security groups that you want Kinesis Data Firehose to use when it creates ENIs in the VPC of the Amazon ES destination. You can use the same security group that the Amazon ES domain uses or different ones. If you specify different security groups here, ensure that they allow outbound HTTPS traffic to the Amazon ES domain's security group. Also ensure that the Amazon ES domain's security group allows HTTPS traffic from the security groups specified here. If you use the same security group for both your delivery stream and the Amazon ES domain, make sure the security group inbound rule allows HTTPS traffic. For more information about security group rules, see Security group rules in the Amazon VPC documentation.

" + "documentation":"

The IDs of the security groups that you want Firehose to use when it creates ENIs in the VPC of the Amazon ES destination. You can use the same security group that the Amazon ES domain uses or different ones. If you specify different security groups here, ensure that they allow outbound HTTPS traffic to the Amazon ES domain's security group. Also ensure that the Amazon ES domain's security group allows HTTPS traffic from the security groups specified here. If you use the same security group for both your delivery stream and the Amazon ES domain, make sure the security group inbound rule allows HTTPS traffic. For more information about security group rules, see Security group rules in the Amazon VPC documentation.

" } }, "documentation":"

The details of the VPC of the Amazon OpenSearch or Amazon OpenSearch Serverless destination.

" @@ -3448,15 +3826,15 @@ "members":{ "SubnetIds":{ "shape":"SubnetIdList", - "documentation":"

The IDs of the subnets that Kinesis Data Firehose uses to create ENIs in the VPC of the Amazon ES destination. Make sure that the routing tables and inbound and outbound rules allow traffic to flow from the subnets whose IDs are specified here to the subnets that have the destination Amazon ES endpoints. Kinesis Data Firehose creates at least one ENI in each of the subnets that are specified here. Do not delete or modify these ENIs.

The number of ENIs that Kinesis Data Firehose creates in the subnets specified here scales up and down automatically based on throughput. To enable Kinesis Data Firehose to scale up the number of ENIs to match throughput, ensure that you have sufficient quota. To help you calculate the quota you need, assume that Kinesis Data Firehose can create up to three ENIs for this delivery stream for each of the subnets specified here. For more information about ENI quota, see Network Interfaces in the Amazon VPC Quotas topic.

" + "documentation":"

The IDs of the subnets that Firehose uses to create ENIs in the VPC of the Amazon ES destination. Make sure that the routing tables and inbound and outbound rules allow traffic to flow from the subnets whose IDs are specified here to the subnets that have the destination Amazon ES endpoints. Firehose creates at least one ENI in each of the subnets that are specified here. Do not delete or modify these ENIs.

The number of ENIs that Firehose creates in the subnets specified here scales up and down automatically based on throughput. To enable Firehose to scale up the number of ENIs to match throughput, ensure that you have sufficient quota. To help you calculate the quota you need, assume that Firehose can create up to three ENIs for this delivery stream for each of the subnets specified here. For more information about ENI quota, see Network Interfaces in the Amazon VPC Quotas topic.

" }, "RoleARN":{ "shape":"RoleARN", - "documentation":"

The ARN of the IAM role that the delivery stream uses to create endpoints in the destination VPC. You can use your existing Kinesis Data Firehose delivery role or you can specify a new role. In either case, make sure that the role trusts the Kinesis Data Firehose service principal and that it grants the following permissions:

If you revoke these permissions after you create the delivery stream, Kinesis Data Firehose can't scale out by creating more ENIs when necessary. You might therefore see a degradation in performance.

" + "documentation":"

The ARN of the IAM role that the delivery stream uses to create endpoints in the destination VPC. You can use your existing Firehose delivery role or you can specify a new role. In either case, make sure that the role trusts the Firehose service principal and that it grants the following permissions:

If you revoke these permissions after you create the delivery stream, Firehose can't scale out by creating more ENIs when necessary. You might therefore see a degradation in performance.

" }, "SecurityGroupIds":{ "shape":"SecurityGroupIdList", - "documentation":"

The IDs of the security groups that Kinesis Data Firehose uses when it creates ENIs in the VPC of the Amazon ES destination. You can use the same security group that the Amazon ES domain uses or different ones. If you specify different security groups, ensure that they allow outbound HTTPS traffic to the Amazon ES domain's security group. Also ensure that the Amazon ES domain's security group allows HTTPS traffic from the security groups specified here. If you use the same security group for both your delivery stream and the Amazon ES domain, make sure the security group inbound rule allows HTTPS traffic. For more information about security group rules, see Security group rules in the Amazon VPC documentation.

" + "documentation":"

The IDs of the security groups that Firehose uses when it creates ENIs in the VPC of the Amazon ES destination. You can use the same security group that the Amazon ES domain uses or different ones. If you specify different security groups, ensure that they allow outbound HTTPS traffic to the Amazon ES domain's security group. Also ensure that the Amazon ES domain's security group allows HTTPS traffic from the security groups specified here. If you use the same security group for both your delivery stream and the Amazon ES domain, make sure the security group inbound rule allows HTTPS traffic. For more information about security group rules, see Security group rules in the Amazon VPC documentation.

" }, "VpcId":{ "shape":"NonEmptyStringWithoutWhitespace", @@ -3466,5 +3844,5 @@ "documentation":"

The details of the VPC of the Amazon ES destination.

" } }, - "documentation":"Amazon Kinesis Data Firehose API Reference

Amazon Kinesis Data Firehose is a fully managed service that delivers real-time streaming data to destinations such as Amazon Simple Storage Service (Amazon S3), Amazon OpenSearch Service, Amazon Redshift, Splunk, and various other supportd destinations.

" + "documentation":"Amazon Data Firehose

Amazon Data Firehose is a fully managed service that delivers real-time streaming data to destinations such as Amazon Simple Storage Service (Amazon S3), Amazon OpenSearch Service, Amazon Redshift, Splunk, and various other supported destinations.

" } diff -Nru awscli-2.15.9/awscli/botocore/data/glue/2017-03-31/service-2.json awscli-2.15.22/awscli/botocore/data/glue/2017-03-31/service-2.json --- awscli-2.15.9/awscli/botocore/data/glue/2017-03-31/service-2.json 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/awscli/botocore/data/glue/2017-03-31/service-2.json 2024-02-21 17:34:54.000000000 +0000 @@ -5074,7 +5074,8 @@ "type":"string", "enum":[ "DISABLED", - "SSE-KMS" + "SSE-KMS", + "SSE-KMS-WITH-SERVICE-ROLE" ] }, "CatalogEntries":{ @@ -6273,7 +6274,7 @@ }, "ConnectionProperties":{ "shape":"ConnectionProperties", - "documentation":"

These key-value pairs define parameters for the connection:

" + "documentation":"

These key-value pairs define parameters for the connection:

" }, "PhysicalConnectionRequirements":{ "shape":"PhysicalConnectionRequirements", @@ -6390,6 +6391,9 @@ "CONNECTOR_TYPE", "CONNECTOR_CLASS_NAME", "KAFKA_SASL_MECHANISM", + "KAFKA_SASL_PLAIN_USERNAME", + "KAFKA_SASL_PLAIN_PASSWORD", + "ENCRYPTED_KAFKA_SASL_PLAIN_PASSWORD", "KAFKA_SASL_SCRAM_USERNAME", "KAFKA_SASL_SCRAM_PASSWORD", "KAFKA_SASL_SCRAM_SECRETS_ARN", @@ -10185,6 +10189,10 @@ "SseAwsKmsKeyId":{ "shape":"NameString", "documentation":"

The ID of the KMS key to use for encryption at rest.

" + }, + "CatalogEncryptionServiceRole":{ + "shape":"IAMRoleArn", + "documentation":"

The role that Glue assumes to encrypt and decrypt the Data Catalog objects on the caller's behalf.

" } }, "documentation":"

Specifies the encryption-at-rest configuration for the Data Catalog.

" @@ -11705,7 +11713,7 @@ "documentation":"

A continuation token, if this is a continuation call.

" }, "MaxResults":{ - "shape":"PageSize", + "shape":"OrchestrationPageSize200", "documentation":"

The maximum size of the response.

" } } @@ -12840,7 +12848,7 @@ "documentation":"

The name of the job to retrieve triggers for. The trigger that can start this job is returned, and if there is no such trigger, all triggers are returned.

" }, "MaxResults":{ - "shape":"PageSize", + "shape":"OrchestrationPageSize200", "documentation":"

The maximum size of the response.

" } } @@ -13549,6 +13557,10 @@ "type":"list", "member":{"shape":"HudiTarget"} }, + "IAMRoleArn":{ + "type":"string", + "pattern":"^arn:aws(-(cn|us-gov|iso(-[bef])?))?:iam::[0-9]{12}:role/.+" + }, "IcebergInput":{ "type":"structure", "required":["MetadataOperation"], @@ -14114,7 +14126,7 @@ }, "Runtime":{ "shape":"RuntimeNameString", - "documentation":"

In Ray jobs, Runtime is used to specify the versions of Ray, Python and additional libraries available in your environment. This field is not used in other job types. For supported runtime environment values, see Working with Ray jobs in the Glue Developer Guide.

" + "documentation":"

In Ray jobs, Runtime is used to specify the versions of Ray, Python and additional libraries available in your environment. This field is not used in other job types. For supported runtime environment values, see Supported Ray runtime environments in the Glue Developer Guide.

" } }, "documentation":"

Specifies code that runs when a job is run.

" @@ -14776,7 +14788,7 @@ "documentation":"

A continuation token, if this is a continuation request.

" }, "MaxResults":{ - "shape":"PageSize", + "shape":"OrchestrationPageSize25", "documentation":"

The maximum size of a list to return.

" }, "Tags":{ @@ -15372,7 +15384,7 @@ "documentation":"

The name of the job for which to retrieve triggers. The trigger that can start this job is returned. If there is no such trigger, all triggers are returned.

" }, "MaxResults":{ - "shape":"PageSize", + "shape":"OrchestrationPageSize200", "documentation":"

The maximum size of a list to return.

" }, "Tags":{ @@ -15402,7 +15414,7 @@ "documentation":"

A continuation token, if this is a continuation request.

" }, "MaxResults":{ - "shape":"PageSize", + "shape":"OrchestrationPageSize25", "documentation":"

The maximum size of a list to return.

" } } @@ -16247,6 +16259,18 @@ "min":1, "pattern":"[\\.\\-_A-Za-z0-9]+" }, + "OrchestrationPageSize200":{ + "type":"integer", + "box":true, + "max":200, + "min":1 + }, + "OrchestrationPageSize25":{ + "type":"integer", + "box":true, + "max":25, + "min":1 + }, "OrchestrationRoleArn":{ "type":"string", "max":2048, diff -Nru awscli-2.15.9/awscli/botocore/data/guardduty/2017-11-28/service-2.json awscli-2.15.22/awscli/botocore/data/guardduty/2017-11-28/service-2.json --- awscli-2.15.9/awscli/botocore/data/guardduty/2017-11-28/service-2.json 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/awscli/botocore/data/guardduty/2017-11-28/service-2.json 2024-02-21 17:34:54.000000000 +0000 @@ -494,7 +494,7 @@ {"shape":"BadRequestException"}, {"shape":"InternalServerErrorException"} ], - "documentation":"

Lists Amazon GuardDuty findings statistics for the specified detector ID.

" + "documentation":"

Lists Amazon GuardDuty findings statistics for the specified detector ID.

There might be regional differences because some flags might not be available in all the Regions where GuardDuty is currently supported. For more information, see Regions and endpoints.

" }, "GetIPSet":{ "name":"GetIPSet", @@ -720,7 +720,7 @@ {"shape":"BadRequestException"}, {"shape":"InternalServerErrorException"} ], - "documentation":"

Lists Amazon GuardDuty findings for the specified detector ID.

" + "documentation":"

Lists GuardDuty findings for the specified detector ID.

There might be regional differences because some flags might not be available in all the Regions where GuardDuty is currently supported. For more information, see Regions and endpoints.

" }, "ListIPSets":{ "name":"ListIPSets", @@ -934,7 +934,7 @@ {"shape":"BadRequestException"}, {"shape":"InternalServerErrorException"} ], - "documentation":"

Updates the Amazon GuardDuty detector specified by the detectorId.

There might be regional differences because some data sources might not be available in all the Amazon Web Services Regions where GuardDuty is presently supported. For more information, see Regions and endpoints.

" + "documentation":"

Updates the GuardDuty detector specified by the detectorId.

There might be regional differences because some data sources might not be available in all the Amazon Web Services Regions where GuardDuty is presently supported. For more information, see Regions and endpoints.

" }, "UpdateFilter":{ "name":"UpdateFilter", @@ -3567,7 +3567,8 @@ "Email":{ "type":"string", "max":64, - "min":1 + "min":1, + "sensitive":true }, "EnableOrganizationAdminAccountRequest":{ "type":"structure", @@ -5686,7 +5687,7 @@ "type":"structure", "members":{ "IpAddressV4":{ - "shape":"String", + "shape":"SensitiveString", "documentation":"

The IPv4 local address of the connection.

", "locationName":"ipAddressV4" } @@ -6089,7 +6090,7 @@ "locationName":"privateDnsName" }, "PrivateIpAddress":{ - "shape":"String", + "shape":"SensitiveString", "documentation":"

The private IP address of the EC2 instance.

", "locationName":"privateIpAddress" }, @@ -6658,7 +6659,7 @@ "locationName":"privateDnsName" }, "PrivateIpAddress":{ - "shape":"String", + "shape":"SensitiveString", "documentation":"

The private IP address of the EC2 instance.

", "locationName":"privateIpAddress" } @@ -6916,7 +6917,7 @@ "locationName":"geoLocation" }, "IpAddressV4":{ - "shape":"String", + "shape":"SensitiveString", "documentation":"

The IPv4 remote address of the connection.

", "locationName":"ipAddressV4" }, @@ -7568,6 +7569,10 @@ "type":"list", "member":{"shape":"SecurityGroup"} }, + "SensitiveString":{ + "type":"string", + "sensitive":true + }, "Service":{ "type":"structure", "members":{ diff -Nru awscli-2.15.9/awscli/botocore/data/healthlake/2017-07-01/endpoint-rule-set-1.json awscli-2.15.22/awscli/botocore/data/healthlake/2017-07-01/endpoint-rule-set-1.json --- awscli-2.15.9/awscli/botocore/data/healthlake/2017-07-01/endpoint-rule-set-1.json 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/awscli/botocore/data/healthlake/2017-07-01/endpoint-rule-set-1.json 2024-02-21 17:34:54.000000000 +0000 @@ -40,7 +40,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -83,7 +82,8 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -96,7 +96,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -110,7 +109,6 @@ "assign": "PartitionResult" } ], - "type": "tree", "rules": [ { "conditions": [ @@ -133,7 +131,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -168,7 +165,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -179,14 +175,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS and DualStack are enabled, but this partition does not support one or both", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -200,14 +198,12 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ - true, { "fn": "getAttr", "argv": [ @@ -216,11 +212,11 @@ }, "supportsFIPS" ] - } + }, + true ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -231,14 +227,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS is enabled but this partition does not support FIPS", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -252,7 +250,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -272,7 +269,6 @@ ] } ], - "type": "tree", "rules": [ { 
"conditions": [], @@ -283,14 +279,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "DualStack is enabled but this partition does not support DualStack", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [], @@ -301,9 +299,11 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" } - ] + ], + "type": "tree" }, { "conditions": [], diff -Nru awscli-2.15.9/awscli/botocore/data/healthlake/2017-07-01/service-2.json awscli-2.15.22/awscli/botocore/data/healthlake/2017-07-01/service-2.json --- awscli-2.15.9/awscli/botocore/data/healthlake/2017-07-01/service-2.json 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/awscli/botocore/data/healthlake/2017-07-01/service-2.json 2024-02-21 17:34:54.000000000 +0000 @@ -546,7 +546,7 @@ "members":{ "ImportJobProperties":{ "shape":"ImportJobProperties", - "documentation":"

The properties of the Import job request, including the ID, ARN, name, and the status of the job.

" + "documentation":"

The properties of the Import job request, including the ID, ARN, name, status of the job, and the progress report of the job.

" } } }, @@ -613,6 +613,8 @@ "type":"string", "enum":["R4"] }, + "GenericDouble":{"type":"double"}, + "GenericLong":{"type":"long"}, "IamRoleArn":{ "type":"string", "max":2048, @@ -681,6 +683,10 @@ "documentation":"

The input data configuration that was supplied when the Import job was created.

" }, "JobOutputDataConfig":{"shape":"OutputDataConfig"}, + "JobProgressReport":{ + "shape":"JobProgressReport", + "documentation":"

Displays the progress of the import job, including total resources scanned, total resources ingested, and total size of data ingested.

" + }, "DataAccessRoleArn":{ "shape":"IamRoleArn", "documentation":"

The Amazon Resource Name (ARN) that gives AWS HealthLake access to your input data.

" @@ -690,7 +696,7 @@ "documentation":"

An explanation of any errors that may have occurred during the FHIR import job.

" } }, - "documentation":"

Displays the properties of the import job, including the ID, Arn, Name, and the status of the data store.

" + "documentation":"

Displays the properties of the import job, including the ID, Arn, Name, the status of the job, and the progress report of the job.

" }, "ImportJobPropertiesList":{ "type":"list", @@ -728,6 +734,44 @@ "min":1, "pattern":"^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-%@]*)$" }, + "JobProgressReport":{ + "type":"structure", + "members":{ + "TotalNumberOfScannedFiles":{ + "shape":"GenericLong", + "documentation":"

The number of files scanned from input S3 bucket.

" + }, + "TotalSizeOfScannedFilesInMB":{ + "shape":"GenericDouble", + "documentation":"

The size (in MB) of the files scanned from the input S3 bucket.

" + }, + "TotalNumberOfImportedFiles":{ + "shape":"GenericLong", + "documentation":"

The number of files imported so far.

" + }, + "TotalNumberOfResourcesScanned":{ + "shape":"GenericLong", + "documentation":"

The number of resources scanned from the input S3 bucket.

" + }, + "TotalNumberOfResourcesImported":{ + "shape":"GenericLong", + "documentation":"

The number of resources imported so far.

" + }, + "TotalNumberOfResourcesWithCustomerError":{ + "shape":"GenericLong", + "documentation":"

The number of resources that failed due to customer error.

" + }, + "TotalNumberOfFilesReadWithCustomerError":{ + "shape":"GenericLong", + "documentation":"

The number of files that failed to be read from the input S3 bucket due to customer error.

" + }, + "Throughput":{ + "shape":"GenericDouble", + "documentation":"

The throughput (in MB/sec) of the import job.

" + } + }, + "documentation":"

The progress report of an import job.

" + }, "JobStatus":{ "type":"string", "enum":[ @@ -882,7 +926,7 @@ "members":{ "ImportJobPropertiesList":{ "shape":"ImportJobPropertiesList", - "documentation":"

The properties of a listed FHIR import jobs, including the ID, ARN, name, and the status of the job.

" + "documentation":"

The properties of a listed FHIR import jobs, including the ID, ARN, name, the status of the job, and the progress report of the job.

" }, "NextToken":{ "shape":"NextToken", diff -Nru awscli-2.15.9/awscli/botocore/data/inspector2/2020-06-08/endpoint-rule-set-1.json awscli-2.15.22/awscli/botocore/data/inspector2/2020-06-08/endpoint-rule-set-1.json --- awscli-2.15.9/awscli/botocore/data/inspector2/2020-06-08/endpoint-rule-set-1.json 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/awscli/botocore/data/inspector2/2020-06-08/endpoint-rule-set-1.json 2024-02-21 17:34:54.000000000 +0000 @@ -40,7 +40,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -83,7 +82,8 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -96,7 +96,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -110,7 +109,6 @@ "assign": "PartitionResult" } ], - "type": "tree", "rules": [ { "conditions": [ @@ -133,7 +131,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -168,7 +165,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -179,14 +175,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS and DualStack are enabled, but this partition does not support one or both", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -200,14 +198,12 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ - true, { "fn": "getAttr", "argv": [ @@ -216,11 +212,11 @@ }, "supportsFIPS" ] - } + }, + true ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -231,14 +227,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS is enabled but this partition does not support FIPS", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -252,7 +250,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -272,7 +269,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -283,14 +279,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "DualStack is enabled but this partition does not support 
DualStack", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [], @@ -301,9 +299,11 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" } - ] + ], + "type": "tree" }, { "conditions": [], diff -Nru awscli-2.15.9/awscli/botocore/data/inspector2/2020-06-08/paginators-1.json awscli-2.15.22/awscli/botocore/data/inspector2/2020-06-08/paginators-1.json --- awscli-2.15.9/awscli/botocore/data/inspector2/2020-06-08/paginators-1.json 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/awscli/botocore/data/inspector2/2020-06-08/paginators-1.json 2024-02-21 17:34:54.000000000 +0000 @@ -57,6 +57,36 @@ "input_token": "nextToken", "output_token": "nextToken", "result_key": "vulnerabilities" + }, + "GetCisScanResultDetails": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "scanResultDetails" + }, + "ListCisScanConfigurations": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "scanConfigurations" + }, + "ListCisScanResultsAggregatedByChecks": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "checkAggregations" + }, + "ListCisScanResultsAggregatedByTargetResource": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "targetResourceAggregations" + }, + "ListCisScans": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "scans" } } } diff -Nru awscli-2.15.9/awscli/botocore/data/inspector2/2020-06-08/service-2.json awscli-2.15.22/awscli/botocore/data/inspector2/2020-06-08/service-2.json --- awscli-2.15.9/awscli/botocore/data/inspector2/2020-06-08/service-2.json 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/awscli/botocore/data/inspector2/2020-06-08/service-2.json 2024-02-21 17:34:54.000000000 +0000 @@ -170,6 +170,23 @@ "documentation":"

Cancels a software bill of materials (SBOM) report.

", "idempotent":true }, + "CreateCisScanConfiguration":{ + "name":"CreateCisScanConfiguration", + "http":{ + "method":"POST", + "requestUri":"/cis/scan-configuration/create", + "responseCode":200 + }, + "input":{"shape":"CreateCisScanConfigurationRequest"}, + "output":{"shape":"CreateCisScanConfigurationResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Creates a CIS scan configuration.

" + }, "CreateFilter":{ "name":"CreateFilter", "http":{ @@ -226,6 +243,24 @@ "documentation":"

Creates a software bill of materials (SBOM) report.

", "idempotent":true }, + "DeleteCisScanConfiguration":{ + "name":"DeleteCisScanConfiguration", + "http":{ + "method":"POST", + "requestUri":"/cis/scan-configuration/delete", + "responseCode":200 + }, + "input":{"shape":"DeleteCisScanConfigurationRequest"}, + "output":{"shape":"DeleteCisScanConfigurationResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Deletes a CIS scan configuration.

" + }, "DeleteFilter":{ "name":"DeleteFilter", "http":{ @@ -352,6 +387,40 @@ ], "documentation":"

Enables the Amazon Inspector delegated administrator for your Organizations organization.

" }, + "GetCisScanReport":{ + "name":"GetCisScanReport", + "http":{ + "method":"POST", + "requestUri":"/cis/scan/report/get", + "responseCode":200 + }, + "input":{"shape":"GetCisScanReportRequest"}, + "output":{"shape":"GetCisScanReportResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Retrieves a CIS scan report.

" + }, + "GetCisScanResultDetails":{ + "name":"GetCisScanResultDetails", + "http":{ + "method":"POST", + "requestUri":"/cis/scan-result/details/get", + "responseCode":200 + }, + "input":{"shape":"GetCisScanResultDetailsRequest"}, + "output":{"shape":"GetCisScanResultDetailsResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Retrieves CIS scan result details.

" + }, "GetConfiguration":{ "name":"GetConfiguration", "http":{ @@ -493,6 +562,74 @@ ], "documentation":"

Lists the permissions an account has to configure Amazon Inspector.

" }, + "ListCisScanConfigurations":{ + "name":"ListCisScanConfigurations", + "http":{ + "method":"POST", + "requestUri":"/cis/scan-configuration/list", + "responseCode":200 + }, + "input":{"shape":"ListCisScanConfigurationsRequest"}, + "output":{"shape":"ListCisScanConfigurationsResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Lists CIS scan configurations.

" + }, + "ListCisScanResultsAggregatedByChecks":{ + "name":"ListCisScanResultsAggregatedByChecks", + "http":{ + "method":"POST", + "requestUri":"/cis/scan-result/check/list", + "responseCode":200 + }, + "input":{"shape":"ListCisScanResultsAggregatedByChecksRequest"}, + "output":{"shape":"ListCisScanResultsAggregatedByChecksResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Lists scan results aggregated by checks.

" + }, + "ListCisScanResultsAggregatedByTargetResource":{ + "name":"ListCisScanResultsAggregatedByTargetResource", + "http":{ + "method":"POST", + "requestUri":"/cis/scan-result/resource/list", + "responseCode":200 + }, + "input":{"shape":"ListCisScanResultsAggregatedByTargetResourceRequest"}, + "output":{"shape":"ListCisScanResultsAggregatedByTargetResourceResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Lists scan results aggregated by a target resource.

" + }, + "ListCisScans":{ + "name":"ListCisScans", + "http":{ + "method":"POST", + "requestUri":"/cis/scan/list", + "responseCode":200 + }, + "input":{"shape":"ListCisScansRequest"}, + "output":{"shape":"ListCisScansResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Returns a CIS scan list.

" + }, "ListCoverage":{ "name":"ListCoverage", "http":{ @@ -678,6 +815,82 @@ ], "documentation":"

Lists Amazon Inspector coverage details for a specific vulnerability.

" }, + "SendCisSessionHealth":{ + "name":"SendCisSessionHealth", + "http":{ + "method":"PUT", + "requestUri":"/cissession/health/send", + "responseCode":200 + }, + "input":{"shape":"SendCisSessionHealthRequest"}, + "output":{"shape":"SendCisSessionHealthResponse"}, + "errors":[ + {"shape":"ConflictException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Sends a CIS session health. This API is used by the Amazon Inspector SSM plugin to communicate with the Amazon Inspector service. The Amazon Inspector SSM plugin calls this API to start a CIS scan session for the scan ID supplied by the service.

", + "idempotent":true + }, + "SendCisSessionTelemetry":{ + "name":"SendCisSessionTelemetry", + "http":{ + "method":"PUT", + "requestUri":"/cissession/telemetry/send", + "responseCode":200 + }, + "input":{"shape":"SendCisSessionTelemetryRequest"}, + "output":{"shape":"SendCisSessionTelemetryResponse"}, + "errors":[ + {"shape":"ConflictException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Sends a CIS session telemetry. This API is used by the Amazon Inspector SSM plugin to communicate with the Amazon Inspector service. The Amazon Inspector SSM plugin calls this API to start a CIS scan session for the scan ID supplied by the service.

", + "idempotent":true + }, + "StartCisSession":{ + "name":"StartCisSession", + "http":{ + "method":"PUT", + "requestUri":"/cissession/start", + "responseCode":200 + }, + "input":{"shape":"StartCisSessionRequest"}, + "output":{"shape":"StartCisSessionResponse"}, + "errors":[ + {"shape":"ConflictException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Starts a CIS session. This API is used by the Amazon Inspector SSM plugin to communicate with the Amazon Inspector service. The Amazon Inspector SSM plugin calls this API to start a CIS scan session for the scan ID supplied by the service.

", + "idempotent":true + }, + "StopCisSession":{ + "name":"StopCisSession", + "http":{ + "method":"PUT", + "requestUri":"/cissession/stop", + "responseCode":200 + }, + "input":{"shape":"StopCisSessionRequest"}, + "output":{"shape":"StopCisSessionResponse"}, + "errors":[ + {"shape":"ConflictException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Stops a CIS session. This API is used by the Amazon Inspector SSM plugin to communicate with the Amazon Inspector service. The Amazon Inspector SSM plugin calls this API to start a CIS scan session for the scan ID supplied by the service.

", + "idempotent":true + }, "TagResource":{ "name":"TagResource", "http":{ @@ -714,6 +927,24 @@ ], "documentation":"

Removes tags from a resource.

" }, + "UpdateCisScanConfiguration":{ + "name":"UpdateCisScanConfiguration", + "http":{ + "method":"POST", + "requestUri":"/cis/scan-configuration/update", + "responseCode":200 + }, + "input":{"shape":"UpdateCisScanConfigurationRequest"}, + "output":{"shape":"UpdateCisScanConfigurationResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Updates a CIS scan configuration.

" + }, "UpdateConfiguration":{ "name":"UpdateConfiguration", "http":{ @@ -764,7 +995,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InternalServerException"} ], - "documentation":"

Updates an encryption key. A ResourceNotFoundException means that an AWS owned key is being used for encryption.

", + "documentation":"

Updates an encryption key. A ResourceNotFoundException means that an Amazon Web Services owned key is being used for encryption.

", "idempotent":true }, "UpdateFilter":{ @@ -899,6 +1130,12 @@ "min":12, "pattern":"^\\d{12}$" }, + "AccountIdFilterList":{ + "type":"list", + "member":{"shape":"CisStringFilter"}, + "max":10, + "min":1 + }, "AccountIdSet":{ "type":"list", "member":{"shape":"AccountId"}, @@ -984,11 +1221,11 @@ }, "lambdaFunctionAggregation":{ "shape":"LambdaFunctionAggregation", - "documentation":"

Returns an object with findings aggregated by AWS Lambda function.

" + "documentation":"

Returns an object with findings aggregated by Amazon Web Services Lambda function.

" }, "lambdaLayerAggregation":{ "shape":"LambdaLayerAggregation", - "documentation":"

Returns an object with findings aggregated by AWS Lambda layer.

" + "documentation":"

Returns an object with findings aggregated by Amazon Web Services Lambda layer.

" }, "packageAggregation":{ "shape":"PackageAggregation", @@ -1043,11 +1280,11 @@ }, "lambdaFunctionAggregation":{ "shape":"LambdaFunctionAggregationResponse", - "documentation":"

An aggregation of findings by AWS Lambda function.

" + "documentation":"

An aggregation of findings by Amazon Web Services Lambda function.

" }, "lambdaLayerAggregation":{ "shape":"LambdaLayerAggregationResponse", - "documentation":"

An aggregation of findings by AWS Lambda layer.

" + "documentation":"

An aggregation of findings by Amazon Web Services Lambda layer.

" }, "packageAggregation":{ "shape":"PackageAggregationResponse", @@ -1216,11 +1453,11 @@ }, "lambda":{ "shape":"Boolean", - "documentation":"

Represents whether AWS Lambda standard scans are automatically enabled for new members of your Amazon Inspector organization.

" + "documentation":"

Represents whether Amazon Web Services Lambda standard scans are automatically enabled for new members of your Amazon Inspector organization.

" }, "lambdaCode":{ "shape":"Boolean", - "documentation":"

Represents whether AWS Lambda code scans are automatically enabled for new members of your Amazon Inspector organization.

 </p> 
" + "documentation":"

Represents whether Lambda code scans are automatically enabled for new members of your Amazon Inspector organization.

 </p> 
" } }, "documentation":"

Represents which scan types are automatically enabled for new members of your Amazon Inspector organization.

" @@ -1403,19 +1640,19 @@ "members":{ "architectures":{ "shape":"ArchitectureList", - "documentation":"

The instruction set architecture that the AWS Lambda function supports. Architecture is a string array with one of the valid values. The default architecture value is x86_64.

" + "documentation":"

The instruction set architecture that the Amazon Web Services Lambda function supports. Architecture is a string array with one of the valid values. The default architecture value is x86_64.

" }, "codeSha256":{ "shape":"NonEmptyString", - "documentation":"

The SHA256 hash of the AWS Lambda function's deployment package.

" + "documentation":"

The SHA256 hash of the Amazon Web Services Lambda function's deployment package.

" }, "executionRoleArn":{ "shape":"ExecutionRoleArn", - "documentation":"

The AWS Lambda function's execution role.

" + "documentation":"

The Amazon Web Services Lambda function's execution role.

" }, "functionName":{ "shape":"FunctionName", - "documentation":"

The name of the AWS Lambda function.

" + "documentation":"

The name of the Amazon Web Services Lambda function.

" }, "lastModifiedAt":{ "shape":"Timestamp", @@ -1423,7 +1660,7 @@ }, "layers":{ "shape":"LayerList", - "documentation":"

The AWS Lambda function's layers. A Lambda function can have up to five layers.

" + "documentation":"

The Amazon Web Services Lambda function's layers. A Lambda function can have up to five layers.

" }, "packageType":{ "shape":"PackageType", @@ -1431,18 +1668,18 @@ }, "runtime":{ "shape":"Runtime", - "documentation":"

The runtime environment for the AWS Lambda function.

" + "documentation":"

The runtime environment for the Amazon Web Services Lambda function.

" }, "version":{ "shape":"Version", - "documentation":"

The version of the AWS Lambda function.

" + "documentation":"

The version of the Amazon Web Services Lambda function.

" }, "vpcConfig":{ "shape":"LambdaVpcConfig", - "documentation":"

The AWS Lambda function's networking configuration.

" + "documentation":"

The Amazon Web Services Lambda function's networking configuration.

" } }, - "documentation":"

A summary of information about the AWS Lambda function.

" + "documentation":"

A summary of information about the Amazon Web Services Lambda function.

" }, "BadRequestException":{ "type":"structure", @@ -1610,6 +1847,16 @@ } } }, + "BenchmarkProfile":{ + "type":"string", + "max":128, + "min":0 + }, + "BenchmarkVersion":{ + "type":"string", + "max":8, + "min":0 + }, "Boolean":{ "type":"boolean", "box":true @@ -1653,6 +1900,807 @@ } } }, + "CheckCount":{ + "type":"integer", + "max":65536, + "min":0 + }, + "CheckIdFilterList":{ + "type":"list", + "member":{"shape":"CisStringFilter"}, + "max":10, + "min":1 + }, + "CisAccountIdList":{ + "type":"list", + "member":{"shape":"AccountId"}, + "max":10000, + "min":1 + }, + "CisCheckAggregation":{ + "type":"structure", + "required":["scanArn"], + "members":{ + "accountId":{ + "shape":"AccountId", + "documentation":"

The account ID for the CIS check.

" + }, + "checkDescription":{ + "shape":"String", + "documentation":"

The description for the CIS check.

" + }, + "checkId":{ + "shape":"String", + "documentation":"

The check ID for the CIS check.

" + }, + "level":{ + "shape":"CisSecurityLevel", + "documentation":"

The CIS check level.

" + }, + "platform":{ + "shape":"String", + "documentation":"

The CIS check platform.

" + }, + "scanArn":{ + "shape":"CisScanArn", + "documentation":"

The scan ARN for the CIS check.

" + }, + "statusCounts":{ + "shape":"StatusCounts", + "documentation":"

The CIS check status counts.

" + }, + "title":{ + "shape":"String", + "documentation":"

The CIS check title.

" + } + }, + "documentation":"

A CIS check.

" + }, + "CisCheckAggregationList":{ + "type":"list", + "member":{"shape":"CisCheckAggregation"}, + "max":1000, + "min":1 + }, + "CisDateFilter":{ + "type":"structure", + "members":{ + "earliestScanStartTime":{ + "shape":"Timestamp", + "documentation":"

The CIS date filter's earliest scan start time.

" + }, + "latestScanStartTime":{ + "shape":"Timestamp", + "documentation":"

The CIS date filter's latest scan start time.

" + } + }, + "documentation":"

The CIS date filter.

" + }, + "CisFindingArn":{ + "type":"string", + "pattern":"^arn:aws(-gov|-cn)?:inspector2:[-.a-z0-9]{0,20}:\\d{12}:owner/\\d{12}/cis-finding/[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$" + }, + "CisFindingArnFilterList":{ + "type":"list", + "member":{"shape":"CisStringFilter"}, + "max":10, + "min":1 + }, + "CisFindingStatus":{ + "type":"string", + "enum":[ + "PASSED", + "FAILED", + "SKIPPED" + ] + }, + "CisFindingStatusComparison":{ + "type":"string", + "enum":["EQUALS"] + }, + "CisFindingStatusFilter":{ + "type":"structure", + "required":[ + "comparison", + "value" + ], + "members":{ + "comparison":{ + "shape":"CisFindingStatusComparison", + "documentation":"

The comparison value of the CIS finding status filter.

" + }, + "value":{ + "shape":"CisFindingStatus", + "documentation":"

The value of the CIS finding status filter.

" + } + }, + "documentation":"

The CIS finding status filter.

" + }, + "CisFindingStatusFilterList":{ + "type":"list", + "member":{"shape":"CisFindingStatusFilter"}, + "max":10, + "min":1 + }, + "CisNumberFilter":{ + "type":"structure", + "members":{ + "lowerInclusive":{ + "shape":"Integer", + "documentation":"

The CIS number filter's lower inclusive.

" + }, + "upperInclusive":{ + "shape":"Integer", + "documentation":"

The CIS number filter's upper inclusive.

" + } + }, + "documentation":"

The CIS number filter.

" + }, + "CisNumberFilterList":{ + "type":"list", + "member":{"shape":"CisNumberFilter"}, + "max":10, + "min":1 + }, + "CisOwnerId":{ + "type":"string", + "pattern":"^\\d{12}|o-[a-z0-9]{10,32}$" + }, + "CisReportStatus":{ + "type":"string", + "enum":[ + "SUCCEEDED", + "FAILED", + "IN_PROGRESS" + ] + }, + "CisResultStatus":{ + "type":"string", + "enum":[ + "PASSED", + "FAILED", + "SKIPPED" + ] + }, + "CisResultStatusComparison":{ + "type":"string", + "enum":["EQUALS"] + }, + "CisResultStatusFilter":{ + "type":"structure", + "required":[ + "comparison", + "value" + ], + "members":{ + "comparison":{ + "shape":"CisResultStatusComparison", + "documentation":"

The comparison value of the CIS result status filter.

" + }, + "value":{ + "shape":"CisResultStatus", + "documentation":"

The value of the CIS result status filter.

" + } + }, + "documentation":"

The CIS result status filter.

" + }, + "CisResultStatusFilterList":{ + "type":"list", + "member":{"shape":"CisResultStatusFilter"}, + "max":10, + "min":1 + }, + "CisRuleDetails":{ + "type":"blob", + "max":1000, + "min":0 + }, + "CisRuleStatus":{ + "type":"string", + "enum":[ + "FAILED", + "PASSED", + "NOT_EVALUATED", + "INFORMATIONAL", + "UNKNOWN", + "NOT_APPLICABLE", + "ERROR" + ] + }, + "CisScan":{ + "type":"structure", + "required":[ + "scanArn", + "scanConfigurationArn" + ], + "members":{ + "failedChecks":{ + "shape":"Integer", + "documentation":"

The CIS scan's failed checks.

" + }, + "scanArn":{ + "shape":"CisScanArn", + "documentation":"

The CIS scan's ARN.

" + }, + "scanConfigurationArn":{ + "shape":"CisScanConfigurationArn", + "documentation":"

The CIS scan's configuration ARN.

" + }, + "scanDate":{ + "shape":"Timestamp", + "documentation":"

The CIS scan's date.

" + }, + "scanName":{ + "shape":"CisScanName", + "documentation":"

The name of the scan configuration that's associated with this scan.

" + }, + "scheduledBy":{ + "shape":"String", + "documentation":"

The account or organization that schedules the CIS scan.

" + }, + "securityLevel":{ + "shape":"CisSecurityLevel", + "documentation":"

The security level for the CIS scan. Security level refers to the Benchmark levels that CIS assigns to a profile.

" + }, + "status":{ + "shape":"CisScanStatus", + "documentation":"

The CIS scan's status.

" + }, + "targets":{ + "shape":"CisTargets", + "documentation":"

The CIS scan's targets.

" + }, + "totalChecks":{ + "shape":"Integer", + "documentation":"

The CIS scan's total checks.

" + } + }, + "documentation":"

The CIS scan.

" + }, + "CisScanArn":{ + "type":"string", + "pattern":"^arn:aws(-us-gov|-cn)?:inspector2:[-.a-z0-9]{0,20}:\\d{12}:owner/(\\d{12}|o-[a-z0-9]{10,32})/cis-scan/[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$" + }, + "CisScanArnFilterList":{ + "type":"list", + "member":{"shape":"CisStringFilter"}, + "max":10, + "min":1 + }, + "CisScanConfiguration":{ + "type":"structure", + "required":["scanConfigurationArn"], + "members":{ + "ownerId":{ + "shape":"CisOwnerId", + "documentation":"

The CIS scan configuration's owner ID.

" + }, + "scanConfigurationArn":{ + "shape":"CisScanConfigurationArn", + "documentation":"

The CIS scan configuration's scan configuration ARN.

" + }, + "scanName":{ + "shape":"CisScanName", + "documentation":"

The name of the CIS scan configuration.

" + }, + "schedule":{ + "shape":"Schedule", + "documentation":"

The CIS scan configuration's schedule.

" + }, + "securityLevel":{ + "shape":"CisSecurityLevel", + "documentation":"

The CIS scan configuration's security level.

" + }, + "tags":{ + "shape":"CisTagMap", + "documentation":"

The CIS scan configuration's tags.

" + }, + "targets":{ + "shape":"CisTargets", + "documentation":"

The CIS scan configuration's targets.

" + } + }, + "documentation":"

The CIS scan configuration.

" + }, + "CisScanConfigurationArn":{ + "type":"string", + "pattern":"^arn:aws(-us-gov|-cn)?:inspector2:[a-z]{2}(-gov)?-[a-z]+-[0-9]{1}:[0-9]{12}:owner/(o-[a-z0-9]+|[0-9]{12})/cis-configuration/[0-9a-fA-F-]+$" + }, + "CisScanConfigurationArnFilterList":{ + "type":"list", + "member":{"shape":"CisStringFilter"}, + "max":10, + "min":1 + }, + "CisScanConfigurationList":{ + "type":"list", + "member":{"shape":"CisScanConfiguration"}, + "max":100, + "min":0 + }, + "CisScanConfigurationsSortBy":{ + "type":"string", + "enum":[ + "SCAN_NAME", + "SCAN_CONFIGURATION_ARN" + ] + }, + "CisScanDateFilterList":{ + "type":"list", + "member":{"shape":"CisDateFilter"}, + "max":1, + "min":1 + }, + "CisScanList":{ + "type":"list", + "member":{"shape":"CisScan"}, + "max":50, + "min":0 + }, + "CisScanName":{ + "type":"string", + "max":128, + "min":1 + }, + "CisScanNameFilterList":{ + "type":"list", + "member":{"shape":"CisStringFilter"}, + "max":10, + "min":1 + }, + "CisScanResultDetails":{ + "type":"structure", + "required":["scanArn"], + "members":{ + "accountId":{ + "shape":"AccountId", + "documentation":"

The CIS scan result details' account ID.

" + }, + "checkDescription":{ + "shape":"String", + "documentation":"

The description of the check that's associated with the CIS scan result details.

" + }, + "checkId":{ + "shape":"String", + "documentation":"

The CIS scan result details' check ID.

" + }, + "findingArn":{ + "shape":"CisFindingArn", + "documentation":"

The CIS scan result details' finding ARN.

" + }, + "level":{ + "shape":"CisSecurityLevel", + "documentation":"

The CIS scan result details' level.

" + }, + "platform":{ + "shape":"String", + "documentation":"

The CIS scan result details' platform.

" + }, + "remediation":{ + "shape":"String", + "documentation":"

The CIS scan result details' remediation.

" + }, + "scanArn":{ + "shape":"CisScanArn", + "documentation":"

The CIS scan result details' scan ARN.

" + }, + "status":{ + "shape":"CisFindingStatus", + "documentation":"

The CIS scan result details' status.

" + }, + "statusReason":{ + "shape":"String", + "documentation":"

The CIS scan result details' status reason.

" + }, + "targetResourceId":{ + "shape":"ResourceId", + "documentation":"

The CIS scan result details' target resource ID.

" + }, + "title":{ + "shape":"String", + "documentation":"

The CIS scan result details' title.

" + } + }, + "documentation":"

The CIS scan result details.

" + }, + "CisScanResultDetailsFilterCriteria":{ + "type":"structure", + "members":{ + "checkIdFilters":{ + "shape":"CheckIdFilterList", + "documentation":"

The criteria's check ID filters.

" + }, + "findingArnFilters":{ + "shape":"CisFindingArnFilterList", + "documentation":"

The criteria's finding ARN filters.

" + }, + "findingStatusFilters":{ + "shape":"CisFindingStatusFilterList", + "documentation":"

The criteria's finding status filters.

" + }, + "securityLevelFilters":{ + "shape":"CisSecurityLevelFilterList", + "documentation":"

The criteria's security level filters. Security level refers to the Benchmark levels that CIS assigns to a profile.

" + }, + "titleFilters":{ + "shape":"TitleFilterList", + "documentation":"

The criteria's title filters.

" + } + }, + "documentation":"

The CIS scan result details filter criteria.

" + }, + "CisScanResultDetailsList":{ + "type":"list", + "member":{"shape":"CisScanResultDetails"}, + "max":1000, + "min":1 + }, + "CisScanResultDetailsSortBy":{ + "type":"string", + "enum":[ + "CHECK_ID", + "STATUS" + ] + }, + "CisScanResultsAggregatedByChecksFilterCriteria":{ + "type":"structure", + "members":{ + "accountIdFilters":{ + "shape":"OneAccountIdFilterList", + "documentation":"

The criteria's account ID filters.

" + }, + "checkIdFilters":{ + "shape":"CheckIdFilterList", + "documentation":"

The criteria's check ID filters.

" + }, + "failedResourcesFilters":{ + "shape":"CisNumberFilterList", + "documentation":"

The criteria's failed resources filters.

" + }, + "platformFilters":{ + "shape":"PlatformFilterList", + "documentation":"

The criteria's platform filters.

" + }, + "securityLevelFilters":{ + "shape":"CisSecurityLevelFilterList", + "documentation":"

The criteria's security level filters.

" + }, + "titleFilters":{ + "shape":"TitleFilterList", + "documentation":"

The criteria's title filters.

" + } + }, + "documentation":"

The scan results aggregated by checks filter criteria.

" + }, + "CisScanResultsAggregatedByChecksSortBy":{ + "type":"string", + "enum":[ + "CHECK_ID", + "TITLE", + "PLATFORM", + "FAILED_COUNTS", + "SECURITY_LEVEL" + ] + }, + "CisScanResultsAggregatedByTargetResourceFilterCriteria":{ + "type":"structure", + "members":{ + "accountIdFilters":{ + "shape":"AccountIdFilterList", + "documentation":"

The criteria's account ID filters.

" + }, + "checkIdFilters":{ + "shape":"CheckIdFilterList", + "documentation":"

The criteria's check ID filters.

" + }, + "failedChecksFilters":{ + "shape":"CisNumberFilterList", + "documentation":"

The criteria's failed checks filters.

" + }, + "platformFilters":{ + "shape":"PlatformFilterList", + "documentation":"

The criteria's platform filters.

" + }, + "statusFilters":{ + "shape":"CisResultStatusFilterList", + "documentation":"

The criteria's status filter.

" + }, + "targetResourceIdFilters":{ + "shape":"ResourceIdFilterList", + "documentation":"

The criteria's target resource ID filters.

" + }, + "targetResourceTagFilters":{ + "shape":"ResourceTagFilterList", + "documentation":"

The criteria's target resource tag filters.

" + }, + "targetStatusFilters":{ + "shape":"TargetStatusFilterList", + "documentation":"

The criteria's target status filters.

" + }, + "targetStatusReasonFilters":{ + "shape":"TargetStatusReasonFilterList", + "documentation":"

The criteria's target status reason filters.

" + } + }, + "documentation":"

The scan results aggregated by target resource filter criteria.

" + }, + "CisScanResultsAggregatedByTargetResourceSortBy":{ + "type":"string", + "enum":[ + "RESOURCE_ID", + "FAILED_COUNTS", + "ACCOUNT_ID", + "PLATFORM", + "TARGET_STATUS", + "TARGET_STATUS_REASON" + ] + }, + "CisScanResultsMaxResults":{ + "type":"integer", + "box":true, + "max":100, + "min":1 + }, + "CisScanStatus":{ + "type":"string", + "enum":[ + "FAILED", + "COMPLETED", + "CANCELLED", + "IN_PROGRESS" + ] + }, + "CisScanStatusComparison":{ + "type":"string", + "enum":["EQUALS"] + }, + "CisScanStatusFilter":{ + "type":"structure", + "required":[ + "comparison", + "value" + ], + "members":{ + "comparison":{ + "shape":"CisScanStatusComparison", + "documentation":"

The filter comparison value.

" + }, + "value":{ + "shape":"CisScanStatus", + "documentation":"

The filter value.

" + } + }, + "documentation":"

The CIS scan status filter.

" + }, + "CisScanStatusFilterList":{ + "type":"list", + "member":{"shape":"CisScanStatusFilter"}, + "max":10, + "min":1 + }, + "CisScheduledByFilterList":{ + "type":"list", + "member":{"shape":"CisStringFilter"}, + "max":10, + "min":1 + }, + "CisSecurityLevel":{ + "type":"string", + "enum":[ + "LEVEL_1", + "LEVEL_2" + ] + }, + "CisSecurityLevelComparison":{ + "type":"string", + "enum":["EQUALS"] + }, + "CisSecurityLevelFilter":{ + "type":"structure", + "required":[ + "comparison", + "value" + ], + "members":{ + "comparison":{ + "shape":"CisSecurityLevelComparison", + "documentation":"

The CIS security filter comparison value.

" + }, + "value":{ + "shape":"CisSecurityLevel", + "documentation":"

The CIS security filter value.

" + } + }, + "documentation":"

The CIS security level filter. Security level refers to the Benchmark levels that CIS assigns to a profile.

" + }, + "CisSecurityLevelFilterList":{ + "type":"list", + "member":{"shape":"CisSecurityLevelFilter"}, + "max":10, + "min":1 + }, + "CisSessionMessage":{ + "type":"structure", + "required":[ + "cisRuleDetails", + "ruleId", + "status" + ], + "members":{ + "cisRuleDetails":{ + "shape":"CisRuleDetails", + "documentation":"

The CIS rule details for the CIS session message.

" + }, + "ruleId":{ + "shape":"RuleId", + "documentation":"

The rule ID for the CIS session message.

" + }, + "status":{ + "shape":"CisRuleStatus", + "documentation":"

The status of the CIS session message.

" + } + }, + "documentation":"

The CIS session message.

" + }, + "CisSessionMessages":{ + "type":"list", + "member":{"shape":"CisSessionMessage"}, + "max":150, + "min":1 + }, + "CisSortOrder":{ + "type":"string", + "enum":[ + "ASC", + "DESC" + ] + }, + "CisStringComparison":{ + "type":"string", + "enum":[ + "EQUALS", + "PREFIX", + "NOT_EQUALS" + ] + }, + "CisStringFilter":{ + "type":"structure", + "required":[ + "comparison", + "value" + ], + "members":{ + "comparison":{ + "shape":"CisStringComparison", + "documentation":"

The comparison value of the CIS string filter.

" + }, + "value":{ + "shape":"String", + "documentation":"

The value of the CIS string filter.

" + } + }, + "documentation":"

The CIS string filter.

" + }, + "CisTagMap":{ + "type":"map", + "key":{"shape":"MapKey"}, + "value":{"shape":"MapValue"} + }, + "CisTargetResourceAggregation":{ + "type":"structure", + "required":["scanArn"], + "members":{ + "accountId":{ + "shape":"AccountId", + "documentation":"

The account ID for the CIS target resource.

" + }, + "platform":{ + "shape":"String", + "documentation":"

The platform for the CIS target resource.

" + }, + "scanArn":{ + "shape":"CisScanArn", + "documentation":"

The scan ARN for the CIS target resource.

" + }, + "statusCounts":{ + "shape":"StatusCounts", + "documentation":"

The target resource status counts.

" + }, + "targetResourceId":{ + "shape":"ResourceId", + "documentation":"

The ID of the target resource.

" + }, + "targetResourceTags":{ + "shape":"TargetResourceTags", + "documentation":"

The tag for the target resource.

" + }, + "targetStatus":{ + "shape":"CisTargetStatus", + "documentation":"

The status of the target resource.

" + }, + "targetStatusReason":{ + "shape":"CisTargetStatusReason", + "documentation":"

The reason for the target resource.

" + } + }, + "documentation":"

The CIS target resource aggregation.

" + }, + "CisTargetResourceAggregationList":{ + "type":"list", + "member":{"shape":"CisTargetResourceAggregation"}, + "max":1000, + "min":1 + }, + "CisTargetStatus":{ + "type":"string", + "enum":[ + "TIMED_OUT", + "CANCELLED", + "COMPLETED" + ] + }, + "CisTargetStatusComparison":{ + "type":"string", + "enum":["EQUALS"] + }, + "CisTargetStatusFilter":{ + "type":"structure", + "required":[ + "comparison", + "value" + ], + "members":{ + "comparison":{ + "shape":"CisTargetStatusComparison", + "documentation":"

The comparison value of the CIS target status filter.

" + }, + "value":{ + "shape":"CisTargetStatus", + "documentation":"

The value of the CIS target status filter.

" + } + }, + "documentation":"

The CIS target status filter.

" + }, + "CisTargetStatusReason":{ + "type":"string", + "enum":[ + "SCAN_IN_PROGRESS", + "UNSUPPORTED_OS", + "SSM_UNMANAGED" + ] + }, + "CisTargetStatusReasonFilter":{ + "type":"structure", + "required":[ + "comparison", + "value" + ], + "members":{ + "comparison":{ + "shape":"CisTargetStatusComparison", + "documentation":"

The comparison value of the CIS target status reason filter.

" + }, + "value":{ + "shape":"CisTargetStatusReason", + "documentation":"

The value of the CIS target status reason filter.

" + } + }, + "documentation":"

The CIS target status reason filter.

" + }, + "CisTargets":{ + "type":"structure", + "members":{ + "accountIds":{ + "shape":"CisAccountIdList", + "documentation":"

The CIS target account ids.

" + }, + "targetResourceTags":{ + "shape":"TargetResourceTags", + "documentation":"

The CIS target resource tags.

" + } + }, + "documentation":"

The CIS targets.

" + }, "CisaAction":{ "type":"string", "min":0 @@ -1851,6 +2899,24 @@ }, "Component":{"type":"string"}, "ComponentType":{"type":"string"}, + "ComputePlatform":{ + "type":"structure", + "members":{ + "product":{ + "shape":"Product", + "documentation":"

The compute platform product.

" + }, + "vendor":{ + "shape":"Vendor", + "documentation":"

The compute platform vendor.

" + }, + "version":{ + "shape":"PlatformVersion", + "documentation":"

The compute platform version.

" + } + }, + "documentation":"

A compute platform.

" + }, "ConflictException":{ "type":"structure", "required":[ @@ -1935,17 +3001,21 @@ "shape":"CoverageStringFilterList", "documentation":"

The Amazon ECR repository name to filter on.

" }, + "imagePulledAt":{ + "shape":"CoverageDateFilterList", + "documentation":"

The date an image was last pulled at.

" + }, "lambdaFunctionName":{ "shape":"CoverageStringFilterList", - "documentation":"

Returns coverage statistics for AWS Lambda functions filtered by function names.

" + "documentation":"

Returns coverage statistics for Amazon Web Services Lambda functions filtered by function names.

" }, "lambdaFunctionRuntime":{ "shape":"CoverageStringFilterList", - "documentation":"

Returns coverage statistics for AWS Lambda functions filtered by runtime.

" + "documentation":"

Returns coverage statistics for Amazon Web Services Lambda functions filtered by runtime.

" }, "lambdaFunctionTags":{ "shape":"CoverageMapFilterList", - "documentation":"

Returns coverage statistics for AWS Lambda functions filtered by tag.

" + "documentation":"

Returns coverage statistics for Amazon Web Services Lambda functions filtered by tag.

" }, "lastScannedAt":{ "shape":"CoverageDateFilterList", @@ -1957,7 +3027,7 @@ }, "resourceType":{ "shape":"CoverageStringFilterList", - "documentation":"

An array of Amazon Web Services resource types to return coverage statistics for. The values can be AWS_EC2_INSTANCE, AWS_LAMBDA_FUNCTION or AWS_ECR_REPOSITORY.

" + "documentation":"

An array of Amazon Web Services resource types to return coverage statistics for. The values can be AWS_EC2_INSTANCE, AWS_LAMBDA_FUNCTION, AWS_ECR_CONTAINER_IMAGE, AWS_ECR_REPOSITORY or AWS_ACCOUNT.

" }, "scanStatusCode":{ "shape":"CoverageStringFilterList", @@ -2095,6 +3165,64 @@ "type":"list", "member":{"shape":"CoveredResource"} }, + "CreateCisScanConfigurationRequest":{ + "type":"structure", + "required":[ + "scanName", + "schedule", + "securityLevel", + "targets" + ], + "members":{ + "scanName":{ + "shape":"CisScanName", + "documentation":"

The scan name for the CIS scan configuration.

" + }, + "schedule":{ + "shape":"Schedule", + "documentation":"

The schedule for the CIS scan configuration.

" + }, + "securityLevel":{ + "shape":"CisSecurityLevel", + "documentation":"

The security level for the CIS scan configuration. Security level refers to the Benchmark levels that CIS assigns to a profile.

" + }, + "tags":{ + "shape":"CisTagMap", + "documentation":"

The tags for the CIS scan configuration.

" + }, + "targets":{ + "shape":"CreateCisTargets", + "documentation":"

The targets for the CIS scan configuration.

" + } + } + }, + "CreateCisScanConfigurationResponse":{ + "type":"structure", + "members":{ + "scanConfigurationArn":{ + "shape":"CisScanConfigurationArn", + "documentation":"

The scan configuration ARN for the CIS scan configuration.

" + } + } + }, + "CreateCisTargets":{ + "type":"structure", + "required":[ + "accountIds", + "targetResourceTags" + ], + "members":{ + "accountIds":{ + "shape":"TargetAccountList", + "documentation":"

The CIS target account ids.

" + }, + "targetResourceTags":{ + "shape":"TargetResourceTags", + "documentation":"

The CIS target resource tags.

" + } + }, + "documentation":"

Creates CIS targets.

" + }, "CreateFilterRequest":{ "type":"structure", "required":[ @@ -2343,6 +3471,17 @@ "member":{"shape":"Cwe"}, "min":0 }, + "DailySchedule":{ + "type":"structure", + "required":["startTime"], + "members":{ + "startTime":{ + "shape":"Time", + "documentation":"

The schedule start time.

" + } + }, + "documentation":"

A daily schedule.

" + }, "DateFilter":{ "type":"structure", "members":{ @@ -2364,6 +3503,24 @@ "min":1 }, "DateTimeTimestamp":{"type":"timestamp"}, + "Day":{ + "type":"string", + "enum":[ + "SUN", + "MON", + "TUE", + "WED", + "THU", + "FRI", + "SAT" + ] + }, + "DaysList":{ + "type":"list", + "member":{"shape":"Day"}, + "max":7, + "min":1 + }, "DelegatedAdmin":{ "type":"structure", "members":{ @@ -2405,6 +3562,26 @@ "DISABLE_IN_PROGRESS" ] }, + "DeleteCisScanConfigurationRequest":{ + "type":"structure", + "required":["scanConfigurationArn"], + "members":{ + "scanConfigurationArn":{ + "shape":"CisScanConfigurationArn", + "documentation":"

The ARN of the CIS scan configuration.

" + } + } + }, + "DeleteCisScanConfigurationResponse":{ + "type":"structure", + "required":["scanConfigurationArn"], + "members":{ + "scanConfigurationArn":{ + "shape":"CisScanConfigurationArn", + "documentation":"

The ARN of the CIS scan configuration.

" + } + } + }, "DeleteFilterRequest":{ "type":"structure", "required":["arn"], @@ -2668,9 +3845,13 @@ "type":"structure", "required":["rescanDuration"], "members":{ + "pullDateRescanDuration":{ + "shape":"EcrPullDateRescanDuration", + "documentation":"

The rescan duration configured for image pull date.

" + }, "rescanDuration":{ "shape":"EcrRescanDuration", - "documentation":"

The ECR automated re-scan duration defines how long an ECR image will be actively scanned by Amazon Inspector. When the number of days since an image was last pushed exceeds the automated re-scan duration the monitoring state of that image becomes inactive and all associated findings are scheduled for closure.

" + "documentation":"

The rescan duration configured for image push date.

" } }, "documentation":"

Details about the ECR automated re-scan duration setting for your environment.

" @@ -2680,7 +3861,7 @@ "members":{ "rescanDurationState":{ "shape":"EcrRescanDurationState", - "documentation":"

An object that contains details about the state of the ECR automated re-scan setting.

" + "documentation":"

An object that contains details about the state of the ECR re-scan settings.

" } }, "documentation":"

Details about the state of the ECR scans for your environment.

" @@ -2688,6 +3869,10 @@ "EcrContainerImageMetadata":{ "type":"structure", "members":{ + "imagePulledAt":{ + "shape":"DateTimeTimestamp", + "documentation":"

The date an image was last pulled at.

" + }, "tags":{ "shape":"TagList", "documentation":"

Tags associated with the Amazon ECR image metadata.

" @@ -2695,6 +3880,16 @@ }, "documentation":"

Information on the Amazon ECR image metadata associated with a finding.

" }, + "EcrPullDateRescanDuration":{ + "type":"string", + "enum":[ + "DAYS_14", + "DAYS_30", + "DAYS_60", + "DAYS_90", + "DAYS_180" + ] + }, "EcrRepositoryMetadata":{ "type":"structure", "members":{ @@ -2714,15 +3909,22 @@ "enum":[ "LIFETIME", "DAYS_30", - "DAYS_180" + "DAYS_180", + "DAYS_14", + "DAYS_60", + "DAYS_90" ] }, "EcrRescanDurationState":{ "type":"structure", "members":{ + "pullDateRescanDuration":{ + "shape":"EcrPullDateRescanDuration", + "documentation":"

The rescan duration configured for image pull date.

" + }, "rescanDuration":{ "shape":"EcrRescanDuration", - "documentation":"

The ECR automated re-scan duration defines how long an ECR image will be actively scanned by Amazon Inspector. When the number of days since an image was last pushed exceeds the automated re-scan duration the monitoring state of that image becomes inactive and all associated findings are scheduled for closure.

" + "documentation":"

The rescan duration configured for image push date.

" }, "status":{ "shape":"EcrRescanDurationStatus", @@ -2733,7 +3935,7 @@ "documentation":"

A timestamp representing when the last time the ECR scan duration setting was changed.

" } }, - "documentation":"

Details about the state of any changes to the ECR automated re-scan duration setting.

" + "documentation":"

Details about the state of your ECR re-scan duration settings. The ECR re-scan duration defines how long an ECR image will be actively scanned by Amazon Inspector. When the number of days since an image was last pushed exceeds the duration configured for image push date, and the duration configured for image pull date, the monitoring state of that image becomes inactive and all associated findings are scheduled for closure.

" }, "EcrRescanDurationStatus":{ "type":"string", @@ -3146,7 +4348,7 @@ }, "exploitAvailable":{ "shape":"StringFilterList", - "documentation":"

Filters the list of AWS Lambda findings by the availability of exploits.

" + "documentation":"

Filters the list of Amazon Web Services Lambda findings by the availability of exploits.

" }, "findingArn":{ "shape":"StringFilterList", @@ -3174,23 +4376,23 @@ }, "lambdaFunctionExecutionRoleArn":{ "shape":"StringFilterList", - "documentation":"

Filters the list of AWS Lambda functions by execution role.

" + "documentation":"

Filters the list of Amazon Web Services Lambda functions by execution role.

" }, "lambdaFunctionLastModifiedAt":{ "shape":"DateFilterList", - "documentation":"

Filters the list of AWS Lambda functions by the date and time that a user last updated the configuration, in ISO 8601 format

" + "documentation":"

Filters the list of Amazon Web Services Lambda functions by the date and time that a user last updated the configuration, in ISO 8601 format

" }, "lambdaFunctionLayers":{ "shape":"StringFilterList", - "documentation":"

Filters the list of AWS Lambda functions by the function's layers. A Lambda function can have up to five layers.

" + "documentation":"

Filters the list of Amazon Web Services Lambda functions by the function's layers. A Lambda function can have up to five layers.

" }, "lambdaFunctionName":{ "shape":"StringFilterList", - "documentation":"

Filters the list of AWS Lambda functions by the name of the function.

" + "documentation":"

Filters the list of Amazon Web Services Lambda functions by the name of the function.

" }, "lambdaFunctionRuntime":{ "shape":"StringFilterList", - "documentation":"

Filters the list of AWS Lambda functions by the runtime environment for the Lambda function.

" + "documentation":"

Filters the list of Amazon Web Services Lambda functions by the runtime environment for the Lambda function.

" }, "lastObservedAt":{ "shape":"DateFilterList", @@ -3658,6 +4860,94 @@ "type":"string", "pattern":"^[a-zA-Z0-9-_\\.]+(:(\\$LATEST|[a-zA-Z0-9-_]+))?$" }, + "GetCisScanReportRequest":{ + "type":"structure", + "required":["scanArn"], + "members":{ + "scanArn":{ + "shape":"CisScanArn", + "documentation":"

The scan ARN.

" + }, + "targetAccounts":{ + "shape":"ReportTargetAccounts", + "documentation":"

The target accounts.

" + } + } + }, + "GetCisScanReportResponse":{ + "type":"structure", + "members":{ + "status":{ + "shape":"CisReportStatus", + "documentation":"

The status.

" + }, + "url":{ + "shape":"String", + "documentation":"

The URL where the CIS scan report PDF can be downloaded.

" + } + } + }, + "GetCisScanResultDetailsMaxResults":{ + "type":"integer", + "box":true, + "max":1000, + "min":1 + }, + "GetCisScanResultDetailsRequest":{ + "type":"structure", + "required":[ + "accountId", + "scanArn", + "targetResourceId" + ], + "members":{ + "accountId":{ + "shape":"AccountId", + "documentation":"

The account ID.

" + }, + "filterCriteria":{ + "shape":"CisScanResultDetailsFilterCriteria", + "documentation":"

The filter criteria.

" + }, + "maxResults":{ + "shape":"GetCisScanResultDetailsMaxResults", + "documentation":"

The maximum number of CIS scan result details to be returned in a single page of results.

" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

The pagination token from a previous request that's used to retrieve the next page of results.

" + }, + "scanArn":{ + "shape":"CisScanArn", + "documentation":"

The scan ARN.

" + }, + "sortBy":{ + "shape":"CisScanResultDetailsSortBy", + "documentation":"

The sort by order.

" + }, + "sortOrder":{ + "shape":"CisSortOrder", + "documentation":"

The sort order.

" + }, + "targetResourceId":{ + "shape":"ResourceId", + "documentation":"

The target resource ID.

" + } + } + }, + "GetCisScanResultDetailsResponse":{ + "type":"structure", + "members":{ + "nextToken":{ + "shape":"NextToken", + "documentation":"

The pagination token from a previous request that's used to retrieve the next page of results.

" + }, + "scanResultDetails":{ + "shape":"CisScanResultDetailsList", + "documentation":"

The scan result details.

" + } + } + }, "GetConfigurationRequest":{ "type":"structure", "members":{ @@ -3986,7 +5276,7 @@ "members":{ "functionNames":{ "shape":"StringFilterList", - "documentation":"

The AWS Lambda function names to include in the aggregation results.

" + "documentation":"

The Amazon Web Services Lambda function names to include in the aggregation results.

" }, "functionTags":{ "shape":"MapFilterList", @@ -3998,7 +5288,7 @@ }, "runtimes":{ "shape":"StringFilterList", - "documentation":"

Returns findings aggregated by AWS Lambda function runtime environments.

" + "documentation":"

Returns findings aggregated by Amazon Web Services Lambda function runtime environments.

" }, "sortBy":{ "shape":"LambdaFunctionSortBy", @@ -4009,7 +5299,7 @@ "documentation":"

The order to use for sorting the results.

" } }, - "documentation":"

The details that define a findings aggregation based on AWS Lambda functions.

" + "documentation":"

The details that define a findings aggregation based on Amazon Web Services Lambda functions.

" }, "LambdaFunctionAggregationResponse":{ "type":"structure", @@ -4017,11 +5307,11 @@ "members":{ "accountId":{ "shape":"AccountId", - "documentation":"

The ID of the AWS account that owns the AWS Lambda function.

" + "documentation":"

The ID of the Amazon Web Services account that owns the Amazon Web Services Lambda function.

" }, "functionName":{ "shape":"String", - "documentation":"

The AWS Lambda function names included in the aggregation results.

" + "documentation":"

The Amazon Web Services Lambda function names included in the aggregation results.

" }, "lambdaTags":{ "shape":"TagMap", @@ -4029,7 +5319,7 @@ }, "lastModifiedAt":{ "shape":"DateTimeTimestamp", - "documentation":"

The date that the AWS Lambda function included in the aggregation results was last changed.

" + "documentation":"

The date that the Amazon Web Services Lambda function included in the aggregation results was last changed.

" }, "resourceId":{ "shape":"NonEmptyString", @@ -4041,7 +5331,7 @@ }, "severityCounts":{"shape":"SeverityCounts"} }, - "documentation":"

A response that contains the results of an AWS Lambda function finding aggregation.

" + "documentation":"

A response that contains the results of an Amazon Web Services Lambda function finding aggregation.

" }, "LambdaFunctionMetadata":{ "type":"structure", @@ -4052,18 +5342,18 @@ }, "functionTags":{ "shape":"TagMap", - "documentation":"

The resource tags on an AWS Lambda function.

" + "documentation":"

The resource tags on an Amazon Web Services Lambda function.

" }, "layers":{ "shape":"LambdaLayerList", - "documentation":"

The layers for an AWS Lambda function. A Lambda function can have up to five layers.

" + "documentation":"

The layers for an Amazon Web Services Lambda function. A Lambda function can have up to five layers.

" }, "runtime":{ "shape":"Runtime", - "documentation":"

An AWS Lambda function's runtime.

" + "documentation":"

An Amazon Web Services Lambda function's runtime.

" } }, - "documentation":"

The AWS Lambda function metadata.

" + "documentation":"

The Amazon Web Services Lambda function metadata.

" }, "LambdaFunctionSortBy":{ "type":"string", @@ -4078,15 +5368,15 @@ "members":{ "functionNames":{ "shape":"StringFilterList", - "documentation":"

The names of the AWS Lambda functions associated with the layers.

" + "documentation":"

The names of the Amazon Web Services Lambda functions associated with the layers.

" }, "layerArns":{ "shape":"StringFilterList", - "documentation":"

The Amazon Resource Name (ARN) of the AWS Lambda function layer.

" + "documentation":"

The Amazon Resource Name (ARN) of the Amazon Web Services Lambda function layer.

" }, "resourceIds":{ "shape":"StringFilterList", - "documentation":"

The resource IDs for the AWS Lambda function layers.

" + "documentation":"

The resource IDs for the Amazon Web Services Lambda function layers.

" }, "sortBy":{ "shape":"LambdaLayerSortBy", @@ -4097,7 +5387,7 @@ "documentation":"

The order to use for sorting the results.

" } }, - "documentation":"

The details that define a findings aggregation based on an AWS Lambda function's layers.

" + "documentation":"

The details that define a findings aggregation based on an Amazon Web Services Lambda function's layers.

" }, "LambdaLayerAggregationResponse":{ "type":"structure", @@ -4110,23 +5400,23 @@ "members":{ "accountId":{ "shape":"AccountId", - "documentation":"

The account ID of the AWS Lambda function layer.

" + "documentation":"

The account ID of the Amazon Web Services Lambda function layer.

" }, "functionName":{ "shape":"NonEmptyString", - "documentation":"

The names of the AWS Lambda functions associated with the layers.

" + "documentation":"

The names of the Amazon Web Services Lambda functions associated with the layers.

" }, "layerArn":{ "shape":"NonEmptyString", - "documentation":"

The Amazon Resource Name (ARN) of the AWS Lambda function layer.

" + "documentation":"

The Amazon Resource Name (ARN) of the Amazon Web Services Lambda function layer.

" }, "resourceId":{ "shape":"NonEmptyString", - "documentation":"

The Resource ID of the AWS Lambda function layer.

" + "documentation":"

The Resource ID of the Amazon Web Services Lambda function layer.

" }, "severityCounts":{"shape":"SeverityCounts"} }, - "documentation":"

A response that contains the results of an AWS Lambda function layer finding aggregation.

" + "documentation":"

A response that contains the results of an Amazon Web Services Lambda function layer finding aggregation.

" }, "LambdaLayerArn":{ "type":"string", @@ -4151,7 +5441,7 @@ "members":{ "securityGroupIds":{ "shape":"SecurityGroupIdList", - "documentation":"

The VPC security groups and subnets that are attached to an AWS Lambda function. For more information, see VPC Settings.

" + "documentation":"

The VPC security groups and subnets that are attached to an Amazon Web Services Lambda function. For more information, see VPC Settings.

" }, "subnetIds":{ "shape":"SubnetIdList", @@ -4162,7 +5452,7 @@ "documentation":"

The ID of the VPC.

" } }, - "documentation":"

The VPC security groups and subnets that are attached to an AWS Lambda function. For more information, see VPC Settings.

" + "documentation":"

The VPC security groups and subnets that are attached to an Amazon Web Services Lambda function. For more information, see VPC Settings.

" }, "LastSeen":{"type":"timestamp"}, "LayerList":{ @@ -4208,6 +5498,264 @@ } } }, + "ListCisScanConfigurationsFilterCriteria":{ + "type":"structure", + "members":{ + "scanConfigurationArnFilters":{ + "shape":"CisScanConfigurationArnFilterList", + "documentation":"

The list of scan configuration ARN filters.

" + }, + "scanNameFilters":{ + "shape":"CisScanNameFilterList", + "documentation":"

The list of scan name filters.

" + }, + "targetResourceTagFilters":{ + "shape":"ResourceTagFilterList", + "documentation":"

The list of target resource tag filters.

" + } + }, + "documentation":"

A list of CIS scan configurations filter criteria.

" + }, + "ListCisScanConfigurationsMaxResults":{ + "type":"integer", + "box":true, + "max":100, + "min":1 + }, + "ListCisScanConfigurationsRequest":{ + "type":"structure", + "members":{ + "filterCriteria":{ + "shape":"ListCisScanConfigurationsFilterCriteria", + "documentation":"

The CIS scan configuration filter criteria.

" + }, + "maxResults":{ + "shape":"ListCisScanConfigurationsMaxResults", + "documentation":"

The maximum number of CIS scan configurations to be returned in a single page of results.

" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

The pagination token from a previous request that's used to retrieve the next page of results.

" + }, + "sortBy":{ + "shape":"CisScanConfigurationsSortBy", + "documentation":"

The CIS scan configuration sort by order.

" + }, + "sortOrder":{ + "shape":"CisSortOrder", + "documentation":"

The CIS scan configuration sort order.

" + } + } + }, + "ListCisScanConfigurationsResponse":{ + "type":"structure", + "members":{ + "nextToken":{ + "shape":"NextToken", + "documentation":"

The pagination token from a previous request that's used to retrieve the next page of results.

" + }, + "scanConfigurations":{ + "shape":"CisScanConfigurationList", + "documentation":"

The CIS scan configurations.

" + } + } + }, + "ListCisScanResultsAggregatedByChecksRequest":{ + "type":"structure", + "required":["scanArn"], + "members":{ + "filterCriteria":{ + "shape":"CisScanResultsAggregatedByChecksFilterCriteria", + "documentation":"

The filter criteria.

" + }, + "maxResults":{ + "shape":"CisScanResultsMaxResults", + "documentation":"

The maximum number of scan results aggregated by checks to be returned in a single page of results.

" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

The pagination token from a previous request that's used to retrieve the next page of results.

" + }, + "scanArn":{ + "shape":"CisScanArn", + "documentation":"

The scan ARN.

" + }, + "sortBy":{ + "shape":"CisScanResultsAggregatedByChecksSortBy", + "documentation":"

The sort by order.

" + }, + "sortOrder":{ + "shape":"CisSortOrder", + "documentation":"

The sort order.

" + } + } + }, + "ListCisScanResultsAggregatedByChecksResponse":{ + "type":"structure", + "members":{ + "checkAggregations":{ + "shape":"CisCheckAggregationList", + "documentation":"

The check aggregations.

" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

The pagination token from a previous request that's used to retrieve the next page of results.

" + } + } + }, + "ListCisScanResultsAggregatedByTargetResourceRequest":{ + "type":"structure", + "required":["scanArn"], + "members":{ + "filterCriteria":{ + "shape":"CisScanResultsAggregatedByTargetResourceFilterCriteria", + "documentation":"

The filter criteria.

" + }, + "maxResults":{ + "shape":"CisScanResultsMaxResults", + "documentation":"

The maximum number of scan results aggregated by a target resource to be returned in a single page of results.

" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

The pagination token from a previous request that's used to retrieve the next page of results.

" + }, + "scanArn":{ + "shape":"CisScanArn", + "documentation":"

The scan ARN.

" + }, + "sortBy":{ + "shape":"CisScanResultsAggregatedByTargetResourceSortBy", + "documentation":"

The sort by order.

" + }, + "sortOrder":{ + "shape":"CisSortOrder", + "documentation":"

The sort order.

" + } + } + }, + "ListCisScanResultsAggregatedByTargetResourceResponse":{ + "type":"structure", + "members":{ + "nextToken":{ + "shape":"NextToken", + "documentation":"

The pagination token from a previous request that's used to retrieve the next page of results.

" + }, + "targetResourceAggregations":{ + "shape":"CisTargetResourceAggregationList", + "documentation":"

The resource aggregations.

" + } + } + }, + "ListCisScansDetailLevel":{ + "type":"string", + "enum":[ + "ORGANIZATION", + "MEMBER" + ] + }, + "ListCisScansFilterCriteria":{ + "type":"structure", + "members":{ + "failedChecksFilters":{ + "shape":"CisNumberFilterList", + "documentation":"

The list of failed checks filters.

" + }, + "scanArnFilters":{ + "shape":"CisScanArnFilterList", + "documentation":"

The list of scan ARN filters.

" + }, + "scanAtFilters":{ + "shape":"CisScanDateFilterList", + "documentation":"

The list of scan at filters.

" + }, + "scanConfigurationArnFilters":{ + "shape":"CisScanConfigurationArnFilterList", + "documentation":"

The list of scan configuration ARN filters.

" + }, + "scanNameFilters":{ + "shape":"CisScanNameFilterList", + "documentation":"

The list of scan name filters.

" + }, + "scanStatusFilters":{ + "shape":"CisScanStatusFilterList", + "documentation":"

The list of scan status filters.

" + }, + "scheduledByFilters":{ + "shape":"CisScheduledByFilterList", + "documentation":"

The list of scheduled by filters.

" + }, + "targetAccountIdFilters":{ + "shape":"AccountIdFilterList", + "documentation":"

The list of target account ID filters.

" + }, + "targetResourceIdFilters":{ + "shape":"ResourceIdFilterList", + "documentation":"

The list of target resource ID filters.

" + }, + "targetResourceTagFilters":{ + "shape":"ResourceTagFilterList", + "documentation":"

The list of target resource tag filters.

" + } + }, + "documentation":"

A list of CIS scans filter criteria.

" + }, + "ListCisScansMaxResults":{ + "type":"integer", + "box":true, + "max":100, + "min":1 + }, + "ListCisScansRequest":{ + "type":"structure", + "members":{ + "detailLevel":{ + "shape":"ListCisScansDetailLevel", + "documentation":"

The detail applied to the CIS scan.

" + }, + "filterCriteria":{ + "shape":"ListCisScansFilterCriteria", + "documentation":"

The CIS scan filter criteria.

" + }, + "maxResults":{ + "shape":"ListCisScansMaxResults", + "documentation":"

The maximum number of results to be returned.

" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

The pagination token from a previous request that's used to retrieve the next page of results.

" + }, + "sortBy":{ + "shape":"ListCisScansSortBy", + "documentation":"

The CIS scans sort by order.

" + }, + "sortOrder":{ + "shape":"CisSortOrder", + "documentation":"

The CIS scans sort order.

" + } + } + }, + "ListCisScansResponse":{ + "type":"structure", + "members":{ + "nextToken":{ + "shape":"NextToken", + "documentation":"

The pagination token from a previous request that's used to retrieve the next page of results.

" + }, + "scans":{ + "shape":"CisScanList", + "documentation":"

The CIS scans.

" + } + } + }, + "ListCisScansSortBy":{ + "type":"string", + "enum":[ + "STATUS", + "SCHEDULED_BY", + "SCAN_START_DATE", + "FAILED_CHECKS" + ] + }, "ListCoverageMaxResults":{ "type":"integer", "box":true, @@ -4670,6 +6218,24 @@ "type":"double", "min":0 }, + "MonthlySchedule":{ + "type":"structure", + "required":[ + "day", + "startTime" + ], + "members":{ + "day":{ + "shape":"Day", + "documentation":"

The monthly schedule's day.

" + }, + "startTime":{ + "shape":"Time", + "documentation":"

The monthly schedule's start time.

" + } + }, + "documentation":"

A monthly schedule.

" + }, "NetworkPath":{ "type":"structure", "members":{ @@ -4743,6 +6309,18 @@ "max":10, "min":1 }, + "OneAccountIdFilterList":{ + "type":"list", + "member":{"shape":"CisStringFilter"}, + "max":1, + "min":1 + }, + "OneTimeSchedule":{ + "type":"structure", + "members":{ + }, + "documentation":"

A one time schedule.

" + }, "Operation":{ "type":"string", "enum":[ @@ -4981,6 +6559,17 @@ "max":1024, "min":1 }, + "PlatformFilterList":{ + "type":"list", + "member":{"shape":"CisStringFilter"}, + "max":10, + "min":1 + }, + "PlatformVersion":{ + "type":"string", + "max":8, + "min":0 + }, "Port":{ "type":"integer", "box":true, @@ -5025,6 +6614,16 @@ "max":10, "min":1 }, + "Product":{ + "type":"string", + "max":32, + "min":0 + }, + "Reason":{ + "type":"string", + "max":1024, + "min":0 + }, "Recommendation":{ "type":"structure", "members":{ @@ -5093,6 +6692,12 @@ "type":"string", "pattern":"\\b[a-f0-9]{8}\\b-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-\\b[a-f0-9]{12}\\b" }, + "ReportTargetAccounts":{ + "type":"list", + "member":{"shape":"AccountId"}, + "max":1, + "min":0 + }, "ReportingErrorCode":{ "type":"string", "enum":[ @@ -5223,7 +6828,7 @@ }, "awsLambdaFunction":{ "shape":"AwsLambdaFunctionDetails", - "documentation":"

A summary of the information about an AWS Lambda function affected by a finding.

" + "documentation":"

A summary of the information about an Amazon Web Services Lambda function affected by a finding.

" } }, "documentation":"

Contains details about the resource involved in the finding.

" @@ -5249,11 +6854,11 @@ }, "lambdaFunctionName":{ "shape":"ResourceStringFilterList", - "documentation":"

The AWS Lambda function name used as resource filter criteria.

" + "documentation":"

The Amazon Web Services Lambda function name used as resource filter criteria.

" }, "lambdaFunctionTags":{ "shape":"ResourceMapFilterList", - "documentation":"

The AWS Lambda function tags used as resource filter criteria.

" + "documentation":"

The Amazon Web Services Lambda function tags used as resource filter criteria.

" }, "resourceId":{ "shape":"ResourceStringFilterList", @@ -5272,6 +6877,12 @@ "min":10, "pattern":"(^arn:.*:ecr:.*:\\d{12}:repository\\/(?:[a-z0-9]+(?:[._-][a-z0-9]+)*\\/)*[a-z0-9]+(?:[._-][a-z0-9]+)*(\\/sha256:[a-z0-9]{64})?$)|(^i-([a-z0-9]{8}|[a-z0-9]{17}|\\\\*)$|(^arn:(aws[a-zA-Z-]*)?:lambda:[a-z]{2}(-gov)?-[a-z]+-\\d{1}:\\d{12}:function:[a-zA-Z0-9-_\\.]+(:(\\$LATEST|[a-zA-Z0-9-_]+))?$))" }, + "ResourceIdFilterList":{ + "type":"list", + "member":{"shape":"CisStringFilter"}, + "max":10, + "min":1 + }, "ResourceList":{ "type":"list", "member":{"shape":"Resource"}, @@ -5340,7 +6951,7 @@ }, "lambdaFunction":{ "shape":"LambdaFunctionMetadata", - "documentation":"

An object that contains metadata details for an AWS Lambda function.

" + "documentation":"

An object that contains metadata details for an Amazon Web Services Lambda function.

" } }, "documentation":"

An object that contains details about the metadata for an Amazon ECR resource.

" @@ -5391,7 +7002,7 @@ }, "lambda":{ "shape":"Status", - "documentation":"

The status of Amazon Inspector scanning for AWS Lambda function.

" + "documentation":"

The status of Amazon Inspector scanning for Amazon Web Services Lambda function.

" }, "lambdaCode":{ "shape":"Status", @@ -5436,6 +7047,12 @@ "max":1024, "min":1 }, + "ResourceTagFilterList":{ + "type":"list", + "member":{"shape":"TagFilter"}, + "max":10, + "min":1 + }, "ResourceType":{ "type":"string", "enum":[ @@ -5449,6 +7066,11 @@ "type":"integer", "box":true }, + "RuleId":{ + "type":"string", + "max":500, + "min":1 + }, "Runtime":{ "type":"string", "enum":[ @@ -5538,6 +7160,29 @@ "CODE" ] }, + "Schedule":{ + "type":"structure", + "members":{ + "daily":{ + "shape":"DailySchedule", + "documentation":"

The schedule's daily.

" + }, + "monthly":{ + "shape":"MonthlySchedule", + "documentation":"

The schedule's monthly.

" + }, + "oneTime":{ + "shape":"OneTimeSchedule", + "documentation":"

The schedule's one time.

" + }, + "weekly":{ + "shape":"WeeklySchedule", + "documentation":"

The schedule's weekly.

" + } + }, + "documentation":"

A schedule.

", + "union":true + }, "SearchVulnerabilitiesFilterCriteria":{ "type":"structure", "required":["vulnerabilityIds"], @@ -5587,6 +7232,55 @@ "max":5, "min":0 }, + "SendCisSessionHealthRequest":{ + "type":"structure", + "required":[ + "scanJobId", + "sessionToken" + ], + "members":{ + "scanJobId":{ + "shape":"UUID", + "documentation":"

A unique identifier for the scan job.

" + }, + "sessionToken":{ + "shape":"UUID", + "documentation":"

The unique token that identifies the CIS session.

" + } + } + }, + "SendCisSessionHealthResponse":{ + "type":"structure", + "members":{ + } + }, + "SendCisSessionTelemetryRequest":{ + "type":"structure", + "required":[ + "messages", + "scanJobId", + "sessionToken" + ], + "members":{ + "messages":{ + "shape":"CisSessionMessages", + "documentation":"

The CIS session telemetry messages.

" + }, + "scanJobId":{ + "shape":"UUID", + "documentation":"

A unique identifier for the scan job.

" + }, + "sessionToken":{ + "shape":"UUID", + "documentation":"

The unique token that identifies the CIS session.

" + } + } + }, + "SendCisSessionTelemetryResponse":{ + "type":"structure", + "members":{ + } + }, "Service":{ "type":"string", "enum":[ @@ -5701,6 +7395,39 @@ "min":71, "pattern":"^sha256:[a-z0-9]{64}$" }, + "StartCisSessionMessage":{ + "type":"structure", + "required":["sessionToken"], + "members":{ + "sessionToken":{ + "shape":"UUID", + "documentation":"

The unique token that identifies the CIS session.

" + } + }, + "documentation":"

The start CIS session message.

" + }, + "StartCisSessionRequest":{ + "type":"structure", + "required":[ + "message", + "scanJobId" + ], + "members":{ + "message":{ + "shape":"StartCisSessionMessage", + "documentation":"

The start CIS session message.

" + }, + "scanJobId":{ + "shape":"UUID", + "documentation":"

A unique identifier for the scan job.

" + } + } + }, + "StartCisSessionResponse":{ + "type":"structure", + "members":{ + } + }, "State":{ "type":"structure", "required":[ @@ -5735,6 +7462,24 @@ "SUSPENDED" ] }, + "StatusCounts":{ + "type":"structure", + "members":{ + "failed":{ + "shape":"Integer", + "documentation":"

The number of checks that failed.

" + }, + "passed":{ + "shape":"Integer", + "documentation":"

The number of checks that passed.

" + }, + "skipped":{ + "shape":"Integer", + "documentation":"

The number of checks that were skipped.

" + } + }, + "documentation":"

The status counts.

" + }, "Step":{ "type":"structure", "required":[ @@ -5759,6 +7504,114 @@ "max":30, "min":1 }, + "StopCisMessageProgress":{ + "type":"structure", + "members":{ + "errorChecks":{ + "shape":"CheckCount", + "documentation":"

The progress' error checks.

" + }, + "failedChecks":{ + "shape":"CheckCount", + "documentation":"

The progress' failed checks.

" + }, + "informationalChecks":{ + "shape":"CheckCount", + "documentation":"

The progress' informational checks.

" + }, + "notApplicableChecks":{ + "shape":"CheckCount", + "documentation":"

The progress' not applicable checks.

" + }, + "notEvaluatedChecks":{ + "shape":"CheckCount", + "documentation":"

The progress' not evaluated checks.

" + }, + "successfulChecks":{ + "shape":"CheckCount", + "documentation":"

The progress' successful checks.

" + }, + "totalChecks":{ + "shape":"CheckCount", + "documentation":"

The progress' total checks.

" + }, + "unknownChecks":{ + "shape":"CheckCount", + "documentation":"

The progress' unknown checks.

" + } + }, + "documentation":"

The stop CIS message progress.

" + }, + "StopCisSessionMessage":{ + "type":"structure", + "required":[ + "progress", + "status" + ], + "members":{ + "benchmarkProfile":{ + "shape":"BenchmarkProfile", + "documentation":"

The message benchmark profile.

" + }, + "benchmarkVersion":{ + "shape":"BenchmarkVersion", + "documentation":"

The message benchmark version.

" + }, + "computePlatform":{ + "shape":"ComputePlatform", + "documentation":"

The message compute platform.

" + }, + "progress":{ + "shape":"StopCisMessageProgress", + "documentation":"

The progress of the message.

" + }, + "reason":{ + "shape":"Reason", + "documentation":"

The reason for the message.

" + }, + "status":{ + "shape":"StopCisSessionStatus", + "documentation":"

The status of the message.

" + } + }, + "documentation":"

The stop CIS session message.

" + }, + "StopCisSessionRequest":{ + "type":"structure", + "required":[ + "message", + "scanJobId", + "sessionToken" + ], + "members":{ + "message":{ + "shape":"StopCisSessionMessage", + "documentation":"

The stop CIS session message.

" + }, + "scanJobId":{ + "shape":"UUID", + "documentation":"

A unique identifier for the scan job.

" + }, + "sessionToken":{ + "shape":"UUID", + "documentation":"

The unique token that identifies the CIS session.

" + } + } + }, + "StopCisSessionResponse":{ + "type":"structure", + "members":{ + } + }, + "StopCisSessionStatus":{ + "type":"string", + "enum":[ + "SUCCESS", + "FAILED", + "INTERRUPTED", + "UNSUPPORTED_OS" + ] + }, "String":{"type":"string"}, "StringComparison":{ "type":"string", @@ -5841,6 +7694,33 @@ "max":5, "min":1 }, + "TagComparison":{ + "type":"string", + "enum":["EQUALS"] + }, + "TagFilter":{ + "type":"structure", + "required":[ + "comparison", + "key", + "value" + ], + "members":{ + "comparison":{ + "shape":"TagComparison", + "documentation":"

The tag filter comparison value.

" + }, + "key":{ + "shape":"NonEmptyString", + "documentation":"

The tag filter key.

" + }, + "value":{ + "shape":"NonEmptyString", + "documentation":"

The tag filter value.

" + } + }, + "documentation":"

The tag filter.

" + }, "TagKey":{ "type":"string", "max":128, @@ -5886,11 +7766,46 @@ "members":{ } }, + "TagValueList":{ + "type":"list", + "member":{"shape":"String"}, + "max":5, + "min":1 + }, "Target":{ "type":"string", "max":50, "min":0 }, + "TargetAccount":{ + "type":"string", + "pattern":"^\\d{12}|ALL_ACCOUNTS|SELF$" + }, + "TargetAccountList":{ + "type":"list", + "member":{"shape":"TargetAccount"}, + "max":10000, + "min":1 + }, + "TargetResourceTags":{ + "type":"map", + "key":{"shape":"NonEmptyString"}, + "value":{"shape":"TagValueList"}, + "max":5, + "min":1 + }, + "TargetStatusFilterList":{ + "type":"list", + "member":{"shape":"CisTargetStatusFilter"}, + "max":10, + "min":1 + }, + "TargetStatusReasonFilterList":{ + "type":"list", + "member":{"shape":"CisTargetStatusReasonFilter"}, + "max":10, + "min":1 + }, "Targets":{ "type":"list", "member":{"shape":"Target"}, @@ -5916,7 +7831,34 @@ "exception":true, "retryable":{"throttling":true} }, + "Time":{ + "type":"structure", + "required":[ + "timeOfDay", + "timezone" + ], + "members":{ + "timeOfDay":{ + "shape":"TimeOfDay", + "documentation":"

The time of day in 24-hour format (00:00).

" + }, + "timezone":{ + "shape":"Timezone", + "documentation":"

The timezone.

" + } + }, + "documentation":"

The time.

" + }, + "TimeOfDay":{ + "type":"string", + "pattern":"^([0-1]?[0-9]|2[0-3]):[0-5][0-9]$" + }, "Timestamp":{"type":"timestamp"}, + "Timezone":{ + "type":"string", + "max":50, + "min":1 + }, "TitleAggregation":{ "type":"structure", "members":{ @@ -5970,6 +7912,12 @@ }, "documentation":"

A response that contains details on the results of a finding aggregation by title.

" }, + "TitleFilterList":{ + "type":"list", + "member":{"shape":"CisStringFilter"}, + "max":10, + "min":1 + }, "TitleSortBy":{ "type":"string", "enum":[ @@ -5996,6 +7944,10 @@ "member":{"shape":"Ttp"}, "min":0 }, + "UUID":{ + "type":"string", + "pattern":"^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}$" + }, "UntagResourceRequest":{ "type":"structure", "required":[ @@ -6022,6 +7974,56 @@ "members":{ } }, + "UpdateCisScanConfigurationRequest":{ + "type":"structure", + "required":["scanConfigurationArn"], + "members":{ + "scanConfigurationArn":{ + "shape":"CisScanConfigurationArn", + "documentation":"

The CIS scan configuration ARN.

" + }, + "scanName":{ + "shape":"CisScanName", + "documentation":"

The scan name for the CIS scan configuration.

" + }, + "schedule":{ + "shape":"Schedule", + "documentation":"

The schedule for the CIS scan configuration.

" + }, + "securityLevel":{ + "shape":"CisSecurityLevel", + "documentation":"

The security level for the CIS scan configuration. Security level refers to the Benchmark levels that CIS assigns to a profile.

" + }, + "targets":{ + "shape":"UpdateCisTargets", + "documentation":"

The targets for the CIS scan configuration.

" + } + } + }, + "UpdateCisScanConfigurationResponse":{ + "type":"structure", + "required":["scanConfigurationArn"], + "members":{ + "scanConfigurationArn":{ + "shape":"CisScanConfigurationArn", + "documentation":"

The CIS scan configuration ARN.

" + } + } + }, + "UpdateCisTargets":{ + "type":"structure", + "members":{ + "accountIds":{ + "shape":"TargetAccountList", + "documentation":"

The target account ids.

" + }, + "targetResourceTags":{ + "shape":"TargetResourceTags", + "documentation":"

The target resource tags.

" + } + }, + "documentation":"

Updates CIS targets.

" + }, "UpdateConfigurationRequest":{ "type":"structure", "required":["ecrConfiguration"], @@ -6295,6 +8297,11 @@ "OTHER" ] }, + "Vendor":{ + "type":"string", + "max":16, + "min":0 + }, "VendorCreatedAt":{"type":"timestamp"}, "VendorSeverity":{ "type":"string", @@ -6470,7 +8477,7 @@ }, "sourceLambdaLayerArn":{ "shape":"LambdaLayerArn", - "documentation":"

The Amazon Resource Number (ARN) of the AWS Lambda function affected by a finding.

" + "documentation":"

The Amazon Resource Number (ARN) of the Amazon Web Services Lambda function affected by a finding.

" }, "sourceLayerHash":{ "shape":"SourceLayerHash", @@ -6491,6 +8498,24 @@ "type":"string", "max":1024, "min":1 + }, + "WeeklySchedule":{ + "type":"structure", + "required":[ + "days", + "startTime" + ], + "members":{ + "days":{ + "shape":"DaysList", + "documentation":"

The weekly schedule's days.

" + }, + "startTime":{ + "shape":"Time", + "documentation":"

The weekly schedule's start time.

" + } + }, + "documentation":"

A weekly schedule.

" } }, "documentation":"

Amazon Inspector is a vulnerability discovery service that automates continuous scanning for security vulnerabilities within your Amazon EC2, Amazon ECR, and Amazon Web Services Lambda environments.

" diff -Nru awscli-2.15.9/awscli/botocore/data/iot/2015-05-28/service-2.json awscli-2.15.22/awscli/botocore/data/iot/2015-05-28/service-2.json --- awscli-2.15.9/awscli/botocore/data/iot/2015-05-28/service-2.json 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/awscli/botocore/data/iot/2015-05-28/service-2.json 2024-02-21 17:34:54.000000000 +0000 @@ -1714,7 +1714,7 @@ {"shape":"UnauthorizedException"}, {"shape":"ThrottlingException"} ], - "documentation":"

Returns a unique endpoint specific to the Amazon Web Services account making the call.

Requires permission to access the DescribeEndpoint action.

" + "documentation":"

Returns or creates a unique endpoint specific to the Amazon Web Services account making the call.

The first time DescribeEndpoint is called, an endpoint is created. All subsequent calls to DescribeEndpoint return the same endpoint.

Requires permission to access the DescribeEndpoint action.

" }, "DescribeEventConfigurations":{ "name":"DescribeEventConfigurations", @@ -2382,7 +2382,7 @@ {"shape":"InternalFailureException"}, {"shape":"InvalidRequestException"} ], - "documentation":"

Gets a registration code used to register a CA certificate with IoT.

Requires permission to access the GetRegistrationCode action.

" + "documentation":"

Gets a registration code used to register a CA certificate with IoT.

IoT will create a registration code as part of this API call if the registration code doesn't exist or has been deleted. If you already have a registration code, this API call will return the same registration code.

Requires permission to access the GetRegistrationCode action.

" }, "GetStatistics":{ "name":"GetStatistics", @@ -4154,6 +4154,7 @@ "output":{"shape":"UpdatePackageResponse"}, "errors":[ {"shape":"ThrottlingException"}, + {"shape":"ConflictException"}, {"shape":"InternalServerException"}, {"shape":"ValidationException"}, {"shape":"ResourceNotFoundException"} @@ -4172,6 +4173,7 @@ "output":{"shape":"UpdatePackageConfigurationResponse"}, "errors":[ {"shape":"ThrottlingException"}, + {"shape":"ConflictException"}, {"shape":"InternalServerException"}, {"shape":"ValidationException"} ], @@ -4189,6 +4191,7 @@ "output":{"shape":"UpdatePackageVersionResponse"}, "errors":[ {"shape":"ThrottlingException"}, + {"shape":"ConflictException"}, {"shape":"InternalServerException"}, {"shape":"ValidationException"}, {"shape":"ResourceNotFoundException"} @@ -7008,6 +7011,10 @@ "tlsConfig":{ "shape":"TlsConfig", "documentation":"

An object that specifies the TLS configuration for a domain.

" + }, + "serverCertificateConfig":{ + "shape":"ServerCertificateConfig", + "documentation":"

The server certificate configuration.

" } } }, @@ -7230,7 +7237,7 @@ }, "destinationPackageVersions":{ "shape":"DestinationPackageVersions", - "documentation":"

The package version Amazon Resource Names (ARNs) that are installed on the device when the job successfully completes.

Note:The following Length Constraints relates to a single ARN. Up to 25 package version ARNs are allowed.

" + "documentation":"

The package version Amazon Resource Names (ARNs) that are installed on the device when the job successfully completes. The package version must be in either the Published or Deprecated state when the job deploys. For more information, see Package version lifecycle.

Note:The following Length Constraints relates to a single ARN. Up to 25 package version ARNs are allowed.

" } } }, @@ -7298,7 +7305,7 @@ }, "destinationPackageVersions":{ "shape":"DestinationPackageVersions", - "documentation":"

The package version Amazon Resource Names (ARNs) that are installed on the device when the job successfully completes.

Note:The following Length Constraints relates to a single ARN. Up to 25 package version ARNs are allowed.

" + "documentation":"

The package version Amazon Resource Names (ARNs) that are installed on the device when the job successfully completes. The package version must be in either the Published or Deprecated state when the job deploys. For more information, see Package version lifecycle.

Note:The following Length Constraints relates to a single ARN. Up to 25 package version ARNs are allowed.

" } } }, @@ -9483,6 +9490,10 @@ "tlsConfig":{ "shape":"TlsConfig", "documentation":"

An object that specifies the TLS configuration for a domain.

" + }, + "serverCertificateConfig":{ + "shape":"ServerCertificateConfig", + "documentation":"

The server certificate configuration.

" } } }, @@ -9742,7 +9753,7 @@ }, "destinationPackageVersions":{ "shape":"DestinationPackageVersions", - "documentation":"

The package version Amazon Resource Names (ARNs) that are installed on the device when the job successfully completes.

Note:The following Length Constraints relates to a single ARN. Up to 25 package version ARNs are allowed.

" + "documentation":"

The package version Amazon Resource Names (ARNs) that are installed on the device when the job successfully completes. The package version must be in either the Published or Deprecated state when the job deploys. For more information, see Package version lifecycle.

Note:The following Length Constraints relates to a single ARN. Up to 25 package version ARNs are allowed.

" } } }, @@ -10907,6 +10918,7 @@ }, "documentation":"

Parameters used when defining a mitigation action that enable Amazon Web Services IoT Core logging.

" }, + "EnableOCSPCheck":{"type":"boolean"}, "EnableTopicRuleRequest":{ "type":"structure", "required":["ruleName"], @@ -12328,7 +12340,7 @@ }, "destinationPackageVersions":{ "shape":"DestinationPackageVersions", - "documentation":"

The package version Amazon Resource Names (ARNs) that are installed on the device when the job successfully completes.

Note:The following Length Constraints relates to a single ARN. Up to 25 package version ARNs are allowed.

" + "documentation":"

The package version Amazon Resource Names (ARNs) that are installed on the device when the job successfully completes. The package version must be in either the Published or Deprecated state when the job deploys. For more information, see Package version lifecycle.

Note:The following Length Constraints relates to a single ARN. Up to 25 package version ARNs are allowed.

" } }, "documentation":"

The Job object contains details about a job.

" @@ -15421,9 +15433,7 @@ "THING_GROUP", "CLIENT_ID", "SOURCE_IP", - "PRINCIPAL_ID", - "EVENT_TYPE", - "DEVICE_DEFENDER" + "PRINCIPAL_ID" ] }, "LoggingOptionsPayload":{ @@ -17620,6 +17630,16 @@ "max":1, "min":0 }, + "ServerCertificateConfig":{ + "type":"structure", + "members":{ + "enableOCSPCheck":{ + "shape":"EnableOCSPCheck", + "documentation":"

A Boolean value that indicates whether Online Certificate Status Protocol (OCSP) server certificate check is enabled or not.

For more information, see Configuring OCSP server-certificate stapling in domain configuration from Amazon Web Services IoT Core Developer Guide.

" + } + }, + "documentation":"

The server certificate configuration.

" + }, "ServerCertificateStatus":{ "type":"string", "enum":[ @@ -19818,6 +19838,10 @@ "tlsConfig":{ "shape":"TlsConfig", "documentation":"

An object that specifies the TLS configuration for a domain.

" + }, + "serverCertificateConfig":{ + "shape":"ServerCertificateConfig", + "documentation":"

The server certificate configuration.

" } } }, diff -Nru awscli-2.15.9/awscli/botocore/data/iotfleetwise/2021-06-17/service-2.json awscli-2.15.22/awscli/botocore/data/iotfleetwise/2021-06-17/service-2.json --- awscli-2.15.9/awscli/botocore/data/iotfleetwise/2021-06-17/service-2.json 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/awscli/botocore/data/iotfleetwise/2021-06-17/service-2.json 2024-02-21 17:34:54.000000000 +0000 @@ -1362,7 +1362,7 @@ "members":{ "expression":{ "shape":"eventExpression", - "documentation":"

The logical expression used to recognize what data to collect. For example, $variable.Vehicle.OutsideAirTemperature >= 105.0.

" + "documentation":"

The logical expression used to recognize what data to collect. For example, $variable.`Vehicle.OutsideAirTemperature` >= 105.0.

" }, "minimumTriggerIntervalMs":{ "shape":"uint32", @@ -3148,6 +3148,10 @@ "maxResults":{ "shape":"maxResults", "documentation":"

The maximum number of items to return, between 1 and 100, inclusive.

" + }, + "signalNodeType":{ + "shape":"SignalNodeType", + "documentation":"

The type of node in the signal catalog.

" } } }, @@ -3412,8 +3416,7 @@ "CAN_NETWORK_INTERFACE_INFO_IS_NULL", "OBD_NETWORK_INTERFACE_INFO_IS_NULL", "NETWORK_INTERFACE_TO_REMOVE_ASSOCIATED_WITH_SIGNALS", - "VEHICLE_MIDDLEWARE_NETWORK_INTERFACE_INFO_IS_NULL", - "CUSTOMER_DECODED_SIGNAL_NETWORK_INTERFACE_INFO_IS_NULL" + "VEHICLE_MIDDLEWARE_NETWORK_INTERFACE_INFO_IS_NULL" ] }, "NetworkInterfaceType":{ @@ -3421,8 +3424,7 @@ "enum":[ "CAN_INTERFACE", "OBD_INTERFACE", - "VEHICLE_MIDDLEWARE", - "CUSTOMER_DECODED_INTERFACE" + "VEHICLE_MIDDLEWARE" ] }, "NetworkInterfaces":{ @@ -4022,8 +4024,7 @@ "STRUCT_SIZE_MISMATCH", "NO_SIGNAL_IN_CATALOG_FOR_DECODER_SIGNAL", "SIGNAL_DECODER_INCOMPATIBLE_WITH_SIGNAL_CATALOG", - "EMPTY_MESSAGE_SIGNAL", - "CUSTOMER_DECODED_SIGNAL_INFO_IS_NULL" + "EMPTY_MESSAGE_SIGNAL" ] }, "SignalDecoderType":{ @@ -4031,8 +4032,7 @@ "enum":[ "CAN_SIGNAL", "OBD_SIGNAL", - "MESSAGE_SIGNAL", - "CUSTOMER_DECODED_SIGNAL" + "MESSAGE_SIGNAL" ] }, "SignalDecoders":{ @@ -4066,6 +4066,17 @@ "max":1000, "min":0 }, + "SignalNodeType":{ + "type":"string", + "enum":[ + "SENSOR", + "ACTUATOR", + "ATTRIBUTE", + "BRANCH", + "CUSTOM_STRUCT", + "CUSTOM_PROPERTY" + ] + }, "SpoolingMode":{ "type":"string", "enum":[ @@ -4858,6 +4869,10 @@ "lastModificationTime":{ "shape":"timestamp", "documentation":"

The time the vehicle was last updated in seconds since epoch (January 1, 1970 at midnight UTC time).

" + }, + "attributes":{ + "shape":"attributesMap", + "documentation":"

Static information about a vehicle in a key-value pair. For example:

\"engineType\" : \"1.3 L R2\"

" } }, "documentation":"

Information about a vehicle.

To return this information about vehicles in your account, you can use the API operation.

" diff -Nru awscli-2.15.9/awscli/botocore/data/ivs/2020-07-14/service-2.json awscli-2.15.22/awscli/botocore/data/ivs/2020-07-14/service-2.json --- awscli-2.15.9/awscli/botocore/data/ivs/2020-07-14/service-2.json 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/awscli/botocore/data/ivs/2020-07-14/service-2.json 2024-02-21 17:34:54.000000000 +0000 @@ -70,6 +70,24 @@ ], "documentation":"

Creates a new channel and an associated stream key to start streaming.

" }, + "CreatePlaybackRestrictionPolicy":{ + "name":"CreatePlaybackRestrictionPolicy", + "http":{ + "method":"POST", + "requestUri":"/CreatePlaybackRestrictionPolicy", + "responseCode":200 + }, + "input":{"shape":"CreatePlaybackRestrictionPolicyRequest"}, + "output":{"shape":"CreatePlaybackRestrictionPolicyResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"PendingVerification"}, + {"shape":"ThrottlingException"}, + {"shape":"ServiceQuotaExceededException"} + ], + "documentation":"

Creates a new playback restriction policy, for constraining playback by countries and/or origins.

" + }, "CreateRecordingConfiguration":{ "name":"CreateRecordingConfiguration", "http":{ @@ -141,6 +159,23 @@ ], "documentation":"

Deletes a specified authorization key pair. This invalidates future viewer tokens generated using the key pair’s privateKey. For more information, see Setting Up Private Channels in the Amazon IVS User Guide.

" }, + "DeletePlaybackRestrictionPolicy":{ + "name":"DeletePlaybackRestrictionPolicy", + "http":{ + "method":"POST", + "requestUri":"/DeletePlaybackRestrictionPolicy", + "responseCode":204 + }, + "input":{"shape":"DeletePlaybackRestrictionPolicyRequest"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"PendingVerification"}, + {"shape":"ConflictException"} + ], + "documentation":"

Deletes the specified playback restriction policy.

" + }, "DeleteRecordingConfiguration":{ "name":"DeleteRecordingConfiguration", "http":{ @@ -206,6 +241,23 @@ ], "documentation":"

Gets a specified playback authorization key pair and returns the arn and fingerprint. The privateKey held by the caller can be used to generate viewer authorization tokens, to grant viewers access to private channels. For more information, see Setting Up Private Channels in the Amazon IVS User Guide.

" }, + "GetPlaybackRestrictionPolicy":{ + "name":"GetPlaybackRestrictionPolicy", + "http":{ + "method":"POST", + "requestUri":"/GetPlaybackRestrictionPolicy", + "responseCode":200 + }, + "input":{"shape":"GetPlaybackRestrictionPolicyRequest"}, + "output":{"shape":"GetPlaybackRestrictionPolicyResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"PendingVerification"} + ], + "documentation":"

Gets the specified playback restriction policy.

" + }, "GetRecordingConfiguration":{ "name":"GetRecordingConfiguration", "http":{ @@ -321,6 +373,23 @@ ], "documentation":"

Gets summary information about playback key pairs. For more information, see Setting Up Private Channels in the Amazon IVS User Guide.

" }, + "ListPlaybackRestrictionPolicies":{ + "name":"ListPlaybackRestrictionPolicies", + "http":{ + "method":"POST", + "requestUri":"/ListPlaybackRestrictionPolicies", + "responseCode":200 + }, + "input":{"shape":"ListPlaybackRestrictionPoliciesRequest"}, + "output":{"shape":"ListPlaybackRestrictionPoliciesResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"PendingVerification"}, + {"shape":"ConflictException"} + ], + "documentation":"

Gets summary information about playback restriction policies.

" + }, "ListRecordingConfigurations":{ "name":"ListRecordingConfigurations", "http":{ @@ -504,6 +573,24 @@ {"shape":"ConflictException"} ], "documentation":"

Updates a channel's configuration. Live channels cannot be updated. You must stop the ongoing stream, update the channel, and restart the stream for the changes to take effect.

" + }, + "UpdatePlaybackRestrictionPolicy":{ + "name":"UpdatePlaybackRestrictionPolicy", + "http":{ + "method":"POST", + "requestUri":"/UpdatePlaybackRestrictionPolicy", + "responseCode":200 + }, + "input":{"shape":"UpdatePlaybackRestrictionPolicyRequest"}, + "output":{"shape":"UpdatePlaybackRestrictionPolicyResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"PendingVerification"}, + {"shape":"ConflictException"} + ], + "documentation":"

Updates a specified playback restriction policy.

" } }, "shapes":{ @@ -711,12 +798,16 @@ }, "latencyMode":{ "shape":"ChannelLatencyMode", - "documentation":"

Channel latency mode. Use NORMAL to broadcast and deliver live video up to Full HD. Use LOW for near-real-time interaction with viewers. Default: LOW. (Note: In the Amazon IVS console, LOW and NORMAL correspond to Ultra-low and Standard, respectively.)

" + "documentation":"

Channel latency mode. Use NORMAL to broadcast and deliver live video up to Full HD. Use LOW for near-real-time interaction with viewers. Default: LOW.

" }, "name":{ "shape":"ChannelName", "documentation":"

Channel name.

" }, + "playbackRestrictionPolicyArn":{ + "shape":"ChannelPlaybackRestrictionPolicyArn", + "documentation":"

Playback-restriction-policy ARN. A valid ARN value here both specifies the ARN and enables playback restriction. Default: \"\" (empty string, no playback restriction policy is applied).

" + }, "playbackUrl":{ "shape":"PlaybackURL", "documentation":"

Channel playback URL.

" @@ -727,7 +818,7 @@ }, "recordingConfigurationArn":{ "shape":"ChannelRecordingConfigurationArn", - "documentation":"

Recording-configuration ARN. A value other than an empty string indicates that recording is enabled. Default: \"\" (empty string, recording is disabled).

" + "documentation":"

Recording-configuration ARN. A valid ARN value here both specifies the ARN and enables recording. Default: \"\" (empty string, recording is disabled).

" }, "tags":{ "shape":"Tags", @@ -784,6 +875,12 @@ }, "exception":true }, + "ChannelPlaybackRestrictionPolicyArn":{ + "type":"string", + "max":128, + "min":0, + "pattern":"^$|^arn:aws:ivs:[a-z0-9-]+:[0-9]+:playback-restriction-policy/[a-zA-Z0-9-]+$" + }, "ChannelRecordingConfigurationArn":{ "type":"string", "max":128, @@ -807,19 +904,23 @@ }, "latencyMode":{ "shape":"ChannelLatencyMode", - "documentation":"

Channel latency mode. Use NORMAL to broadcast and deliver live video up to Full HD. Use LOW for near-real-time interaction with viewers. Default: LOW. (Note: In the Amazon IVS console, LOW and NORMAL correspond to Ultra-low and Standard, respectively.)

" + "documentation":"

Channel latency mode. Use NORMAL to broadcast and deliver live video up to Full HD. Use LOW for near-real-time interaction with viewers. Default: LOW.

" }, "name":{ "shape":"ChannelName", "documentation":"

Channel name.

" }, + "playbackRestrictionPolicyArn":{ + "shape":"ChannelPlaybackRestrictionPolicyArn", + "documentation":"

Playback-restriction-policy ARN. A valid ARN value here both specifies the ARN and enables playback restriction. Default: \"\" (empty string, no playback restriction policy is applied).

" + }, "preset":{ "shape":"TranscodePreset", "documentation":"

Optional transcode preset for the channel. This is selectable only for ADVANCED_HD and ADVANCED_SD channel types. For those channel types, the default preset is HIGHER_BANDWIDTH_DELIVERY. For other channel types (BASIC and STANDARD), preset is the empty string (\"\").

" }, "recordingConfigurationArn":{ "shape":"ChannelRecordingConfigurationArn", - "documentation":"

Recording-configuration ARN. A value other than an empty string indicates that recording is enabled. Default: \"\" (empty string, recording is disabled).

" + "documentation":"

Recording-configuration ARN. A valid ARN value here both specifies the ARN and enables recording. Default: \"\" (empty string, recording is disabled).

" }, "tags":{ "shape":"Tags", @@ -873,19 +974,23 @@ }, "latencyMode":{ "shape":"ChannelLatencyMode", - "documentation":"

Channel latency mode. Use NORMAL to broadcast and deliver live video up to Full HD. Use LOW for near-real-time interaction with viewers. (Note: In the Amazon IVS console, LOW and NORMAL correspond to Ultra-low and Standard, respectively.) Default: LOW.

" + "documentation":"

Channel latency mode. Use NORMAL to broadcast and deliver live video up to Full HD. Use LOW for near-real-time interaction with viewers. Default: LOW.

" }, "name":{ "shape":"ChannelName", "documentation":"

Channel name.

" }, + "playbackRestrictionPolicyArn":{ + "shape":"ChannelPlaybackRestrictionPolicyArn", + "documentation":"

Playback-restriction-policy ARN. A valid ARN value here both specifies the ARN and enables playback restriction. Default: \"\" (empty string, no playback restriction policy is applied).

" + }, "preset":{ "shape":"TranscodePreset", "documentation":"

Optional transcode preset for the channel. This is selectable only for ADVANCED_HD and ADVANCED_SD channel types. For those channel types, the default preset is HIGHER_BANDWIDTH_DELIVERY. For other channel types (BASIC and STANDARD), preset is the empty string (\"\").

" }, "recordingConfigurationArn":{ "shape":"ChannelRecordingConfigurationArn", - "documentation":"

Recording-configuration ARN. Default: \"\" (empty string, recording is disabled).

" + "documentation":"

Recording-configuration ARN. A valid ARN value here both specifies the ARN and enables recording. Default: \"\" (empty string, recording is disabled).

" }, "tags":{ "shape":"Tags", @@ -910,6 +1015,40 @@ } } }, + "CreatePlaybackRestrictionPolicyRequest":{ + "type":"structure", + "members":{ + "allowedCountries":{ + "shape":"PlaybackRestrictionPolicyAllowedCountryList", + "documentation":"

A list of country codes that control geoblocking restriction. Allowed values are the officially assigned ISO 3166-1 alpha-2 codes. Default: All countries (an empty array).

" + }, + "allowedOrigins":{ + "shape":"PlaybackRestrictionPolicyAllowedOriginList", + "documentation":"

A list of origin sites that control CORS restriction. Allowed values are the same as valid values of the Origin header defined at https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Origin. Default: All origins (an empty array).

" + }, + "enableStrictOriginEnforcement":{ + "shape":"PlaybackRestrictionPolicyEnableStrictOriginEnforcement", + "documentation":"

Whether channel playback is constrained by origin site. Default: false.

" + }, + "name":{ + "shape":"PlaybackRestrictionPolicyName", + "documentation":"

Playback-restriction-policy name. The value does not need to be unique.

" + }, + "tags":{ + "shape":"Tags", + "documentation":"

Array of 1-50 maps, each of the form string:string (key:value). See Tagging Amazon Web Services Resources for more information, including restrictions that apply to tags and \"Tag naming limits and requirements\"; Amazon IVS has no service-specific constraints beyond what is documented there.

" + } + } + }, + "CreatePlaybackRestrictionPolicyResponse":{ + "type":"structure", + "members":{ + "playbackRestrictionPolicy":{ + "shape":"PlaybackRestrictionPolicy", + "documentation":"

" + } + } + }, "CreateRecordingConfigurationRequest":{ "type":"structure", "required":["destinationConfiguration"], @@ -997,6 +1136,16 @@ "members":{ } }, + "DeletePlaybackRestrictionPolicyRequest":{ + "type":"structure", + "required":["arn"], + "members":{ + "arn":{ + "shape":"PlaybackRestrictionPolicyArn", + "documentation":"

ARN of the playback restriction policy to be deleted.

" + } + } + }, "DeleteRecordingConfigurationRequest":{ "type":"structure", "required":["arn"], @@ -1065,6 +1214,25 @@ } } }, + "GetPlaybackRestrictionPolicyRequest":{ + "type":"structure", + "required":["arn"], + "members":{ + "arn":{ + "shape":"PlaybackRestrictionPolicyArn", + "documentation":"

ARN of the playback restriction policy to be returned.

" + } + } + }, + "GetPlaybackRestrictionPolicyResponse":{ + "type":"structure", + "members":{ + "playbackRestrictionPolicy":{ + "shape":"PlaybackRestrictionPolicy", + "documentation":"

" + } + } + }, "GetRecordingConfigurationRequest":{ "type":"structure", "required":["arn"], @@ -1210,6 +1378,10 @@ "shape":"ChannelName", "documentation":"

Filters the channel list to match the specified name.

" }, + "filterByPlaybackRestrictionPolicyArn":{ + "shape":"ChannelPlaybackRestrictionPolicyArn", + "documentation":"

Filters the channel list to match the specified policy.

" + }, "filterByRecordingConfigurationArn":{ "shape":"ChannelRecordingConfigurationArn", "documentation":"

Filters the channel list to match the specified recording-configuration ARN.

" @@ -1265,6 +1437,33 @@ } } }, + "ListPlaybackRestrictionPoliciesRequest":{ + "type":"structure", + "members":{ + "maxResults":{ + "shape":"MaxPlaybackRestrictionPolicyResults", + "documentation":"

Maximum number of policies to return. Default: 1.

" + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

The first policy to retrieve. This is used for pagination; see the nextToken response field.

" + } + } + }, + "ListPlaybackRestrictionPoliciesResponse":{ + "type":"structure", + "required":["playbackRestrictionPolicies"], + "members":{ + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

If there are more channels than maxResults, use nextToken in the request to get the next set.

" + }, + "playbackRestrictionPolicies":{ + "shape":"PlaybackRestrictionPolicyList", + "documentation":"

List of the matching policies.

" + } + } + }, "ListRecordingConfigurationsRequest":{ "type":"structure", "members":{ @@ -1421,6 +1620,12 @@ "max":100, "min":1 }, + "MaxPlaybackRestrictionPolicyResults":{ + "type":"integer", + "box":true, + "max":100, + "min":1 + }, "MaxRecordingConfigurationResults":{ "type":"integer", "box":true, @@ -1518,6 +1723,114 @@ "documentation":"

Summary information about a playback key pair.

" }, "PlaybackPublicKeyMaterial":{"type":"string"}, + "PlaybackRestrictionPolicy":{ + "type":"structure", + "required":[ + "allowedCountries", + "allowedOrigins", + "arn" + ], + "members":{ + "allowedCountries":{ + "shape":"PlaybackRestrictionPolicyAllowedCountryList", + "documentation":"

A list of country codes that control geoblocking restriction. Allowed values are the officially assigned ISO 3166-1 alpha-2 codes. Default: All countries (an empty array).

" + }, + "allowedOrigins":{ + "shape":"PlaybackRestrictionPolicyAllowedOriginList", + "documentation":"

A list of origin sites that control CORS restriction. Allowed values are the same as valid values of the Origin header defined at https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Origin. Default: All origins (an empty array).

" + }, + "arn":{ + "shape":"PlaybackRestrictionPolicyArn", + "documentation":"

Playback-restriction-policy ARN

" + }, + "enableStrictOriginEnforcement":{ + "shape":"PlaybackRestrictionPolicyEnableStrictOriginEnforcement", + "documentation":"

Whether channel playback is constrained by origin site. Default: false.

" + }, + "name":{ + "shape":"PlaybackRestrictionPolicyName", + "documentation":"

Playback-restriction-policy name. The value does not need to be unique.

" + }, + "tags":{ + "shape":"Tags", + "documentation":"

Tags attached to the resource. Array of 1-50 maps, each of the form string:string (key:value). See Tagging Amazon Web Services Resources for more information, including restrictions that apply to tags and \"Tag naming limits and requirements\"; Amazon IVS has no service-specific constraints beyond what is documented there.

" + } + }, + "documentation":"

An object representing a policy to constrain playback by country and/or origin sites.

" + }, + "PlaybackRestrictionPolicyAllowedCountry":{ + "type":"string", + "max":2, + "min":2 + }, + "PlaybackRestrictionPolicyAllowedCountryList":{ + "type":"list", + "member":{"shape":"PlaybackRestrictionPolicyAllowedCountry"} + }, + "PlaybackRestrictionPolicyAllowedOrigin":{ + "type":"string", + "max":128, + "min":0 + }, + "PlaybackRestrictionPolicyAllowedOriginList":{ + "type":"list", + "member":{"shape":"PlaybackRestrictionPolicyAllowedOrigin"} + }, + "PlaybackRestrictionPolicyArn":{ + "type":"string", + "max":128, + "min":1, + "pattern":"^arn:aws:ivs:[a-z0-9-]+:[0-9]+:playback-restriction-policy/[a-zA-Z0-9-]+$" + }, + "PlaybackRestrictionPolicyEnableStrictOriginEnforcement":{ + "type":"boolean", + "box":true + }, + "PlaybackRestrictionPolicyList":{ + "type":"list", + "member":{"shape":"PlaybackRestrictionPolicySummary"} + }, + "PlaybackRestrictionPolicyName":{ + "type":"string", + "max":128, + "min":0, + "pattern":"^[a-zA-Z0-9-_]*$" + }, + "PlaybackRestrictionPolicySummary":{ + "type":"structure", + "required":[ + "allowedCountries", + "allowedOrigins", + "arn" + ], + "members":{ + "allowedCountries":{ + "shape":"PlaybackRestrictionPolicyAllowedCountryList", + "documentation":"

A list of country codes that control geoblocking restriction. Allowed values are the officially assigned ISO 3166-1 alpha-2 codes. Default: All countries (an empty array).

" + }, + "allowedOrigins":{ + "shape":"PlaybackRestrictionPolicyAllowedOriginList", + "documentation":"

A list of origin sites that control CORS restriction. Allowed values are the same as valid values of the Origin header defined at https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Origin. Default: All origins (an empty array).

" + }, + "arn":{ + "shape":"PlaybackRestrictionPolicyArn", + "documentation":"

Playback-restriction-policy ARN

" + }, + "enableStrictOriginEnforcement":{ + "shape":"PlaybackRestrictionPolicyEnableStrictOriginEnforcement", + "documentation":"

Whether channel playback is constrained by origin site. Default: false.

" + }, + "name":{ + "shape":"PlaybackRestrictionPolicyName", + "documentation":"

Playback-restriction-policy name. The value does not need to be unique.

" + }, + "tags":{ + "shape":"Tags", + "documentation":"

Tags attached to the resource. Array of 1-50 maps, each of the form string:string (key:value). See Tagging Amazon Web Services Resources for more information, including restrictions that apply to tags and \"Tag naming limits and requirements\"; Amazon IVS has no service-specific constraints beyond what is documented there.

" + } + }, + "documentation":"

Summary information about a PlaybackRestrictionPolicy.

" + }, "PlaybackURL":{"type":"string"}, "PutMetadataRequest":{ "type":"structure", @@ -2182,7 +2495,7 @@ }, "tagKeys":{ "shape":"TagKeyList", - "documentation":"

Array of tags to be removed. Array of maps, each of the form string:string (key:value). See Tagging Amazon Web Services Resources for more information, including restrictions that apply to tags and \"Tag naming limits and requirements\"; Amazon IVS has no service-specific constraints beyond what is documented there.

", + "documentation":"

Array of tags to be removed. Array of maps, each of the form string:string (key:value). See Tagging Amazon Web Services Resources for more information, including restrictions that apply to tags and \"Tag naming limits and requirements\"; Amazon IVS has no service-specific constraints beyond what is documented there.

", "location":"querystring", "locationName":"tagKeys" } @@ -2211,19 +2524,23 @@ }, "latencyMode":{ "shape":"ChannelLatencyMode", - "documentation":"

Channel latency mode. Use NORMAL to broadcast and deliver live video up to Full HD. Use LOW for near-real-time interaction with viewers. (Note: In the Amazon IVS console, LOW and NORMAL correspond to Ultra-low and Standard, respectively.)

" + "documentation":"

Channel latency mode. Use NORMAL to broadcast and deliver live video up to Full HD. Use LOW for near-real-time interaction with viewers.

" }, "name":{ "shape":"ChannelName", "documentation":"

Channel name.

" }, + "playbackRestrictionPolicyArn":{ + "shape":"ChannelPlaybackRestrictionPolicyArn", + "documentation":"

Playback-restriction-policy ARN. A valid ARN value here both specifies the ARN and enables playback restriction. If this is set to an empty string, playback restriction policy is disabled.

" + }, "preset":{ "shape":"TranscodePreset", "documentation":"

Optional transcode preset for the channel. This is selectable only for ADVANCED_HD and ADVANCED_SD channel types. For those channel types, the default preset is HIGHER_BANDWIDTH_DELIVERY. For other channel types (BASIC and STANDARD), preset is the empty string (\"\").

" }, "recordingConfigurationArn":{ "shape":"ChannelRecordingConfigurationArn", - "documentation":"

Recording-configuration ARN. If this is set to an empty string, recording is disabled. A value other than an empty string indicates that recording is enabled

" + "documentation":"

Recording-configuration ARN. A valid ARN value here both specifies the ARN and enables recording. If this is set to an empty string, recording is disabled.

" }, "type":{ "shape":"ChannelType", @@ -2234,7 +2551,45 @@ "UpdateChannelResponse":{ "type":"structure", "members":{ - "channel":{"shape":"Channel"} + "channel":{ + "shape":"Channel", + "documentation":"

Object specifying the updated channel.

" + } + } + }, + "UpdatePlaybackRestrictionPolicyRequest":{ + "type":"structure", + "required":["arn"], + "members":{ + "allowedCountries":{ + "shape":"PlaybackRestrictionPolicyAllowedCountryList", + "documentation":"

A list of country codes that control geoblocking restriction. Allowed values are the officially assigned ISO 3166-1 alpha-2 codes. Default: All countries (an empty array).

" + }, + "allowedOrigins":{ + "shape":"PlaybackRestrictionPolicyAllowedOriginList", + "documentation":"

A list of origin sites that control CORS restriction. Allowed values are the same as valid values of the Origin header defined at https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Origin. Default: All origins (an empty array).

" + }, + "arn":{ + "shape":"PlaybackRestrictionPolicyArn", + "documentation":"

ARN of the playback-restriction-policy to be updated.

" + }, + "enableStrictOriginEnforcement":{ + "shape":"PlaybackRestrictionPolicyEnableStrictOriginEnforcement", + "documentation":"

Whether channel playback is constrained by origin site. Default: false.

" + }, + "name":{ + "shape":"PlaybackRestrictionPolicyName", + "documentation":"

Playback-restriction-policy name. The value does not need to be unique.

" + } + } + }, + "UpdatePlaybackRestrictionPolicyResponse":{ + "type":"structure", + "members":{ + "playbackRestrictionPolicy":{ + "shape":"PlaybackRestrictionPolicy", + "documentation":"

Object specifying the updated policy.

" + } } }, "ValidationException":{ @@ -2302,5 +2657,5 @@ "errorCode":{"type":"string"}, "errorMessage":{"type":"string"} }, - "documentation":"

Introduction

The Amazon Interactive Video Service (IVS) API is REST compatible, using a standard HTTP API and an Amazon Web Services EventBridge event stream for responses. JSON is used for both requests and responses, including errors.

The API is an Amazon Web Services regional service. For a list of supported regions and Amazon IVS HTTPS service endpoints, see the Amazon IVS page in the Amazon Web Services General Reference.

All API request parameters and URLs are case sensitive.

For a summary of notable documentation changes in each release, see Document History.

Allowed Header Values

Resources

The following resources contain information about your IVS live stream (see Getting Started with Amazon IVS):

Tagging

A tag is a metadata label that you assign to an Amazon Web Services resource. A tag comprises a key and a value, both set by you. For example, you might set a tag as topic:nature to label a particular video category. See Tagging Amazon Web Services Resources for more information, including restrictions that apply to tags and \"Tag naming limits and requirements\"; Amazon IVS has no service-specific constraints beyond what is documented there.

Tags can help you identify and organize your Amazon Web Services resources. For example, you can use the same tag for different resources to indicate that they are related. You can also use tags to manage access (see Access Tags).

The Amazon IVS API has these tag-related endpoints: TagResource, UntagResource, and ListTagsForResource. The following resources support tagging: Channels, Stream Keys, Playback Key Pairs, and Recording Configurations.

At most 50 tags can be applied to a resource.

Authentication versus Authorization

Note the differences between these concepts:

Authentication

All Amazon IVS API requests must be authenticated with a signature. The Amazon Web Services Command-Line Interface (CLI) and Amazon IVS Player SDKs take care of signing the underlying API calls for you. However, if your application calls the Amazon IVS API directly, it’s your responsibility to sign the requests.

You generate a signature using valid Amazon Web Services credentials that have permission to perform the requested action. For example, you must sign PutMetadata requests with a signature generated from a user account that has the ivs:PutMetadata permission.

For more information:

Amazon Resource Names (ARNs)

ARNs uniquely identify AWS resources. An ARN is required when you need to specify a resource unambiguously across all of AWS, such as in IAM policies and API calls. For more information, see Amazon Resource Names in the AWS General Reference.

Channel Endpoints

StreamKey Endpoints

Stream Endpoints

Private Channel Endpoints

For more information, see Setting Up Private Channels in the Amazon IVS User Guide.

RecordingConfiguration Endpoints

Amazon Web Services Tags Endpoints

" + "documentation":"

Introduction

The Amazon Interactive Video Service (IVS) API is REST compatible, using a standard HTTP API and an Amazon Web Services EventBridge event stream for responses. JSON is used for both requests and responses, including errors.

The API is an Amazon Web Services regional service. For a list of supported regions and Amazon IVS HTTPS service endpoints, see the Amazon IVS page in the Amazon Web Services General Reference.

All API request parameters and URLs are case sensitive.

For a summary of notable documentation changes in each release, see Document History.

Allowed Header Values

Resources

The following resources contain information about your IVS live stream (see Getting Started with Amazon IVS):

Tagging

A tag is a metadata label that you assign to an Amazon Web Services resource. A tag comprises a key and a value, both set by you. For example, you might set a tag as topic:nature to label a particular video category. See Tagging Amazon Web Services Resources for more information, including restrictions that apply to tags and \"Tag naming limits and requirements\"; Amazon IVS has no service-specific constraints beyond what is documented there.

Tags can help you identify and organize your Amazon Web Services resources. For example, you can use the same tag for different resources to indicate that they are related. You can also use tags to manage access (see Access Tags).

The Amazon IVS API has these tag-related endpoints: TagResource, UntagResource, and ListTagsForResource. The following resources support tagging: Channels, Stream Keys, Playback Key Pairs, and Recording Configurations.

At most 50 tags can be applied to a resource.

Authentication versus Authorization

Note the differences between these concepts:

Authentication

All Amazon IVS API requests must be authenticated with a signature. The Amazon Web Services Command-Line Interface (CLI) and Amazon IVS Player SDKs take care of signing the underlying API calls for you. However, if your application calls the Amazon IVS API directly, it’s your responsibility to sign the requests.

You generate a signature using valid Amazon Web Services credentials that have permission to perform the requested action. For example, you must sign PutMetadata requests with a signature generated from a user account that has the ivs:PutMetadata permission.

For more information:

Amazon Resource Names (ARNs)

ARNs uniquely identify AWS resources. An ARN is required when you need to specify a resource unambiguously across all of AWS, such as in IAM policies and API calls. For more information, see Amazon Resource Names in the AWS General Reference.

Channel Endpoints

Playback Restriction Policy Endpoints

Private Channel Endpoints

For more information, see Setting Up Private Channels in the Amazon IVS User Guide.

RecordingConfiguration Endpoints

Stream Endpoints

StreamKey Endpoints

Amazon Web Services Tags Endpoints

" } diff -Nru awscli-2.15.9/awscli/botocore/data/keyspaces/2022-02-10/endpoint-rule-set-1.json awscli-2.15.22/awscli/botocore/data/keyspaces/2022-02-10/endpoint-rule-set-1.json --- awscli-2.15.9/awscli/botocore/data/keyspaces/2022-02-10/endpoint-rule-set-1.json 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/awscli/botocore/data/keyspaces/2022-02-10/endpoint-rule-set-1.json 2024-02-21 17:34:54.000000000 +0000 @@ -40,7 +40,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -83,7 +82,8 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -96,7 +96,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -110,7 +109,6 @@ "assign": "PartitionResult" } ], - "type": "tree", "rules": [ { "conditions": [ @@ -133,7 +131,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -168,7 +165,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -179,14 +175,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS and DualStack are enabled, but this partition does not support one or both", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -200,14 +198,12 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ - true, { "fn": "getAttr", "argv": [ @@ -216,18 +212,17 @@ }, "supportsFIPS" ] - } + }, + true ] } ], - "type": "tree", "rules": [ { "conditions": [ { "fn": "stringEquals", "argv": [ - "aws-us-gov", { "fn": "getAttr", "argv": [ @@ -236,7 +231,8 @@ }, "name" ] - } + }, + "aws-us-gov" ] } ], @@ -256,14 +252,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS is enabled but this partition does not support FIPS", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -277,7 +275,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -297,7 +294,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -308,14 +304,16 @@ }, "type": "endpoint" } - ] + ], + 
"type": "tree" }, { "conditions": [], "error": "DualStack is enabled but this partition does not support DualStack", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [], @@ -326,9 +324,11 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" } - ] + ], + "type": "tree" }, { "conditions": [], diff -Nru awscli-2.15.9/awscli/botocore/data/keyspaces/2022-02-10/service-2.json awscli-2.15.22/awscli/botocore/data/keyspaces/2022-02-10/service-2.json --- awscli-2.15.9/awscli/botocore/data/keyspaces/2022-02-10/service-2.json 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/awscli/botocore/data/keyspaces/2022-02-10/service-2.json 2024-02-21 17:34:54.000000000 +0000 @@ -118,6 +118,23 @@ ], "documentation":"

Returns information about the table, including the table's name and current status, the keyspace name, configuration settings, and metadata.

To read table metadata using GetTable, Select action permissions for the table and system tables are required to complete the operation.

" }, + "GetTableAutoScalingSettings":{ + "name":"GetTableAutoScalingSettings", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetTableAutoScalingSettingsRequest"}, + "output":{"shape":"GetTableAutoScalingSettingsResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Returns auto scaling related settings of the specified table in JSON format. If the table is a multi-Region table, the Amazon Web Services Region specific auto scaling settings of the table are included.

Amazon Keyspaces auto scaling helps you provision throughput capacity for variable workloads efficiently by increasing and decreasing your table's read and write capacity automatically in response to application traffic. For more information, see Managing throughput capacity automatically with Amazon Keyspaces auto scaling in the Amazon Keyspaces Developer Guide.

GetTableAutoScalingSettings can't be used as an action in an IAM policy.

To define permissions for GetTableAutoScalingSettings, you must allow the following two actions in the IAM policy statement's Action element:

" + }, "ListKeyspaces":{ "name":"ListKeyspaces", "http":{ @@ -185,7 +202,7 @@ {"shape":"AccessDeniedException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Restores the specified table to the specified point in time within the earliest_restorable_timestamp and the current time. For more information about restore points, see Time window for PITR continuous backups in the Amazon Keyspaces Developer Guide.

Any number of users can execute up to 4 concurrent restores (any type of restore) in a given account.

When you restore using point in time recovery, Amazon Keyspaces restores your source table's schema and data to the state based on the selected timestamp (day:hour:minute:second) to a new table. The Time to Live (TTL) settings are also restored to the state based on the selected timestamp.

In addition to the table's schema, data, and TTL settings, RestoreTable restores the capacity mode, encryption, and point-in-time recovery settings from the source table. Unlike the table's schema data and TTL settings, which are restored based on the selected timestamp, these settings are always restored based on the table's settings as of the current time or when the table was deleted.

You can also overwrite these settings during restore:

For more information, see PITR restore settings in the Amazon Keyspaces Developer Guide.

Note that the following settings are not restored, and you must configure them manually for the new table:

" + "documentation":"

Restores the table to the specified point in time within the earliest_restorable_timestamp and the current time. For more information about restore points, see Time window for PITR continuous backups in the Amazon Keyspaces Developer Guide.

Any number of users can execute up to 4 concurrent restores (any type of restore) in a given account.

When you restore using point in time recovery, Amazon Keyspaces restores your source table's schema and data to the state based on the selected timestamp (day:hour:minute:second) to a new table. The Time to Live (TTL) settings are also restored to the state based on the selected timestamp.

In addition to the table's schema, data, and TTL settings, RestoreTable restores the capacity mode, auto scaling settings, encryption settings, and point-in-time recovery settings from the source table. Unlike the table's schema data and TTL settings, which are restored based on the selected timestamp, these settings are always restored based on the table's settings as of the current time or when the table was deleted.

You can also overwrite these settings during restore:

For more information, see PITR restore settings in the Amazon Keyspaces Developer Guide.

Note that the following settings are not restored, and you must configure them manually for the new table:

" }, "TagResource":{ "name":"TagResource", @@ -238,7 +255,7 @@ {"shape":"AccessDeniedException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Adds new columns to the table or updates one of the table's settings, for example capacity mode, encryption, point-in-time recovery, or ttl settings. Note that you can only update one specific table setting per update operation.

" + "documentation":"

Adds new columns to the table or updates one of the table's settings, for example capacity mode, auto scaling, encryption, point-in-time recovery, or ttl settings. Note that you can only update one specific table setting per update operation.

" } }, "shapes":{ @@ -251,11 +268,61 @@ "AccessDeniedException":{ "type":"structure", "members":{ - "message":{"shape":"String"} + "message":{ + "shape":"String", + "documentation":"

Description of the error.

" + } }, - "documentation":"

You do not have sufficient access to perform this action.

", + "documentation":"

You don't have sufficient access permissions to perform this action.

", "exception":true }, + "AutoScalingPolicy":{ + "type":"structure", + "members":{ + "targetTrackingScalingPolicyConfiguration":{ + "shape":"TargetTrackingScalingPolicyConfiguration", + "documentation":"

Auto scaling scales up capacity automatically when traffic exceeds this target utilization rate, and then back down when it falls below the target. A double between 20 and 90.

" + } + }, + "documentation":"

Amazon Keyspaces supports the target tracking auto scaling policy. With this policy, Amazon Keyspaces auto scaling ensures that the table's ratio of consumed to provisioned capacity stays at or near the target value that you specify. You define the target value as a percentage between 20 and 90.

" + }, + "AutoScalingSettings":{ + "type":"structure", + "members":{ + "autoScalingDisabled":{ + "shape":"BooleanObject", + "documentation":"

This optional parameter enables auto scaling for the table if set to false.

" + }, + "minimumUnits":{ + "shape":"CapacityUnits", + "documentation":"

The minimum level of throughput the table should always be ready to support. The value must be between 1 and the max throughput per second quota for your account (40,000 by default).

" + }, + "maximumUnits":{ + "shape":"CapacityUnits", + "documentation":"

Manage costs by specifying the maximum amount of throughput to provision. The value must be between 1 and the max throughput per second quota for your account (40,000 by default).

" + }, + "scalingPolicy":{ + "shape":"AutoScalingPolicy", + "documentation":"

Amazon Keyspaces supports the target tracking auto scaling policy. With this policy, Amazon Keyspaces auto scaling ensures that the table's ratio of consumed to provisioned capacity stays at or near the target value that you specify. You define the target value as a percentage between 20 and 90.

" + } + }, + "documentation":"

The optional auto scaling settings for a table with provisioned throughput capacity.

To turn on auto scaling for a table in throughputMode:PROVISIONED, you must specify the following parameters.

Configure the minimum and maximum capacity units. The auto scaling policy ensures that capacity never goes below the minimum or above the maximum range.

For more information, see Managing throughput capacity automatically with Amazon Keyspaces auto scaling in the Amazon Keyspaces Developer Guide.

" + }, + "AutoScalingSpecification":{ + "type":"structure", + "members":{ + "writeCapacityAutoScaling":{ + "shape":"AutoScalingSettings", + "documentation":"

The auto scaling settings for the table's write capacity.

" + }, + "readCapacityAutoScaling":{ + "shape":"AutoScalingSettings", + "documentation":"

The auto scaling settings for the table's read capacity.

" + } + }, + "documentation":"

The optional auto scaling capacity settings for a table in provisioned capacity mode.

" + }, + "BooleanObject":{"type":"boolean"}, "CapacitySpecification":{ "type":"structure", "required":["throughputMode"], @@ -377,9 +444,12 @@ "ConflictException":{ "type":"structure", "members":{ - "message":{"shape":"String"} + "message":{ + "shape":"String", + "documentation":"

Description of the error.

" + } }, - "documentation":"

Amazon Keyspaces could not complete the requested action. This error may occur if you try to perform an action and the same or a different action is already in progress, or if you try to create a resource that already exists.

", + "documentation":"

Amazon Keyspaces couldn't complete the requested action. This error may occur if you try to perform an action and the same or a different action is already in progress, or if you try to create a resource that already exists.

", "exception":true }, "CreateKeyspaceRequest":{ @@ -461,6 +531,14 @@ "clientSideTimestamps":{ "shape":"ClientSideTimestamps", "documentation":"

Enables client-side timestamps for the table. By default, the setting is disabled. You can enable client-side timestamps with the following option:

Once client-side timestamps are enabled for a table, this setting cannot be disabled.

" + }, + "autoScalingSpecification":{ + "shape":"AutoScalingSpecification", + "documentation":"

The optional auto scaling settings for a table in provisioned capacity mode. Specifies if the service can manage throughput capacity automatically on your behalf.

Auto scaling helps you provision throughput capacity for variable workloads efficiently by increasing and decreasing your table's read and write capacity automatically in response to application traffic. For more information, see Managing throughput capacity automatically with Amazon Keyspaces auto scaling in the Amazon Keyspaces Developer Guide.

By default, auto scaling is disabled for a table.

" + }, + "replicaSpecifications":{ + "shape":"ReplicaSpecificationList", + "documentation":"

The optional Amazon Web Services Region specific settings of a multi-Region table. These settings overwrite the general settings of the table for the specified Region.

For a multi-Region table in provisioned capacity mode, you can configure the table's read capacity differently for each Region's replica. The write capacity, however, remains synchronized between all replicas to ensure that there's enough capacity to replicate writes across all Regions. To define the read capacity for a table replica in a specific Region, you can do so by configuring the following parameters.

" } } }, @@ -478,7 +556,7 @@ "type":"integer", "box":true, "max":630720000, - "min":1 + "min":0 }, "DeleteKeyspaceRequest":{ "type":"structure", @@ -517,6 +595,7 @@ "members":{ } }, + "DoubleObject":{"type":"double"}, "EncryptionSpecification":{ "type":"structure", "required":["type"], @@ -576,6 +655,53 @@ } } }, + "GetTableAutoScalingSettingsRequest":{ + "type":"structure", + "required":[ + "keyspaceName", + "tableName" + ], + "members":{ + "keyspaceName":{ + "shape":"KeyspaceName", + "documentation":"

The name of the keyspace.

" + }, + "tableName":{ + "shape":"TableName", + "documentation":"

The name of the table.

" + } + } + }, + "GetTableAutoScalingSettingsResponse":{ + "type":"structure", + "required":[ + "keyspaceName", + "tableName", + "resourceArn" + ], + "members":{ + "keyspaceName":{ + "shape":"KeyspaceName", + "documentation":"

The name of the keyspace.

" + }, + "tableName":{ + "shape":"TableName", + "documentation":"

The name of the table.

" + }, + "resourceArn":{ + "shape":"ARN", + "documentation":"

The Amazon Resource Name (ARN) of the table.

" + }, + "autoScalingSpecification":{ + "shape":"AutoScalingSpecification", + "documentation":"

The auto scaling settings of the table.

" + }, + "replicaSpecifications":{ + "shape":"ReplicaAutoScalingSpecificationList", + "documentation":"

The Amazon Web Services Region specific settings of a multi-Region table. Returns the settings for all Regions the table is replicated in.

" + } + } + }, "GetTableRequest":{ "type":"structure", "required":[ @@ -652,13 +778,21 @@ "clientSideTimestamps":{ "shape":"ClientSideTimestamps", "documentation":"

The client-side timestamps setting of the table.

" + }, + "replicaSpecifications":{ + "shape":"ReplicaSpecificationSummaryList", + "documentation":"

Returns the Amazon Web Services Region specific settings of all Regions a multi-Region table is replicated in.

" } } }, + "IntegerObject":{"type":"integer"}, "InternalServerException":{ "type":"structure", "members":{ - "message":{"shape":"String"} + "message":{ + "shape":"String", + "documentation":"

Description of the error.

" + } }, "documentation":"

Amazon Keyspaces was unable to fully process this request because of an internal server error.

", "exception":true, @@ -856,6 +990,69 @@ "max":6, "min":2 }, + "ReplicaAutoScalingSpecification":{ + "type":"structure", + "members":{ + "region":{ + "shape":"region", + "documentation":"

The Amazon Web Services Region.

" + }, + "autoScalingSpecification":{ + "shape":"AutoScalingSpecification", + "documentation":"

The auto scaling settings for a multi-Region table in the specified Amazon Web Services Region.

" + } + }, + "documentation":"

The auto scaling settings of a multi-Region table in the specified Amazon Web Services Region.

" + }, + "ReplicaAutoScalingSpecificationList":{ + "type":"list", + "member":{"shape":"ReplicaAutoScalingSpecification"}, + "min":0 + }, + "ReplicaSpecification":{ + "type":"structure", + "required":["region"], + "members":{ + "region":{ + "shape":"region", + "documentation":"

The Amazon Web Services Region.

" + }, + "readCapacityUnits":{ + "shape":"CapacityUnits", + "documentation":"

The provisioned read capacity units for the multi-Region table in the specified Amazon Web Services Region.

" + }, + "readCapacityAutoScaling":{ + "shape":"AutoScalingSettings", + "documentation":"

The read capacity auto scaling settings for the multi-Region table in the specified Amazon Web Services Region.

" + } + }, + "documentation":"

The Amazon Web Services Region specific settings of a multi-Region table.

For a multi-Region table, you can configure the table's read capacity differently per Amazon Web Services Region. You can do this by configuring the following parameters.

" + }, + "ReplicaSpecificationList":{ + "type":"list", + "member":{"shape":"ReplicaSpecification"}, + "min":1 + }, + "ReplicaSpecificationSummary":{ + "type":"structure", + "members":{ + "region":{ + "shape":"region", + "documentation":"

The Amazon Web Services Region.

" + }, + "status":{ + "shape":"TableStatus", + "documentation":"

The status of the multi-Region table in the specified Amazon Web Services Region.

" + }, + "capacitySpecification":{"shape":"CapacitySpecificationSummary"} + }, + "documentation":"

The Region-specific settings of a multi-Region table in the specified Amazon Web Services Region.

If the multi-Region table is using provisioned capacity and has optional auto scaling policies configured, note that the Region specific summary returns both read and write capacity settings. But only Region specific read capacity settings can be configured for a multi-Region table. In a multi-Region table, your write capacity units will be synced across all Amazon Web Services Regions to ensure that there is enough capacity to replicate write events across Regions.

" + }, + "ReplicaSpecificationSummaryList":{ + "type":"list", + "member":{"shape":"ReplicaSpecificationSummary"}, + "min":0 + }, "ReplicationSpecification":{ "type":"structure", "required":["replicationStrategy"], @@ -874,7 +1071,10 @@ "ResourceNotFoundException":{ "type":"structure", "members":{ - "message":{"shape":"String"}, + "message":{ + "shape":"String", + "documentation":"

Description of the error.

" + }, "resourceArn":{ "shape":"ARN", "documentation":"

The unique identifier in the format of Amazon Resource Name (ARN), for the resource not found.

" @@ -927,6 +1127,14 @@ "tagsOverride":{ "shape":"TagList", "documentation":"

A list of key-value pair tags to be attached to the restored table.

For more information, see Adding tags and labels to Amazon Keyspaces resources in the Amazon Keyspaces Developer Guide.

" + }, + "autoScalingSpecification":{ + "shape":"AutoScalingSpecification", + "documentation":"

The optional auto scaling settings for the restored table in provisioned capacity mode. Specifies if the service can manage throughput capacity of a provisioned table automatically on your behalf. Amazon Keyspaces auto scaling helps you provision throughput capacity for variable workloads efficiently by increasing and decreasing your table's read and write capacity automatically in response to application traffic.

For more information, see Managing throughput capacity automatically with Amazon Keyspaces auto scaling in the Amazon Keyspaces Developer Guide.

" + }, + "replicaSpecifications":{ + "shape":"ReplicaSpecificationList", + "documentation":"

The optional Region specific settings of a multi-Regional table.

" } } }, @@ -969,7 +1177,10 @@ "ServiceQuotaExceededException":{ "type":"structure", "members":{ - "message":{"shape":"String"} + "message":{ + "shape":"String", + "documentation":"

Description of the error.

" + } }, "documentation":"

The operation exceeded the service quota for this resource. For more information on service quotas, see Quotas in the Amazon Keyspaces Developer Guide.

", "exception":true @@ -1098,6 +1309,29 @@ "max":256, "min":1 }, + "TargetTrackingScalingPolicyConfiguration":{ + "type":"structure", + "required":["targetValue"], + "members":{ + "disableScaleIn":{ + "shape":"BooleanObject", + "documentation":"

Specifies if scale-in is enabled.

When auto scaling automatically decreases capacity for a table, the table scales in. When scaling policies are set, they can't scale in the table lower than its minimum capacity.

" + }, + "scaleInCooldown":{ + "shape":"IntegerObject", + "documentation":"

Specifies a scale-in cool down period.

A cooldown period in seconds between scaling activities that lets the table stabilize before another scaling activity starts.

" + }, + "scaleOutCooldown":{ + "shape":"IntegerObject", + "documentation":"

Specifies a scale out cool down period.

A cooldown period in seconds between scaling activities that lets the table stabilize before another scaling activity starts.

" + }, + "targetValue":{ + "shape":"DoubleObject", + "documentation":"

Specifies the target value for the target tracking auto scaling policy.

Amazon Keyspaces auto scaling scales up capacity automatically when traffic exceeds this target utilization rate, and then back down when it falls below the target. This ensures that the ratio of consumed capacity to provisioned capacity stays at or near this value. You define targetValue as a percentage. A double between 20 and 90.

" + } + }, + "documentation":"

The auto scaling policy that scales a table based on the ratio of consumed to provisioned capacity.

" + }, "ThroughputMode":{ "type":"string", "enum":[ @@ -1185,6 +1419,14 @@ "clientSideTimestamps":{ "shape":"ClientSideTimestamps", "documentation":"

Enables client-side timestamps for the table. By default, the setting is disabled. You can enable client-side timestamps with the following option:

Once client-side timestamps are enabled for a table, this setting cannot be disabled.

" + }, + "autoScalingSpecification":{ + "shape":"AutoScalingSpecification", + "documentation":"

The optional auto scaling settings to update for a table in provisioned capacity mode. Specifies if the service can manage throughput capacity of a provisioned table automatically on your behalf. Amazon Keyspaces auto scaling helps you provision throughput capacity for variable workloads efficiently by increasing and decreasing your table's read and write capacity automatically in response to application traffic.

If auto scaling is already enabled for the table, you can use UpdateTable to update the minimum and maximum values or the auto scaling policy settings independently.

For more information, see Managing throughput capacity automatically with Amazon Keyspaces auto scaling in the Amazon Keyspaces Developer Guide.

" + }, + "replicaSpecifications":{ + "shape":"ReplicaSpecificationList", + "documentation":"

The Region specific settings of a multi-Regional table.

" } } }, @@ -1201,7 +1443,10 @@ "ValidationException":{ "type":"structure", "members":{ - "message":{"shape":"String"} + "message":{ + "shape":"String", + "documentation":"

Description of the error.

" + } }, "documentation":"

The operation failed due to an invalid or malformed request.

", "exception":true diff -Nru awscli-2.15.9/awscli/botocore/data/lambda/2015-03-31/service-2.json awscli-2.15.22/awscli/botocore/data/lambda/2015-03-31/service-2.json --- awscli-2.15.9/awscli/botocore/data/lambda/2015-03-31/service-2.json 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/awscli/botocore/data/lambda/2015-03-31/service-2.json 2024-02-21 17:34:54.000000000 +0000 @@ -637,7 +637,7 @@ {"shape":"InvalidRuntimeException"}, {"shape":"ResourceConflictException"} ], - "documentation":"

For asynchronous function invocation, use Invoke.

Invokes a function asynchronously.

", + "documentation":"

For asynchronous function invocation, use Invoke.

Invokes a function asynchronously.

If you do use the InvokeAsync action, note that it doesn't support the use of X-Ray active tracing. Trace ID is not propagated to the function, even if X-Ray active tracing is turned on.

", "deprecated":true }, "InvokeWithResponseStream":{ @@ -1742,7 +1742,7 @@ "members":{ "EventSourceArn":{ "shape":"Arn", - "documentation":"

The Amazon Resource Name (ARN) of the event source.

" + "documentation":"

The Amazon Resource Name (ARN) of the event source.

" }, "FunctionName":{ "shape":"FunctionName", @@ -1778,7 +1778,7 @@ }, "DestinationConfig":{ "shape":"DestinationConfig", - "documentation":"

(Kinesis and DynamoDB Streams only) A standard Amazon SQS queue or standard Amazon SNS topic destination for discarded records.

" + "documentation":"

(Kinesis, DynamoDB Streams, Amazon MSK, and self-managed Kafka only) A configuration object that specifies the destination of an event after Lambda processes it.

" }, "MaximumRecordAgeInSeconds":{ "shape":"MaximumRecordAgeInSeconds", @@ -1928,7 +1928,7 @@ }, "EphemeralStorage":{ "shape":"EphemeralStorage", - "documentation":"

The size of the function's /tmp directory in MB. The default value is 512, but can be any whole number between 512 and 10,240 MB.

" + "documentation":"

The size of the function's /tmp directory in MB. The default value is 512, but can be any whole number between 512 and 10,240 MB. For more information, see Configuring ephemeral storage (console).

" }, "SnapStart":{ "shape":"SnapStart", @@ -2405,7 +2405,7 @@ "documentation":"

The size of the function's /tmp directory.

" } }, - "documentation":"

The size of the function's /tmp directory in MB. The default value is 512, but it can be any whole number between 512 and 10,240 MB.

" + "documentation":"

The size of the function's /tmp directory in MB. The default value is 512, but can be any whole number between 512 and 10,240 MB. For more information, see Configuring ephemeral storage (console).

" }, "EphemeralStorageSize":{ "type":"integer", @@ -2469,7 +2469,7 @@ }, "DestinationConfig":{ "shape":"DestinationConfig", - "documentation":"

(Kinesis and DynamoDB Streams only) An Amazon SQS queue or Amazon SNS topic destination for discarded records.

" + "documentation":"

(Kinesis, DynamoDB Streams, Amazon MSK, and self-managed Apache Kafka event sources only) A configuration object that specifies the destination of an event after Lambda processes it.

" }, "Topics":{ "shape":"Topics", @@ -2792,7 +2792,7 @@ }, "EphemeralStorage":{ "shape":"EphemeralStorage", - "documentation":"

The size of the function’s /tmp directory in MB. The default value is 512, but it can be any whole number between 512 and 10,240 MB.

" + "documentation":"

The size of the function's /tmp directory in MB. The default value is 512, but can be any whole number between 512 and 10,240 MB. For more information, see Configuring ephemeral storage (console).

" }, "SnapStart":{ "shape":"SnapStartResponse", @@ -3574,7 +3574,7 @@ }, "ClientContext":{ "shape":"String", - "documentation":"

Up to 3,583 bytes of base64-encoded data about the invoking client to pass to the function in the context object.

", + "documentation":"

Up to 3,583 bytes of base64-encoded data about the invoking client to pass to the function in the context object. Lambda passes the ClientContext object to your function for synchronous invocations only.

", "location":"header", "locationName":"X-Amz-Client-Context" }, @@ -4113,7 +4113,7 @@ "members":{ "EventSourceArn":{ "shape":"Arn", - "documentation":"

The Amazon Resource Name (ARN) of the event source.

", + "documentation":"

The Amazon Resource Name (ARN) of the event source.

", "location":"querystring", "locationName":"EventSourceArn" }, @@ -4311,7 +4311,7 @@ "members":{ "CompatibleRuntime":{ "shape":"Runtime", - "documentation":"

A runtime identifier. For example, go1.x.

The following list includes deprecated runtimes. For more information, see Runtime deprecation policy.

", + "documentation":"

A runtime identifier. For example, java21.

The following list includes deprecated runtimes. For more information, see Runtime deprecation policy.

", "location":"querystring", "locationName":"CompatibleRuntime" }, @@ -4359,7 +4359,7 @@ "members":{ "CompatibleRuntime":{ "shape":"Runtime", - "documentation":"

A runtime identifier. For example, go1.x.

The following list includes deprecated runtimes. For more information, see Runtime deprecation policy.

", + "documentation":"

A runtime identifier. For example, java21.

The following list includes deprecated runtimes. For more information, see Runtime deprecation policy.

", "location":"querystring", "locationName":"CompatibleRuntime" }, @@ -4525,11 +4525,11 @@ }, "ApplicationLogLevel":{ "shape":"ApplicationLogLevel", - "documentation":"

Set this property to filter the application logs for your function that Lambda sends to CloudWatch. Lambda only sends application logs at the selected level and lower.

" + "documentation":"

Set this property to filter the application logs for your function that Lambda sends to CloudWatch. Lambda only sends application logs at the selected level of detail and lower, where TRACE is the highest level and FATAL is the lowest.

" }, "SystemLogLevel":{ "shape":"SystemLogLevel", - "documentation":"

Set this property to filter the system logs for your function that Lambda sends to CloudWatch. Lambda only sends system logs at the selected level and lower.

" + "documentation":"

Set this property to filter the system logs for your function that Lambda sends to CloudWatch. Lambda only sends system logs at the selected level of detail and lower, where DEBUG is the highest level and WARN is the lowest.

" }, "LogGroup":{ "shape":"LogGroup", @@ -4639,7 +4639,7 @@ "members":{ "Destination":{ "shape":"DestinationArn", - "documentation":"

The Amazon Resource Name (ARN) of the destination resource.

" + "documentation":"

The Amazon Resource Name (ARN) of the destination resource.

To retain records of asynchronous invocations, you can configure an Amazon SNS topic, Amazon SQS queue, Lambda function, or Amazon EventBridge event bus as the destination.

To retain records of failed invocations from Kinesis and DynamoDB event sources, you can configure an Amazon SNS topic or Amazon SQS queue as the destination.

To retain records of failed invocations from self-managed Kafka or Amazon MSK, you can configure an Amazon SNS topic, Amazon SQS queue, or Amazon S3 bucket as the destination.

" } }, "documentation":"

A destination for events that failed processing.

" @@ -5275,6 +5275,7 @@ "dotnetcore2.1", "dotnetcore3.1", "dotnet6", + "dotnet8", "nodejs4.3-edge", "go1.x", "ruby2.5", @@ -5830,7 +5831,7 @@ }, "DestinationConfig":{ "shape":"DestinationConfig", - "documentation":"

(Kinesis and DynamoDB Streams only) A standard Amazon SQS queue or standard Amazon SNS topic destination for discarded records.

" + "documentation":"

(Kinesis, DynamoDB Streams, Amazon MSK, and self-managed Kafka only) A configuration object that specifies the destination of an event after Lambda processes it.

" }, "MaximumRecordAgeInSeconds":{ "shape":"MaximumRecordAgeInSeconds", @@ -5990,7 +5991,7 @@ }, "EphemeralStorage":{ "shape":"EphemeralStorage", - "documentation":"

The size of the function's /tmp directory in MB. The default value is 512, but can be any whole number between 512 and 10,240 MB.

" + "documentation":"

The size of the function's /tmp directory in MB. The default value is 512, but can be any whole number between 512 and 10,240 MB. For more information, see Configuring ephemeral storage (console).

" }, "SnapStart":{ "shape":"SnapStart", diff -Nru awscli-2.15.9/awscli/botocore/data/lexv2-models/2020-08-07/service-2.json awscli-2.15.22/awscli/botocore/data/lexv2-models/2020-08-07/service-2.json --- awscli-2.15.9/awscli/botocore/data/lexv2-models/2020-08-07/service-2.json 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/awscli/botocore/data/lexv2-models/2020-08-07/service-2.json 2024-02-21 17:34:54.000000000 +0000 @@ -143,6 +143,25 @@ ], "documentation":"

Creates a locale in the bot. The locale contains the intents and slot types that the bot uses in conversations with users in the specified language and locale. You must add a locale to a bot before you can add intents and slot types to the bot.

" }, + "CreateBotReplica":{ + "name":"CreateBotReplica", + "http":{ + "method":"PUT", + "requestUri":"/bots/{botId}/replicas/", + "responseCode":202 + }, + "input":{"shape":"CreateBotReplicaRequest"}, + "output":{"shape":"CreateBotReplicaResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ValidationException"}, + {"shape":"PreconditionFailedException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Action to create a replication of the source bot in the secondary region.

" + }, "CreateBotVersion":{ "name":"CreateBotVersion", "http":{ @@ -371,6 +390,25 @@ ], "documentation":"

Removes a locale from a bot.

When you delete a locale, all intents, slots, and slot types defined for the locale are also deleted.

" }, + "DeleteBotReplica":{ + "name":"DeleteBotReplica", + "http":{ + "method":"DELETE", + "requestUri":"/bots/{botId}/replicas/{replicaRegion}/", + "responseCode":202 + }, + "input":{"shape":"DeleteBotReplicaRequest"}, + "output":{"shape":"DeleteBotReplicaResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ValidationException"}, + {"shape":"PreconditionFailedException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

The action to delete the replicated bot in the secondary region.

" + }, "DeleteBotVersion":{ "name":"DeleteBotVersion", "http":{ @@ -639,6 +677,24 @@ ], "documentation":"

Provides metadata information about a bot recommendation. This information will enable you to get a description on the request inputs, to download associated transcripts after processing is complete, and to download intents and slot-types generated by the bot recommendation.

" }, + "DescribeBotReplica":{ + "name":"DescribeBotReplica", + "http":{ + "method":"GET", + "requestUri":"/bots/{botId}/replicas/{replicaRegion}/", + "responseCode":200 + }, + "input":{"shape":"DescribeBotReplicaRequest"}, + "output":{"shape":"DescribeBotReplicaResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Monitors the bot replication status through the UI console.

" + }, "DescribeBotResourceGeneration":{ "name":"DescribeBotResourceGeneration", "http":{ @@ -924,6 +980,23 @@ ], "documentation":"

Provides a list of utterances that users have sent to the bot.

Utterances are aggregated by the text of the utterance. For example, all instances where customers used the phrase \"I want to order pizza\" are aggregated into the same line in the response.

You can see both detected utterances and missed utterances. A detected utterance is where the bot properly recognized the utterance and activated the associated intent. A missed utterance was not recognized by the bot and didn't activate an intent.

Utterances can be aggregated for a bot alias or for a bot version, but not both at the same time.

Utterances statistics are not generated under the following conditions:

" }, + "ListBotAliasReplicas":{ + "name":"ListBotAliasReplicas", + "http":{ + "method":"POST", + "requestUri":"/bots/{botId}/replicas/{replicaRegion}/botaliases/", + "responseCode":200 + }, + "input":{"shape":"ListBotAliasReplicasRequest"}, + "output":{"shape":"ListBotAliasReplicasResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

The action to list the replicated bots created from the source bot alias.

" + }, "ListBotAliases":{ "name":"ListBotAliases", "http":{ @@ -975,6 +1048,23 @@ ], "documentation":"

Get a list of bot recommendations that meet the specified criteria.

" }, + "ListBotReplicas":{ + "name":"ListBotReplicas", + "http":{ + "method":"POST", + "requestUri":"/bots/{botId}/replicas/", + "responseCode":200 + }, + "input":{"shape":"ListBotReplicasRequest"}, + "output":{"shape":"ListBotReplicasResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

The action to list the replicated bots.

" + }, "ListBotResourceGenerations":{ "name":"ListBotResourceGenerations", "http":{ @@ -992,6 +1082,23 @@ ], "documentation":"

Lists the generation requests made for a bot locale.

" }, + "ListBotVersionReplicas":{ + "name":"ListBotVersionReplicas", + "http":{ + "method":"POST", + "requestUri":"/bots/{botId}/replicas/{replicaRegion}/botversions/", + "responseCode":200 + }, + "input":{"shape":"ListBotVersionReplicasRequest"}, + "output":{"shape":"ListBotVersionReplicasResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Contains information about all the versions replication statuses applicable for Global Resiliency.

" + }, "ListBotVersions":{ "name":"ListBotVersions", "http":{ @@ -3258,6 +3365,51 @@ "min":1, "pattern":"^(\\bAmazonLexTestAlias\\b|[0-9a-zA-Z][_-]?)+$" }, + "BotAliasReplicaSummary":{ + "type":"structure", + "members":{ + "botAliasId":{ + "shape":"BotAliasId", + "documentation":"

The bot alias ID for all the alias bot replications.

" + }, + "botAliasReplicationStatus":{ + "shape":"BotAliasReplicationStatus", + "documentation":"

The replication statuses for all the alias bot replications.

" + }, + "botVersion":{ + "shape":"BotVersion", + "documentation":"

The bot version for all the alias bot replications.

" + }, + "creationDateTime":{ + "shape":"Timestamp", + "documentation":"

The creation time and date for all the alias bot replications.

" + }, + "lastUpdatedDateTime":{ + "shape":"Timestamp", + "documentation":"

The last time and date updated for all the alias bot replications.

" + }, + "failureReasons":{ + "shape":"FailureReasons", + "documentation":"

The reasons for failure for the aliases bot replications.

" + } + }, + "documentation":"

Contains information about all the aliases replication statuses applicable for global resiliency.

" + }, + "BotAliasReplicaSummaryList":{ + "type":"list", + "member":{"shape":"BotAliasReplicaSummary"} + }, + "BotAliasReplicationStatus":{ + "type":"string", + "documentation":"

The status of the operation to replicate the bot alias. Values: Creating, Updating, Available, Deleting, Failed.

", + "enum":[ + "Creating", + "Updating", + "Available", + "Deleting", + "Failed" + ] + }, "BotAliasStatus":{ "type":"string", "enum":[ @@ -3721,6 +3873,42 @@ "type":"list", "member":{"shape":"BotRecommendationSummary"} }, + "BotReplicaStatus":{ + "type":"string", + "documentation":"

The status of the operation to replicate the bot. Values: Enabling, Enabled, Deleting, Failed.

", + "enum":[ + "Enabling", + "Enabled", + "Deleting", + "Failed" + ] + }, + "BotReplicaSummary":{ + "type":"structure", + "members":{ + "replicaRegion":{ + "shape":"ReplicaRegion", + "documentation":"

The replica region used in the replication statuses summary.

" + }, + "creationDateTime":{ + "shape":"Timestamp", + "documentation":"

The creation time and date for the replicated bots.

" + }, + "botReplicaStatus":{ + "shape":"BotReplicaStatus", + "documentation":"

The operation status for the replicated bot applicable.

" + }, + "failureReasons":{ + "shape":"FailureReasons", + "documentation":"

The reasons for the failure for the replicated bot.

" + } + }, + "documentation":"

Contains summary information about all the replication statuses applicable for global resiliency.

" + }, + "BotReplicaSummaryList":{ + "type":"list", + "member":{"shape":"BotReplicaSummary"} + }, "BotSortAttribute":{ "type":"string", "enum":["BotName"] @@ -3824,6 +4012,64 @@ "value":{"shape":"BotVersionLocaleDetails"}, "min":1 }, + "BotVersionReplicaSortAttribute":{ + "type":"string", + "enum":["BotVersion"] + }, + "BotVersionReplicaSortBy":{ + "type":"structure", + "required":[ + "attribute", + "order" + ], + "members":{ + "attribute":{ + "shape":"BotVersionReplicaSortAttribute", + "documentation":"

The attribute of the sort category for the version replicated bots.

" + }, + "order":{ + "shape":"SortOrder", + "documentation":"

The order of the sort category for the version replicated bots.

" + } + }, + "documentation":"

The sort category for the version replicated bots.

" + }, + "BotVersionReplicaSummary":{ + "type":"structure", + "members":{ + "botVersion":{ + "shape":"BotVersion", + "documentation":"

The bot version for the summary information for all the version replication statuses.

" + }, + "botVersionReplicationStatus":{ + "shape":"BotVersionReplicationStatus", + "documentation":"

The version replication status for all the replicated bots.

" + }, + "creationDateTime":{ + "shape":"Timestamp", + "documentation":"

The creation date and time of the replication status for all the replicated bots.

" + }, + "failureReasons":{ + "shape":"FailureReasons", + "documentation":"

The reasons for replication failure for all the replicated bots.

" + } + }, + "documentation":"

Contains summary information for all the version replication statuses applicable for Global resiliency.

" + }, + "BotVersionReplicaSummaryList":{ + "type":"list", + "member":{"shape":"BotVersionReplicaSummary"} + }, + "BotVersionReplicationStatus":{ + "type":"string", + "documentation":"

The status of the operation to replicate the bot version. Values: Creating, Available, Deleting, Failed.

", + "enum":[ + "Creating", + "Available", + "Deleting", + "Failed" + ] + }, "BotVersionSortAttribute":{ "type":"string", "enum":["BotVersion"] @@ -4609,6 +4855,50 @@ "generativeAISettings":{"shape":"GenerativeAISettings"} } }, + "CreateBotReplicaRequest":{ + "type":"structure", + "required":[ + "botId", + "replicaRegion" + ], + "members":{ + "botId":{ + "shape":"Id", + "documentation":"

The request for the unique bot ID of the source bot to be replicated in the secondary region.

", + "location":"uri", + "locationName":"botId" + }, + "replicaRegion":{ + "shape":"ReplicaRegion", + "documentation":"

The request for the secondary region that will be used in the replication of the source bot.

" + } + } + }, + "CreateBotReplicaResponse":{ + "type":"structure", + "members":{ + "botId":{ + "shape":"Id", + "documentation":"

The unique bot ID of the replicated bot generated.

" + }, + "replicaRegion":{ + "shape":"ReplicaRegion", + "documentation":"

The region of the replicated bot generated.

" + }, + "sourceRegion":{ + "shape":"ReplicaRegion", + "documentation":"

The source region for the source bot used for the replicated bot generated.

" + }, + "creationDateTime":{ + "shape":"Timestamp", + "documentation":"

The creation date and time of the replicated bot generated.

" + }, + "botReplicaStatus":{ + "shape":"BotReplicaStatus", + "documentation":"

The operational status of the replicated bot generated.

" + } + } + }, "CreateBotRequest":{ "type":"structure", "required":[ @@ -5620,6 +5910,44 @@ } } }, + "DeleteBotReplicaRequest":{ + "type":"structure", + "required":[ + "botId", + "replicaRegion" + ], + "members":{ + "botId":{ + "shape":"Id", + "documentation":"

The unique ID of the replicated bot to be deleted from the secondary region

", + "location":"uri", + "locationName":"botId" + }, + "replicaRegion":{ + "shape":"ReplicaRegion", + "documentation":"

The secondary region of the replicated bot that will be deleted.

", + "location":"uri", + "locationName":"replicaRegion" + } + } + }, + "DeleteBotReplicaResponse":{ + "type":"structure", + "members":{ + "botId":{ + "shape":"Id", + "documentation":"

The unique bot ID of the replicated bot generated.

" + }, + "replicaRegion":{ + "shape":"ReplicaRegion", + "documentation":"

The region of the replicated bot generated.

" + }, + "botReplicaStatus":{ + "shape":"BotReplicaStatus", + "documentation":"

The operational status of the replicated bot generated.

" + } + } + }, "DeleteBotRequest":{ "type":"structure", "required":["botId"], @@ -6290,6 +6618,56 @@ } } }, + "DescribeBotReplicaRequest":{ + "type":"structure", + "required":[ + "botId", + "replicaRegion" + ], + "members":{ + "botId":{ + "shape":"Id", + "documentation":"

The request for the unique bot ID of the replicated bot being monitored.

", + "location":"uri", + "locationName":"botId" + }, + "replicaRegion":{ + "shape":"ReplicaRegion", + "documentation":"

The request for the region of the replicated bot being monitored.

", + "location":"uri", + "locationName":"replicaRegion" + } + } + }, + "DescribeBotReplicaResponse":{ + "type":"structure", + "members":{ + "botId":{ + "shape":"Id", + "documentation":"

The unique bot ID of the replicated bot being monitored.

" + }, + "replicaRegion":{ + "shape":"ReplicaRegion", + "documentation":"

The region of the replicated bot being monitored.

" + }, + "sourceRegion":{ + "shape":"ReplicaRegion", + "documentation":"

The source region of the replicated bot being monitored.

" + }, + "creationDateTime":{ + "shape":"Timestamp", + "documentation":"

The creation date and time of the replicated bot being monitored.

" + }, + "botReplicaStatus":{ + "shape":"BotReplicaStatus", + "documentation":"

The operational status of the replicated bot being monitored.

" + }, + "failureReasons":{ + "shape":"FailureReasons", + "documentation":"

The failure reasons the bot being monitored failed to replicate.

" + } + } + }, "DescribeBotRequest":{ "type":"structure", "required":["botId"], @@ -8629,6 +9007,60 @@ } } }, + "ListBotAliasReplicasRequest":{ + "type":"structure", + "required":[ + "botId", + "replicaRegion" + ], + "members":{ + "botId":{ + "shape":"Id", + "documentation":"

The request for the unique bot ID of the replicated bot created from the source bot alias.

", + "location":"uri", + "locationName":"botId" + }, + "replicaRegion":{ + "shape":"ReplicaRegion", + "documentation":"

The request for the secondary region of the replicated bot created from the source bot alias.

", + "location":"uri", + "locationName":"replicaRegion" + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

The request for maximum results to list the replicated bots created from the source bot alias.

" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

The request for the next token for the replicated bot created from the source bot alias.

" + } + } + }, + "ListBotAliasReplicasResponse":{ + "type":"structure", + "members":{ + "botId":{ + "shape":"Id", + "documentation":"

The unique bot ID of the replicated bot created from the source bot alias.

" + }, + "sourceRegion":{ + "shape":"ReplicaRegion", + "documentation":"

The source region of the replicated bot created from the source bot alias.

" + }, + "replicaRegion":{ + "shape":"ReplicaRegion", + "documentation":"

The secondary region of the replicated bot created from the source bot alias.

" + }, + "botAliasReplicaSummaries":{ + "shape":"BotAliasReplicaSummaryList", + "documentation":"

The summary information of the replicated bot created from the source bot alias.

" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

The next token for the replicated bots created from the source bot alias.

" + } + } + }, "ListBotAliasesRequest":{ "type":"structure", "required":["botId"], @@ -8785,6 +9217,35 @@ } } }, + "ListBotReplicasRequest":{ + "type":"structure", + "required":["botId"], + "members":{ + "botId":{ + "shape":"Id", + "documentation":"

The request for the unique bot IDs in the list of replicated bots.

", + "location":"uri", + "locationName":"botId" + } + } + }, + "ListBotReplicasResponse":{ + "type":"structure", + "members":{ + "botId":{ + "shape":"Id", + "documentation":"

The unique bot IDs in the list of replicated bots.

" + }, + "sourceRegion":{ + "shape":"ReplicaRegion", + "documentation":"

The source region of the source bots in the list of replicated bots.

" + }, + "botReplicaSummaries":{ + "shape":"BotReplicaSummaryList", + "documentation":"

The summary details for the replicated bots.

" + } + } + }, "ListBotResourceGenerationsRequest":{ "type":"structure", "required":[ @@ -8850,6 +9311,64 @@ } } }, + "ListBotVersionReplicasRequest":{ + "type":"structure", + "required":[ + "botId", + "replicaRegion" + ], + "members":{ + "botId":{ + "shape":"Id", + "documentation":"

The request for the unique ID in the list of replicated bots.

", + "location":"uri", + "locationName":"botId" + }, + "replicaRegion":{ + "shape":"ReplicaRegion", + "documentation":"

The request for the region used in the list of replicated bots.

", + "location":"uri", + "locationName":"replicaRegion" + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum results given in the list of replicated bots.

" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

The next token given in the list of replicated bots.

" + }, + "sortBy":{ + "shape":"BotVersionReplicaSortBy", + "documentation":"

The requested sort category for the list of replicated bots.

" + } + } + }, + "ListBotVersionReplicasResponse":{ + "type":"structure", + "members":{ + "botId":{ + "shape":"Id", + "documentation":"

The unique ID of the bots in the list of replicated bots.

" + }, + "sourceRegion":{ + "shape":"ReplicaRegion", + "documentation":"

The source region used for the bots in the list of replicated bots.

" + }, + "replicaRegion":{ + "shape":"ReplicaRegion", + "documentation":"

The region used for the replicated bots in the list of replicated bots.

" + }, + "botVersionReplicaSummaries":{ + "shape":"BotVersionReplicaSummaryList", + "documentation":"

The information summary used for the replicated bots in the list of replicated bots.

" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

The next token used for the replicated bots in the list of replicated bots.

" + } + } + }, "ListBotVersionsRequest":{ "type":"structure", "required":["botId"], @@ -10599,6 +11118,12 @@ }, "documentation":"

Specifies the time window that utterance statistics are returned for. The time window is always relative to the last time that the that utterances were aggregated. For example, if the ListAggregatedUtterances operation is called at 1600, the time window is set to 1 hour, and the last refresh time was 1530, only utterances made between 1430 and 1530 are returned.

You can choose the time window that statistics should be returned for.

" }, + "ReplicaRegion":{ + "type":"string", + "documentation":"

The region that contains the replicated bots. Minimum value = 2, maximum value = 25.

", + "max":25, + "min":2 + }, "ResourceCount":{"type":"integer"}, "ResourceNotFoundException":{ "type":"structure", diff -Nru awscli-2.15.9/awscli/botocore/data/lightsail/2016-11-28/service-2.json awscli-2.15.22/awscli/botocore/data/lightsail/2016-11-28/service-2.json --- awscli-2.15.9/awscli/botocore/data/lightsail/2016-11-28/service-2.json 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/awscli/botocore/data/lightsail/2016-11-28/service-2.json 2024-02-21 17:34:54.000000000 +0000 @@ -3832,6 +3832,10 @@ "supportedAppCategories":{ "shape":"AppCategoryList", "documentation":"

Virtual computer blueprints that are supported by a Lightsail for Research bundle.

This parameter only applies to Lightsail for Research resources.

" + }, + "publicIpv4AddressCount":{ + "shape":"integer", + "documentation":"

An integer that indicates the public IPv4 address count included in the bundle; the value is either 0 or 1.

" } }, "documentation":"

Describes a bundle, which is a set of specs describing your virtual private server (or instance).

" @@ -8849,6 +8853,10 @@ "shape":"IpAddress", "documentation":"

The public IP address of the Amazon Lightsail instance.

" }, + "ipv6Addresses":{ + "shape":"Ipv6AddressList", + "documentation":"

The IPv6 address of the Amazon Lightsail instance.

" + }, "password":{ "shape":"string", "documentation":"

For RDP access, the password for your Amazon Lightsail instance. Password will be an empty string if the password for your new instance is not ready yet. When you create an instance, it can take up to 15 minutes for the instance to be ready.

If you create an instance using any key pair other than the default (LightsailDefaultKeyPair), password will always be an empty string.

If you change the Administrator password on the instance, Lightsail will continue to return the original password value. When accessing the instance using RDP, you need to manually enter the Administrator password after changing it from the default.

" @@ -12348,6 +12356,10 @@ "caCertificateIdentifier":{ "shape":"string", "documentation":"

Indicates the certificate that needs to be associated with the database.

" + }, + "relationalDatabaseBlueprintId":{ + "shape":"string", + "documentation":"

This parameter is used to update the major version of the database. Enter the blueprintId for the major version that you want to update to.

Use the GetRelationalDatabaseBlueprints action to get a list of available blueprint IDs.

" } } }, diff -Nru awscli-2.15.9/awscli/botocore/data/location/2020-11-19/service-2.json awscli-2.15.22/awscli/botocore/data/location/2020-11-19/service-2.json --- awscli-2.15.9/awscli/botocore/data/location/2020-11-19/service-2.json 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/awscli/botocore/data/location/2020-11-19/service-2.json 2024-02-21 17:34:54.000000000 +0000 @@ -2424,6 +2424,18 @@ } } }, + "CustomLayer":{ + "type":"string", + "max":100, + "min":1, + "pattern":"^[-._\\w]+$" + }, + "CustomLayerList":{ + "type":"list", + "member":{"shape":"CustomLayer"}, + "max":10, + "min":0 + }, "DataSourceConfiguration":{ "type":"structure", "members":{ @@ -3256,7 +3268,7 @@ "members":{ "FontStack":{ "shape":"String", - "documentation":"

A comma-separated list of fonts to load glyphs from in order of preference. For example, Noto Sans Regular, Arial Unicode.

Valid font stacks for Esri styles:

Valid font stacks for HERE Technologies styles:

Valid font stacks for GrabMaps styles:

Valid font stacks for Open Data styles:

The fonts used by the Open Data map styles are combined fonts that use Amazon Ember for most glyphs but Noto Sans for glyphs unsupported by Amazon Ember.

", + "documentation":"

A comma-separated list of fonts to load glyphs from in order of preference. For example, Noto Sans Regular, Arial Unicode.

Valid font stacks for Esri styles:

Valid font stacks for HERE Technologies styles:

Valid font stacks for GrabMaps styles:

Valid font stacks for Open Data styles:

The fonts used by the Open Data map styles are combined fonts that use Amazon Ember for most glyphs but Noto Sans for glyphs unsupported by Amazon Ember.

", "location":"uri", "locationName":"FontStack" }, @@ -3501,7 +3513,7 @@ }, "PlaceId":{ "shape":"PlaceId", - "documentation":"

The identifier of the place to find.

", + "documentation":"

The identifier of the place to find.

While you can use PlaceID in subsequent requests, PlaceID is not intended to be a permanent identifier and the ID can change between consecutive API calls. Please see the following PlaceID behaviour for each data provider:

", "location":"uri", "locationName":"PlaceId" } @@ -4317,13 +4329,17 @@ "type":"structure", "required":["Style"], "members":{ + "CustomLayers":{ + "shape":"CustomLayerList", + "documentation":"

Specifies the custom layers for the style. Leave unset to not enable any custom layer, or, for styles that support custom layers, you can enable layer(s), such as POI layer for the VectorEsriNavigation style. Default is unset.

Currently only VectorEsriNavigation supports CustomLayers. For more information, see Custom Layers.

" + }, "PoliticalView":{ "shape":"CountryCode3", "documentation":"

Specifies the political view for the style. Leave unset to not use a political view, or, for styles that support specific political views, you can choose a view, such as IND for the Indian view.

Default is unset.

Not all map resources or styles support political view styles. See Political views for more information.

" }, "Style":{ "shape":"MapStyle", - "documentation":"

Specifies the map style selected from an available data provider.

Valid Esri map styles:

Valid HERE Technologies map styles:

Valid GrabMaps map styles:

Grab provides maps only for countries in Southeast Asia, and is only available in the Asia Pacific (Singapore) Region (ap-southeast-1). For more information, see GrabMaps countries and area covered.

Valid Open Data map styles:

" + "documentation":"

Specifies the map style selected from an available data provider.

Valid Esri map styles:

Valid HERE Technologies map styles:

Valid GrabMaps map styles:

Grab provides maps only for countries in Southeast Asia, and is only available in the Asia Pacific (Singapore) Region (ap-southeast-1). For more information, see GrabMaps countries and area covered.

Valid Open Data map styles:

" } }, "documentation":"

Specifies the map tile style selected from an available provider.

" @@ -4331,6 +4347,10 @@ "MapConfigurationUpdate":{ "type":"structure", "members":{ + "CustomLayers":{ + "shape":"CustomLayerList", + "documentation":"

Specifies the custom layers for the style. Leave unset to not enable any custom layer, or, for styles that support custom layers, you can enable layer(s), such as POI layer for the VectorEsriNavigation style. Default is unset.

Currently only VectorEsriNavigation supports CustomLayers. For more information, see Custom Layers.

" + }, "PoliticalView":{ "shape":"CountryCode3OrEmpty", "documentation":"

Specifies the political view for the style. Set to an empty string to not use a political view, or, for styles that support specific political views, you can choose a view, such as IND for the Indian view.

Not all map resources or styles support political view styles. See Political views for more information.

" @@ -4398,7 +4418,7 @@ }, "SubMunicipality":{ "shape":"String", - "documentation":"

An area that's part of a larger municipality. For example, Blissville is a submunicipality in the Queen County in New York.

This property supported by Esri and OpenData. The Esri property is district, and the OpenData property is borough.

" + "documentation":"

An area that's part of a larger municipality. For example, Blissville is a submunicipality in Queens County in New York.

This property is only returned for a place index that uses Esri as a data provider. The property is represented as a district.

For more information about data providers, see Amazon Location Service data providers.

" }, "SubRegion":{ "shape":"String", @@ -4414,11 +4434,11 @@ }, "UnitNumber":{ "shape":"String", - "documentation":"

For addresses with multiple units, the unit identifier. Can include numbers and letters, for example 3B or Unit 123.

Returned only for a place index that uses Esri or Grab as a data provider. Is not returned for SearchPlaceIndexForPosition.

" + "documentation":"

For addresses with multiple units, the unit identifier. Can include numbers and letters, for example 3B or Unit 123.

This property is returned only for a place index that uses Esri or Grab as a data provider. It is not returned for SearchPlaceIndexForPosition.

" }, "UnitType":{ "shape":"String", - "documentation":"

For addresses with a UnitNumber, the type of unit. For example, Apartment.

Returned only for a place index that uses Esri as a data provider.

" + "documentation":"

For addresses with a UnitNumber, the type of unit. For example, Apartment.

This property is returned only for a place index that uses Esri as a data provider.

" } }, "documentation":"

Contains details about addresses or points of interest that match the search criteria.

Not all details are included with all responses. Some details may only be returned by specific data partners.

" @@ -4708,7 +4728,7 @@ }, "PlaceId":{ "shape":"PlaceId", - "documentation":"

The unique identifier of the Place. You can use this with the GetPlace operation to find the place again later, or to get full information for the Place.

The GetPlace request must use the same PlaceIndex resource as the SearchPlaceIndexForSuggestions that generated the Place ID.

For SearchPlaceIndexForSuggestions operations, the PlaceId is returned by place indexes that use Esri, Grab, or HERE as data providers.

" + "documentation":"

The unique identifier of the Place. You can use this with the GetPlace operation to find the place again later, or to get full information for the Place.

The GetPlace request must use the same PlaceIndex resource as the SearchPlaceIndexForSuggestions that generated the Place ID.

For SearchPlaceIndexForSuggestions operations, the PlaceId is returned by place indexes that use Esri, Grab, or HERE as data providers.

While you can use PlaceID in subsequent requests, PlaceID is not intended to be a permanent identifier and the ID can change between consecutive API calls. Please see the following PlaceID behaviour for each data provider:

" }, "SupplementalCategories":{ "shape":"PlaceSupplementalCategoryList", diff -Nru awscli-2.15.9/awscli/botocore/data/logs/2014-03-28/service-2.json awscli-2.15.22/awscli/botocore/data/logs/2014-03-28/service-2.json --- awscli-2.15.9/awscli/botocore/data/logs/2014-03-28/service-2.json 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/awscli/botocore/data/logs/2014-03-28/service-2.json 2024-02-21 17:34:54.000000000 +0000 @@ -110,7 +110,7 @@ {"shape":"OperationAbortedException"}, {"shape":"ServiceUnavailableException"} ], - "documentation":"

Creates a log group with the specified name. You can create up to 1,000,000 log groups per Region per account.

You must use the following guidelines when naming a log group:

When you create a log group, by default the log events in the log group do not expire. To set a retention policy so that events expire and are deleted after a specified time, use PutRetentionPolicy.

If you associate an KMS key with the log group, ingested data is encrypted using the KMS key. This association is stored as long as the data encrypted with the KMS key is still within CloudWatch Logs. This enables CloudWatch Logs to decrypt this data whenever it is requested.

If you attempt to associate a KMS key with the log group but the KMS key does not exist or the KMS key is disabled, you receive an InvalidParameterException error.

CloudWatch Logs supports only symmetric KMS keys. Do not associate an asymmetric KMS key with your log group. For more information, see Using Symmetric and Asymmetric Keys.

" + "documentation":"

Creates a log group with the specified name. You can create up to 1,000,000 log groups per Region per account.

You must use the following guidelines when naming a log group:

When you create a log group, by default the log events in the log group do not expire. To set a retention policy so that events expire and are deleted after a specified time, use PutRetentionPolicy.

If you associate an KMS key with the log group, ingested data is encrypted using the KMS key. This association is stored as long as the data encrypted with the KMS key is still within CloudWatch Logs. This enables CloudWatch Logs to decrypt this data whenever it is requested.

If you attempt to associate a KMS key with the log group but the KMS key does not exist or the KMS key is disabled, you receive an InvalidParameterException error.

CloudWatch Logs supports only symmetric KMS keys. Do not associate an asymmetric KMS key with your log group. For more information, see Using Symmetric and Asymmetric Keys.

" }, "CreateLogStream":{ "name":"CreateLogStream", @@ -140,7 +140,7 @@ {"shape":"ServiceUnavailableException"}, {"shape":"OperationAbortedException"} ], - "documentation":"

Deletes a CloudWatch Logs account policy.

To use this operation, you must be signed on with the logs:DeleteDataProtectionPolicy and logs:DeleteAccountPolicy permissions.

" + "documentation":"

Deletes a CloudWatch Logs account policy. This stops the policy from applying to all log groups or a subset of log groups in the account. Log-group level policies will still be in effect.

To use this operation, you must be signed on with the correct permissions depending on the type of policy that you are deleting.

" }, "DeleteDataProtectionPolicy":{ "name":"DeleteDataProtectionPolicy", @@ -387,7 +387,7 @@ {"shape":"ValidationException"}, {"shape":"ThrottlingException"} ], - "documentation":"

Retrieves a list of the deliveries that have been created in the account.

" + "documentation":"

Retrieves a list of the deliveries that have been created in the account.

A delivery is a connection between a delivery source and a delivery destination .

A delivery source represents an Amazon Web Services resource that sends logs to a logs delivery destination. The destination can be CloudWatch Logs, Amazon S3, or Kinesis Data Firehose. Only some Amazon Web Services services support being configured as a delivery source. These services are listed in Enable logging from Amazon Web Services services.

" }, "DescribeDeliveryDestinations":{ "name":"DescribeDeliveryDestinations", @@ -612,7 +612,7 @@ {"shape":"ServiceQuotaExceededException"}, {"shape":"ThrottlingException"} ], - "documentation":"

Returns complete information about one delivery. A delivery is a connection between a logical delivery source and a logical delivery destination

You need to specify the delivery id in this operation. You can find the IDs of the deliveries in your account with the DescribeDeliveries operation.

" + "documentation":"

Returns complete information about one logical delivery. A delivery is a connection between a delivery source and a delivery destination .

A delivery source represents an Amazon Web Services resource that sends logs to a logs delivery destination. The destination can be CloudWatch Logs, Amazon S3, or Kinesis Data Firehose. Only some Amazon Web Services services support being configured as a delivery source. These services are listed in Enable logging from Amazon Web Services services.

You need to specify the delivery id in this operation. You can find the IDs of the deliveries in your account with the DescribeDeliveries operation.

" }, "GetDeliveryDestination":{ "name":"GetDeliveryDestination", @@ -818,7 +818,7 @@ {"shape":"ServiceUnavailableException"}, {"shape":"LimitExceededException"} ], - "documentation":"

Creates an account-level data protection policy that applies to all log groups in the account. A data protection policy can help safeguard sensitive data that's ingested by your log groups by auditing and masking the sensitive log data. Each account can have only one account-level policy.

Sensitive data is detected and masked when it is ingested into a log group. When you set a data protection policy, log events ingested into the log groups before that time are not masked.

If you use PutAccountPolicy to create a data protection policy for your whole account, it applies to both existing log groups and all log groups that are created later in this account. The account policy is applied to existing log groups with eventual consistency. It might take up to 5 minutes before sensitive data in existing log groups begins to be masked.

By default, when a user views a log event that includes masked data, the sensitive data is replaced by asterisks. A user who has the logs:Unmask permission can use a GetLogEvents or FilterLogEvents operation with the unmask parameter set to true to view the unmasked log events. Users with the logs:Unmask can also view unmasked data in the CloudWatch Logs console by running a CloudWatch Logs Insights query with the unmask query command.

For more information, including a list of types of data that can be audited and masked, see Protect sensitive log data with masking.

To use the PutAccountPolicy operation, you must be signed on with the logs:PutDataProtectionPolicy and logs:PutAccountPolicy permissions.

The PutAccountPolicy operation applies to all log groups in the account. You can also use PutDataProtectionPolicy to create a data protection policy that applies to just one log group. If a log group has its own data protection policy and the account also has an account-level data protection policy, then the two policies are cumulative. Any sensitive term specified in either policy is masked.

" + "documentation":"

Creates an account-level data protection policy or subscription filter policy that applies to all log groups or a subset of log groups in the account.

Data protection policy

A data protection policy can help safeguard sensitive data that's ingested by your log groups by auditing and masking the sensitive log data. Each account can have only one account-level data protection policy.

Sensitive data is detected and masked when it is ingested into a log group. When you set a data protection policy, log events ingested into the log groups before that time are not masked.

If you use PutAccountPolicy to create a data protection policy for your whole account, it applies to both existing log groups and all log groups that are created later in this account. The account-level policy is applied to existing log groups with eventual consistency. It might take up to 5 minutes before sensitive data in existing log groups begins to be masked.

By default, when a user views a log event that includes masked data, the sensitive data is replaced by asterisks. A user who has the logs:Unmask permission can use a GetLogEvents or FilterLogEvents operation with the unmask parameter set to true to view the unmasked log events. Users with the logs:Unmask can also view unmasked data in the CloudWatch Logs console by running a CloudWatch Logs Insights query with the unmask query command.

For more information, including a list of types of data that can be audited and masked, see Protect sensitive log data with masking.

To use the PutAccountPolicy operation for a data protection policy, you must be signed on with the logs:PutDataProtectionPolicy and logs:PutAccountPolicy permissions.

The PutAccountPolicy operation applies to all log groups in the account. You can use PutDataProtectionPolicy to create a data protection policy that applies to just one log group. If a log group has its own data protection policy and the account also has an account-level data protection policy, then the two policies are cumulative. Any sensitive term specified in either policy is masked.

Subscription filter policy

A subscription filter policy sets up a real-time feed of log events from CloudWatch Logs to other Amazon Web Services services. Account-level subscription filter policies apply to both existing log groups and log groups that are created later in this account. Supported destinations are Kinesis Data Streams, Kinesis Data Firehose, and Lambda. When log events are sent to the receiving service, they are Base64 encoded and compressed with the GZIP format.

The following destinations are supported for subscription filters:

Each account can have one account-level subscription filter policy. If you are updating an existing filter, you must specify the correct name in PolicyName. To perform a PutAccountPolicy subscription filter operation for any destination except a Lambda function, you must also have the iam:PassRole permission.

" }, "PutDataProtectionPolicy":{ "name":"PutDataProtectionPolicy", @@ -1029,7 +1029,7 @@ {"shape":"LimitExceededException"}, {"shape":"InvalidOperationException"} ], - "documentation":"

Starts a Live Tail streaming session for one or more log groups. A Live Tail session returns a stream of log events that have been recently ingested in the log groups. For more information, see Use Live Tail to view logs in near real time.

The response to this operation is a response stream, over which the server sends live log events and the client receives them.

The following objects are sent over the stream:

You can end a session before it times out by closing the session stream or by closing the client that is receiving the stream. The session also ends if the established connection between the client and the server breaks.

", + "documentation":"

Starts a Live Tail streaming session for one or more log groups. A Live Tail session returns a stream of log events that have been recently ingested in the log groups. For more information, see Use Live Tail to view logs in near real time.

The response to this operation is a response stream, over which the server sends live log events and the client receives them.

The following objects are sent over the stream:

You can end a session before it times out by closing the session stream or by closing the client that is receiving the stream. The session also ends if the established connection between the client and the server breaks.

For examples of using an SDK to start a Live Tail session, see Start a Live Tail session using an Amazon Web Services SDK.

", "endpoint":{"hostPrefix":"streaming-"} }, "StartQuery":{ @@ -1218,6 +1218,10 @@ "shape":"Scope", "documentation":"

The scope of the account policy.

" }, + "selectionCriteria":{ + "shape":"SelectionCriteria", + "documentation":"

The log group selection criteria for this subscription filter policy.

" + }, "accountId":{ "shape":"AccountId", "documentation":"

The Amazon Web Services account ID that the policy applies to.

" @@ -1589,7 +1593,7 @@ }, "logGroupClass":{ "shape":"LogGroupClass", - "documentation":"

Use this parameter to specify the log group class for this log group. There are two classes:

If you omit this parameter, the default of STANDARD is used.

After a log group is created, its class can't be changed.

For details about the features supported by each class, see Log classes

" + "documentation":"

Use this parameter to specify the log group class for this log group. There are two classes:

If you omit this parameter, the default of STANDARD is used.

The value of logGroupClass can't be changed after a log group is created.

For details about the features supported by each class, see Log classes

" } } }, @@ -1646,7 +1650,7 @@ }, "policyType":{ "shape":"PolicyType", - "documentation":"

The type of policy to delete. Currently, the only valid value is DATA_PROTECTION_POLICY.

" + "documentation":"

The type of policy to delete.

" } } }, @@ -1970,7 +1974,7 @@ "members":{ "policyType":{ "shape":"PolicyType", - "documentation":"

Use this parameter to limit the returned policies to only the policies that match the policy type that you specify. Currently, the only valid value is DATA_PROTECTION_POLICY.

" + "documentation":"

Use this parameter to limit the returned policies to only the policies that match the policy type that you specify.

" }, "policyName":{ "shape":"PolicyName", @@ -3273,7 +3277,7 @@ }, "arn":{ "shape":"Arn", - "documentation":"

The Amazon Resource Name (ARN) of the log group.

" + "documentation":"

The Amazon Resource Name (ARN) of the log group. This version of the ARN includes a trailing :* after the log group name.

Use this version to refer to the ARN in IAM policies when specifying permissions for most API actions. The exception is when specifying permissions for TagResource, UntagResource, and ListTagsForResource. The permissions for those three actions require the ARN version that doesn't include a trailing :*.

" }, "storedBytes":{ "shape":"StoredBytes", @@ -3294,6 +3298,10 @@ "logGroupClass":{ "shape":"LogGroupClass", "documentation":"

This specifies the log group class for this log group. There are two classes:

For details about the features supported by each class, see Log classes

" + }, + "logGroupArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the log group. This version of the ARN doesn't include a trailing :* after the log group name.

Use this version to refer to the ARN in the following situations:

" } }, "documentation":"

Represents a log group.

" @@ -3658,7 +3666,10 @@ "PolicyName":{"type":"string"}, "PolicyType":{ "type":"string", - "enum":["DATA_PROTECTION_POLICY"] + "enum":[ + "DATA_PROTECTION_POLICY", + "SUBSCRIPTION_FILTER_POLICY" + ] }, "Priority":{ "type":"string", @@ -3678,15 +3689,19 @@ }, "policyDocument":{ "shape":"AccountPolicyDocument", - "documentation":"

Specify the data protection policy, in JSON.

This policy must include two JSON blocks:

For an example data protection policy, see the Examples section on this page.

The contents of the two DataIdentifer arrays must match exactly.

In addition to the two JSON blocks, the policyDocument can also include Name, Description, and Version fields. The Name is different than the operation's policyName parameter, and is used as a dimension when CloudWatch Logs reports audit findings metrics to CloudWatch.

The JSON specified in policyDocument can be up to 30,720 characters.

" + "documentation":"

Specify the policy, in JSON.

Data protection policy

A data protection policy must include two JSON blocks:

For an example data protection policy, see the Examples section on this page.

The contents of the two DataIdentifer arrays must match exactly.

In addition to the two JSON blocks, the policyDocument can also include Name, Description, and Version fields. The Name is different than the operation's policyName parameter, and is used as a dimension when CloudWatch Logs reports audit findings metrics to CloudWatch.

The JSON specified in policyDocument can be up to 30,720 characters long.

Subscription filter policy

A subscription filter policy can include the following attributes in a JSON block:

" }, "policyType":{ "shape":"PolicyType", - "documentation":"

Currently the only valid value for this parameter is DATA_PROTECTION_POLICY.

" + "documentation":"

The type of policy that you're creating or updating.

" }, "scope":{ "shape":"Scope", "documentation":"

Currently the only valid value for this parameter is ALL, which specifies that the data protection policy applies to all log groups in the account. If you omit this parameter, the default of ALL is used.

" + }, + "selectionCriteria":{ + "shape":"SelectionCriteria", + "documentation":"

Use this parameter to apply the subscription filter policy to a subset of log groups in the account. Currently, the only supported filter is LogGroupName NOT IN []. The selectionCriteria string can be up to 25KB in length. The length is determined by using its UTF-8 bytes.

Using the selectionCriteria parameter is useful to help prevent infinite loops. For more information, see Log recursion prevention.

Specifying selectionCriteria is valid only when you specify SUBSCRIPTION_FILTER_POLICY for policyType.

" } } }, @@ -3811,7 +3826,7 @@ }, "logType":{ "shape":"LogType", - "documentation":"

Defines the type of log that the source is sending. For valid values for this parameter, see the documentation for the source service.

" + "documentation":"

Defines the type of log that the source is sending. For Amazon CodeWhisperer, the valid value is EVENT_LOGS.

" }, "tags":{ "shape":"Tags", @@ -4321,6 +4336,7 @@ "type":"list", "member":{"shape":"SearchedLogStream"} }, + "SelectionCriteria":{"type":"string"}, "SequenceToken":{ "type":"string", "min":1 @@ -4416,11 +4432,11 @@ }, "logStreamNames":{ "shape":"InputLogStreamNames", - "documentation":"

If you specify this parameter, then only log events in the log streams that you specify here are included in the Live Tail session.

You can specify this parameter only if you specify only one log group in logGroupIdentifiers.

" + "documentation":"

If you specify this parameter, then only log events in the log streams that you specify here are included in the Live Tail session.

If you specify this field, you can't also specify the logStreamNamePrefixes field.

You can specify this parameter only if you specify only one log group in logGroupIdentifiers.

" }, "logStreamNamePrefixes":{ "shape":"InputLogStreamNames", - "documentation":"

If you specify this parameter, then only log events in the log streams that have names that start with the prefixes that you specify here are included in the Live Tail session.

You can specify this parameter only if you specify only one log group in logGroupIdentifiers.

" + "documentation":"

If you specify this parameter, then only log events in the log streams that have names that start with the prefixes that you specify here are included in the Live Tail session.

If you specify this field, you can't also specify the logStreamNames field.

You can specify this parameter only if you specify only one log group in logGroupIdentifiers.

" }, "logEventFilterPattern":{ "shape":"FilterPattern", diff -Nru awscli-2.15.9/awscli/botocore/data/lookoutequipment/2020-12-15/endpoint-rule-set-1.json awscli-2.15.22/awscli/botocore/data/lookoutequipment/2020-12-15/endpoint-rule-set-1.json --- awscli-2.15.9/awscli/botocore/data/lookoutequipment/2020-12-15/endpoint-rule-set-1.json 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/awscli/botocore/data/lookoutequipment/2020-12-15/endpoint-rule-set-1.json 2024-02-21 17:34:54.000000000 +0000 @@ -40,7 +40,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -83,7 +82,8 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -96,7 +96,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -110,7 +109,6 @@ "assign": "PartitionResult" } ], - "type": "tree", "rules": [ { "conditions": [ @@ -133,7 +131,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -168,7 +165,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -179,14 +175,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS and DualStack are enabled, but this partition does not support one or both", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -200,14 +198,12 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ - true, { "fn": "getAttr", "argv": [ @@ -216,11 +212,11 @@ }, "supportsFIPS" ] - } + }, + true ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -231,14 +227,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS is enabled but this partition does not support FIPS", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -252,7 +250,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -272,7 +269,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -283,14 +279,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "DualStack is 
enabled but this partition does not support DualStack", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [], @@ -301,9 +299,11 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" } - ] + ], + "type": "tree" }, { "conditions": [], diff -Nru awscli-2.15.9/awscli/botocore/data/lookoutequipment/2020-12-15/service-2.json awscli-2.15.22/awscli/botocore/data/lookoutequipment/2020-12-15/service-2.json --- awscli-2.15.9/awscli/botocore/data/lookoutequipment/2020-12-15/service-2.json 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/awscli/botocore/data/lookoutequipment/2020-12-15/service-2.json 2024-02-21 17:34:54.000000000 +0000 @@ -1236,6 +1236,10 @@ "OffCondition":{ "shape":"OffCondition", "documentation":"

Indicates that the asset associated with this sensor has been shut off. As long as this condition is met, Lookout for Equipment will not use data from this asset for training, evaluation, or inference.

" + }, + "ModelDiagnosticsOutputConfiguration":{ + "shape":"ModelDiagnosticsOutputConfiguration", + "documentation":"

The Amazon S3 location where you want Amazon Lookout for Equipment to save the pointwise model diagnostics. You must also specify the RoleArn request parameter.

" } } }, @@ -1401,7 +1405,7 @@ "type":"string", "max":2048, "min":20, - "pattern":"arn:aws(-[^:]+)?:lookoutequipment:[a-zA-Z0-9\\-]*:[0-9]{12}:dataset\\/.+" + "pattern":"arn:aws(-[^:]+)?:lookoutequipment:[a-zA-Z0-9\\-]*:[0-9]{12}:dataset\\/[0-9a-zA-Z_-]{1,200}\\/.+" }, "DatasetIdentifier":{ "type":"string", @@ -1658,7 +1662,7 @@ }, "IngestedFilesSummary":{ "shape":"IngestedFilesSummary", - "documentation":"

IngestedFilesSummary associated with the given dataset for the latest successful associated ingestion job id.

" + "documentation":"

IngestedFilesSummary associated with the given dataset for the latest successful associated ingestion job id.

" }, "RoleArn":{ "shape":"IamRoleArn", @@ -2021,6 +2025,10 @@ "RetrainingSchedulerStatus":{ "shape":"RetrainingSchedulerStatus", "documentation":"

Indicates the status of the retraining scheduler.

" + }, + "ModelDiagnosticsOutputConfiguration":{ + "shape":"ModelDiagnosticsOutputConfiguration", + "documentation":"

Configuration information for the model's pointwise model diagnostics.

" } } }, @@ -2165,6 +2173,14 @@ "AutoPromotionResultReason":{ "shape":"AutoPromotionResultReason", "documentation":"

Indicates the reason for the AutoPromotionResult. For example, a model might not be promoted if its performance was worse than the active version, if there was an error during training, or if the retraining scheduler was using MANUAL promote mode. The model will be promoted in MANAGED promote mode if the performance is better than the previous model.

" + }, + "ModelDiagnosticsOutputConfiguration":{ + "shape":"ModelDiagnosticsOutputConfiguration", + "documentation":"

The Amazon S3 location where Amazon Lookout for Equipment saves the pointwise model diagnostics for the model version.

" + }, + "ModelDiagnosticsResultsObject":{ + "shape":"S3Object", + "documentation":"

The Amazon S3 output prefix for where Lookout for Equipment saves the pointwise model diagnostics for the model version.

" } } }, @@ -2766,7 +2782,7 @@ }, "SensorsWithShortDateRange":{ "shape":"SensorsWithShortDateRange", - "documentation":"

Parameter that describes the total number of sensors that have a short date range of less than 90 days of data overall.

" + "documentation":"

Parameter that describes the total number of sensors that have a short date range of less than 14 days of data overall.

" } }, "documentation":"

Entity that comprises aggregated information on sensors having insufficient data.

" @@ -3109,7 +3125,7 @@ }, "InferenceExecutionSummaries":{ "shape":"InferenceExecutionSummaries", - "documentation":"

Provides an array of information about the individual inference executions returned from the ListInferenceExecutions operation, including model used, inference scheduler, data configuration, and so on.

" + "documentation":"

Provides an array of information about the individual inference executions returned from the ListInferenceExecutions operation, including model used, inference scheduler, data configuration, and so on.

If you don't supply the InferenceSchedulerName request parameter, or if you supply the name of an inference scheduler that doesn't exist, ListInferenceExecutions returns an empty array in InferenceExecutionSummaries.

" } } }, @@ -3187,7 +3203,7 @@ "members":{ "LabelGroupName":{ "shape":"LabelGroupName", - "documentation":"

Retruns the name of the label group.

" + "documentation":"

Returns the name of the label group.

" }, "IntervalStartTime":{ "shape":"Timestamp", @@ -3224,7 +3240,7 @@ }, "LabelSummaries":{ "shape":"LabelSummaries", - "documentation":"

A summary of the items in the label group.

" + "documentation":"

A summary of the items in the label group.

If you don't supply the LabelGroupName request parameter, or if you supply the name of a label group that doesn't exist, ListLabels returns an empty array in LabelSummaries.

" } } }, @@ -3279,7 +3295,7 @@ }, "ModelVersionSummaries":{ "shape":"ModelVersionSummaries", - "documentation":"

Provides information on the specified model version, including the created time, model and dataset ARNs, and status.

" + "documentation":"

Provides information on the specified model version, including the created time, model and dataset ARNs, and status.

If you don't supply the ModelName request parameter, or if you supply the name of a model that doesn't exist, ListModelVersions returns an empty array in ModelVersionSummaries.

" } } }, @@ -3458,6 +3474,36 @@ "min":20, "pattern":"arn:aws(-[^:]+)?:lookoutequipment:[a-zA-Z0-9\\-]*:[0-9]{12}:model\\/.+" }, + "ModelDiagnosticsOutputConfiguration":{ + "type":"structure", + "required":["S3OutputConfiguration"], + "members":{ + "S3OutputConfiguration":{ + "shape":"ModelDiagnosticsS3OutputConfiguration", + "documentation":"

The Amazon S3 location for the pointwise model diagnostics.

" + }, + "KmsKeyId":{ + "shape":"NameOrArn", + "documentation":"

The Amazon Web Services Key Management Service (KMS) key identifier to encrypt the pointwise model diagnostics files.

" + } + }, + "documentation":"

Output configuration information for the pointwise model diagnostics for an Amazon Lookout for Equipment model.

" + }, + "ModelDiagnosticsS3OutputConfiguration":{ + "type":"structure", + "required":["Bucket"], + "members":{ + "Bucket":{ + "shape":"S3Bucket", + "documentation":"

The name of the Amazon S3 bucket where the pointwise model diagnostics are located. You must be the owner of the Amazon S3 bucket.

" + }, + "Prefix":{ + "shape":"S3Prefix", + "documentation":"

The Amazon S3 prefix for the location of the pointwise model diagnostics. The prefix specifies the folder and evaluation result file name. (bucket).

When you call CreateModel or UpdateModel, specify the path within the bucket that you want Lookout for Equipment to save the model to. During training, Lookout for Equipment creates the model evaluation model as a compressed JSON file with the name model_diagnostics_results.json.gz.

When you call DescribeModel or DescribeModelVersion, prefix contains the file path and filename of the model evaluation file.

" + } + }, + "documentation":"

The Amazon S3 location for the pointwise model diagnostics for an Amazon Lookout for Equipment model.

" + }, "ModelMetrics":{ "type":"string", "max":50000, @@ -3543,7 +3589,8 @@ "RetrainingSchedulerStatus":{ "shape":"RetrainingSchedulerStatus", "documentation":"

Indicates the status of the retraining scheduler.

" - } + }, + "ModelDiagnosticsOutputConfiguration":{"shape":"ModelDiagnosticsOutputConfiguration"} }, "documentation":"

Provides information about the specified machine learning model, including dataset and model names and ARNs, as well as status.

" }, @@ -3555,7 +3602,7 @@ "type":"string", "max":2048, "min":20, - "pattern":"^arn:aws(-[^:]+)?:lookoutequipment:[a-zA-Z0-9\\-]*:[0-9]{12}:model\\/.+\\/.+\\/model-version\\/[0-9]{1,}$" + "pattern":"^arn:aws(-[^:]+)?:lookoutequipment:[a-zA-Z0-9\\-]*:[0-9]{12}:model\\/[0-9a-zA-Z_-]{1,200}\\/.+\\/model-version\\/[0-9]{1,}$" }, "ModelVersionSourceType":{ "type":"string", @@ -3888,7 +3935,7 @@ "members":{ "AffectedSensorCount":{ "shape":"Integer", - "documentation":"

Indicates the number of sensors that have less than 90 days of data.

" + "documentation":"

Indicates the number of sensors that have less than 14 days of data.

" } }, "documentation":"

Entity that comprises information on sensors that have shorter date range.

" @@ -4303,6 +4350,10 @@ "RoleArn":{ "shape":"IamRoleArn", "documentation":"

The ARN of the model to update.

" + }, + "ModelDiagnosticsOutputConfiguration":{ + "shape":"ModelDiagnosticsOutputConfiguration", + "documentation":"

The Amazon S3 location where you want Amazon Lookout for Equipment to save the pointwise model diagnostics for the model. You must also specify the RoleArn request parameter.

" } } }, diff -Nru awscli-2.15.9/awscli/botocore/data/macie2/2020-01-01/service-2.json awscli-2.15.22/awscli/botocore/data/macie2/2020-01-01/service-2.json --- awscli-2.15.9/awscli/botocore/data/macie2/2020-01-01/service-2.json 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/awscli/botocore/data/macie2/2020-01-01/service-2.json 2024-02-21 17:34:54.000000000 +0000 @@ -3777,7 +3777,7 @@ "kmsManaged": { "shape": "__long", "locationName": "kmsManaged", - "documentation": "

The total number of buckets whose default encryption settings are configured to encrypt new objects with an Amazon Web Services managed KMS key or a customer managed KMS key. By default, these buckets encrypt new objects automatically using SSE-KMS encryption.

" + "documentation": "

The total number of buckets whose default encryption settings are configured to encrypt new objects with an KMS key, either an Amazon Web Services managed key or a customer managed key. By default, these buckets encrypt new objects automatically using DSSE-KMS or SSE-KMS encryption.

" }, "s3Managed": { "shape": "__long", @@ -4118,7 +4118,7 @@ "type": { "shape": "Type", "locationName": "type", - "documentation": "

The server-side encryption algorithm that's used by default to encrypt objects that are added to the bucket. Possible values are:

" + "documentation": "

The server-side encryption algorithm that's used by default to encrypt objects that are added to the bucket. Possible values are:

" } }, "documentation": "

Provides information about the default server-side encryption settings for an S3 bucket. For detailed information about these settings, see Setting default server-side encryption behavior for Amazon S3 buckets in the Amazon Simple Storage Service User Guide.

" @@ -5410,7 +5410,8 @@ "NONE", "AES256", "aws:kms", - "UNKNOWN" + "UNKNOWN", + "aws:kms:dsse" ] }, "ErrorCode": { @@ -6377,7 +6378,7 @@ "reasons": { "shape": "__listOfUnavailabilityReasonCode", "locationName": "reasons", - "documentation": "

Specifies why occurrences of sensitive data can't be retrieved for the finding. Possible values are:

This value is null if sensitive data can be retrieved for the finding.

" + "documentation": "

Specifies why occurrences of sensitive data can't be retrieved for the finding. Possible values are:

This value is null if sensitive data can be retrieved for the finding.

" } } }, @@ -7687,17 +7688,17 @@ "customerManaged": { "shape": "__long", "locationName": "customerManaged", - "documentation": "

The total number of objects that are encrypted with a customer-provided key. The objects use customer-provided server-side encryption (SSE-C).

" + "documentation": "

The total number of objects that are encrypted with customer-provided keys. The objects use server-side encryption with customer-provided keys (SSE-C).

" }, "kmsManaged": { "shape": "__long", "locationName": "kmsManaged", - "documentation": "

The total number of objects that are encrypted with an KMS key, either an Amazon Web Services managed key or a customer managed key. The objects use KMS encryption (SSE-KMS).

" + "documentation": "

The total number of objects that are encrypted with KMS keys, either Amazon Web Services managed keys or customer managed keys. The objects use dual-layer server-side encryption or server-side encryption with KMS keys (DSSE-KMS or SSE-KMS).

" }, "s3Managed": { "shape": "__long", "locationName": "s3Managed", - "documentation": "

The total number of objects that are encrypted with an Amazon S3 managed key. The objects use Amazon S3 managed encryption (SSE-S3).

" + "documentation": "

The total number of objects that are encrypted with Amazon S3 managed keys. The objects use server-side encryption with Amazon S3 managed keys (SSE-S3).

" }, "unencrypted": { "shape": "__long", @@ -8067,12 +8068,12 @@ "externalId": { "shape": "__string", "locationName": "externalId", - "documentation": "

The external ID to specify in the trust policy for the IAM role to assume when retrieving sensitive data from affected S3 objects (roleName). The trust policy must include an sts:ExternalId condition that requires this ID.

This ID is a unique alphanumeric string that Amazon Macie generates automatically after you configure it to assume a role. This value is null if the value for retrievalMode is CALLER_CREDENTIALS.

" + "documentation": "

The external ID to specify in the trust policy for the IAM role to assume when retrieving sensitive data from affected S3 objects (roleName). This value is null if the value for retrievalMode is CALLER_CREDENTIALS.

This ID is a unique alphanumeric string that Amazon Macie generates automatically after you configure it to assume an IAM role. For a Macie administrator to retrieve sensitive data from an affected S3 object for a member account, the trust policy for the role in the member account must include an sts:ExternalId condition that requires this ID.

" }, "retrievalMode": { "shape": "RetrievalMode", "locationName": "retrievalMode", - "documentation": "

The access method that's used when retrieving sensitive data from affected S3 objects. Valid values are: ASSUME_ROLE, assume an IAM role that is in the affected Amazon Web Services account and delegates access to Amazon Macie (roleName); and, CALLER_CREDENTIALS, use the credentials of the IAM user who requests the sensitive data.

" + "documentation": "

The access method that's used to retrieve sensitive data from affected S3 objects. Valid values are: ASSUME_ROLE, assume an IAM role that is in the affected Amazon Web Services account and delegates access to Amazon Macie (roleName); and, CALLER_CREDENTIALS, use the credentials of the IAM user who requests the sensitive data.

" }, "roleName": { "shape": "__stringMin1Max64PatternW", @@ -8104,7 +8105,7 @@ "status": { "shape": "RevealStatus", "locationName": "status", - "documentation": "

The status of the configuration for the Amazon Macie account. In a request, valid values are: ENABLED, enable the configuration for the account; and, DISABLED, disable the configuration for the account. In a response, possible values are: ENABLED, the configuration is currently enabled for the account; and, DISABLED, the configuration is currently disabled for the account.

" + "documentation": "

The status of the configuration for the Amazon Macie account. In a response, possible values are: ENABLED, the configuration is currently enabled for the account; and, DISABLED, the configuration is currently disabled for the account. In a request, valid values are: ENABLED, enable the configuration for the account; and, DISABLED, disable the configuration for the account.

If you disable the configuration, you also permanently delete current settings that specify how to access affected S3 objects. If your current access method is ASSUME_ROLE, Macie also deletes the external ID and role name currently specified for the configuration. These settings can't be recovered after they're deleted.

" } }, "documentation": "

Specifies the status of the Amazon Macie configuration for retrieving occurrences of sensitive data reported by findings, and the Key Management Service (KMS) key to use to encrypt sensitive data that's retrieved. When you enable the configuration for the first time, your request must specify an KMS key. Otherwise, an error occurs.

", @@ -9261,7 +9262,8 @@ "enum": [ "NONE", "AES256", - "aws:kms" + "aws:kms", + "aws:kms:dsse" ] }, "UnavailabilityReasonCode": { @@ -9293,7 +9295,7 @@ "message": { "shape": "__string", "locationName": "message", - "documentation": "

The type of error that occurred and prevented Amazon Macie from retrieving occurrences of sensitive data reported by the finding. Possible values are:

" + "documentation": "

The type of error that occurred and prevented Amazon Macie from retrieving occurrences of sensitive data reported by the finding. Possible values are:

" } }, "documentation": "

Provides information about an error that occurred due to an unprocessable entity.

", @@ -9641,7 +9643,7 @@ "documentation": "

The name of the IAM role that is in the affected Amazon Web Services account and Amazon Macie is allowed to assume when retrieving sensitive data from affected S3 objects for the account. The trust and permissions policies for the role must meet all requirements for Macie to assume the role.

" } }, - "documentation": "

Specifies the access method and settings to use when retrieving occurrences of sensitive data reported by findings. If your request specifies an Identity and Access Management (IAM) role to assume when retrieving the sensitive data, Amazon Macie verifies that the role exists and the attached policies are configured correctly. If there's an issue, Macie returns an error. For information about addressing the issue, see Retrieving sensitive data samples with findings in the Amazon Macie User Guide.

", + "documentation": "

Specifies the access method and settings to use when retrieving occurrences of sensitive data reported by findings. If your request specifies an Identity and Access Management (IAM) role to assume, Amazon Macie verifies that the role exists and the attached policies are configured correctly. If there's an issue, Macie returns an error. For information about addressing the issue, see Configuration options and requirements for retrieving sensitive data samples in the Amazon Macie User Guide.

", "required": [ "retrievalMode" ] @@ -9657,7 +9659,7 @@ "retrievalConfiguration": { "shape": "UpdateRetrievalConfiguration", "locationName": "retrievalConfiguration", - "documentation": "

The access method and settings to use to retrieve the sensitive data.

" + "documentation": "

The access method and settings to use when retrieving the sensitive data.

" } }, "required": [ @@ -9675,7 +9677,7 @@ "retrievalConfiguration": { "shape": "RetrievalConfiguration", "locationName": "retrievalConfiguration", - "documentation": "

The access method and settings to use to retrieve the sensitive data.

" + "documentation": "

The access method and settings to use when retrieving the sensitive data.

" } } }, diff -Nru awscli-2.15.9/awscli/botocore/data/managedblockchain-query/2023-05-04/service-2.json awscli-2.15.22/awscli/botocore/data/managedblockchain-query/2023-05-04/service-2.json --- awscli-2.15.9/awscli/botocore/data/managedblockchain-query/2023-05-04/service-2.json 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/awscli/botocore/data/managedblockchain-query/2023-05-04/service-2.json 2024-02-21 17:34:54.000000000 +0000 @@ -30,7 +30,7 @@ {"shape":"InternalServerException"}, {"shape":"ServiceQuotaExceededException"} ], - "documentation":"

Gets the token balance for a batch of tokens by using the BatchGetTokenBalance action for every token in the request.

Only the native tokens BTC,ETH, and the ERC-20, ERC-721, and ERC 1155 token standards are supported.

" + "documentation":"

Gets the token balance for a batch of tokens by using the BatchGetTokenBalance action for every token in the request.

Only the native tokens BTC and ETH, and the ERC-20, ERC-721, and ERC 1155 token standards are supported.

" }, "GetAssetContract":{ "name":"GetAssetContract", @@ -68,7 +68,7 @@ {"shape":"InternalServerException"}, {"shape":"ServiceQuotaExceededException"} ], - "documentation":"

Gets the balance of a specific token, including native tokens, for a given address (wallet or contract) on the blockchain.

Only the native tokens BTC,ETH, and the ERC-20, ERC-721, and ERC 1155 token standards are supported.

" + "documentation":"

Gets the balance of a specific token, including native tokens, for a given address (wallet or contract) on the blockchain.

Only the native tokens BTC and ETH, and the ERC-20, ERC-721, and ERC 1155 token standards are supported.

" }, "GetTransaction":{ "name":"GetTransaction", @@ -87,7 +87,7 @@ {"shape":"InternalServerException"}, {"shape":"ServiceQuotaExceededException"} ], - "documentation":"

Get the details of a transaction.

" + "documentation":"

Gets the details of a transaction.

This action will return transaction details for all transactions that are confirmed on the blockchain, even if they have not reached finality.

" }, "ListAssetContracts":{ "name":"ListAssetContracts", @@ -141,7 +141,7 @@ {"shape":"InternalServerException"}, {"shape":"ServiceQuotaExceededException"} ], - "documentation":"

An array of TransactionEvent objects. Each object contains details about the transaction event.

" + "documentation":"

An array of TransactionEvent objects. Each object contains details about the transaction event.

This action will return transaction details for all transactions that are confirmed on the blockchain, even if they have not reached finality.

" }, "ListTransactions":{ "name":"ListTransactions", @@ -323,7 +323,26 @@ }, "ConfirmationStatus":{ "type":"string", - "enum":["FINAL"] + "enum":[ + "FINAL", + "NONFINAL" + ] + }, + "ConfirmationStatusFilter":{ + "type":"structure", + "required":["include"], + "members":{ + "include":{ + "shape":"ConfirmationStatusIncludeList", + "documentation":"

The container to determine whether to list results that have only reached finality . Transactions that have reached finality are always part of the response.

" + } + }, + "documentation":"

The container for the ConfirmationStatusFilter that filters for the finality of the results.

" + }, + "ConfirmationStatusIncludeList":{ + "type":"list", + "member":{"shape":"ConfirmationStatus"}, + "min":1 }, "ContractFilter":{ "type":"structure", @@ -545,7 +564,7 @@ }, "maxResults":{ "shape":"ListAssetContractsInputMaxResultsInteger", - "documentation":"

The maximum number of contracts to list.

" + "documentation":"

The maximum number of contracts to list.

Default:100

Even if additional results can be retrieved, the request can return less results than maxResults or an empty array of results.

To retrieve the next set of results, make another request with the returned nextToken value. The value of nextToken is null when there are no more results to return

" } } }, @@ -587,7 +606,7 @@ }, "maxResults":{ "shape":"ListTokenBalancesInputMaxResultsInteger", - "documentation":"

The maximum number of token balances to return.

" + "documentation":"

The maximum number of token balances to return.

Default:100

Even if additional results can be retrieved, the request can return less results than maxResults or an empty array of results.

To retrieve the next set of results, make another request with the returned nextToken value. The value of nextToken is null when there are no more results to return

" } } }, @@ -632,7 +651,7 @@ }, "maxResults":{ "shape":"ListTransactionEventsInputMaxResultsInteger", - "documentation":"

The maximum number of transaction events to list.

Even if additional results can be retrieved, the request can return less results than maxResults or an empty array of results.

To retrieve the next set of results, make another request with the returned nextToken value. The value of nextToken is null when there are no more results to return

" + "documentation":"

The maximum number of transaction events to list.

Default:100

Even if additional results can be retrieved, the request can return less results than maxResults or an empty array of results.

To retrieve the next set of results, make another request with the returned nextToken value. The value of nextToken is null when there are no more results to return

" } } }, @@ -675,7 +694,7 @@ "toBlockchainInstant":{"shape":"BlockchainInstant"}, "sort":{ "shape":"ListTransactionsSort", - "documentation":"

Sorts items in an ascending order if the first page starts at fromTime. Sorts items in a descending order if the first page starts at toTime.

" + "documentation":"

The order by which the results will be sorted. If ASCENDING is selected, the results will be ordered by fromTime.

" }, "nextToken":{ "shape":"NextToken", @@ -683,7 +702,11 @@ }, "maxResults":{ "shape":"ListTransactionsInputMaxResultsInteger", - "documentation":"

The maximum number of transactions to list.

Even if additional results can be retrieved, the request can return less results than maxResults or an empty array of results.

To retrieve the next set of results, make another request with the returned nextToken value. The value of nextToken is null when there are no more results to return

" + "documentation":"

The maximum number of transactions to list.

Default:100

Even if additional results can be retrieved, the request can return less results than maxResults or an empty array of results.

To retrieve the next set of results, make another request with the returned nextToken value. The value of nextToken is null when there are no more results to return

" + }, + "confirmationStatusFilter":{ + "shape":"ConfirmationStatusFilter", + "documentation":"

This filter is used to include transactions in the response that haven't reached finality. Transactions that have reached finality are always part of the response.

" } } }, @@ -797,13 +820,6 @@ "type":"string", "pattern":"(0x[A-Fa-f0-9]{64}|[A-Fa-f0-9]{64})" }, - "QueryTransactionStatus":{ - "type":"string", - "enum":[ - "FINAL", - "FAILED" - ] - }, "QuotaCode":{"type":"string"}, "ResourceId":{"type":"string"}, "ResourceNotFoundException":{ @@ -990,10 +1006,10 @@ }, "tokenId":{ "shape":"QueryTokenId", - "documentation":"

The unique identifier of the token.

You must specify this container with btc for the native BTC token, and eth for the native ETH token. For all other token types you must specify the tokenId in the 64 character hexadecimal tokenid format.

" + "documentation":"

The unique identifier of the token.

For native tokens, use the 3 character abbreviation that best matches your token. For example, btc for Bitcoin, eth for Ether, etc. For all other token types you must specify the tokenId in the 64 character hexadecimal tokenid format.

" } }, - "documentation":"

The container for the identifier for the token including the unique token ID and its blockchain network.

Only the native tokens BTC,ETH, and the ERC-20, ERC-721, and ERC 1155 token standards are supported.

" + "documentation":"

The container for the identifier for the token including the unique token ID and its blockchain network.

Only the native tokens BTC and ETH, and the ERC-20, ERC-721, and ERC 1155 token standards are supported.

" }, "Transaction":{ "type":"structure", @@ -1034,12 +1050,6 @@ "shape":"Long", "documentation":"

The number of transactions in the block.

" }, - "status":{ - "shape":"QueryTransactionStatus", - "documentation":"

The status of the transaction.

This property is deprecated. You must use the confirmationStatus and the executionStatus properties to determine if the status of the transaction is FINAL or FAILED.

  • Transactions with a status of FINAL will now have the confirmationStatus set to FINAL and the executionStatus set to SUCCEEDED.

  • Transactions with a status of FAILED will now have the confirmationStatus set to FINAL and the executionStatus set to FAILED.

", - "deprecated":true, - "deprecatedMessage":"The status field in the GetTransaction response is deprecated and is replaced with the confirmationStatus and executionStatus fields." - }, "to":{ "shape":"ChainAddress", "documentation":"

The identifier of the transaction. It is generated whenever a transaction is verified and added to the blockchain.

" @@ -1171,6 +1181,10 @@ "transactionTimestamp":{ "shape":"Timestamp", "documentation":"

The time when the transaction occurred.

" + }, + "confirmationStatus":{ + "shape":"ConfirmationStatus", + "documentation":"

Specifies whether to list transactions that have not reached Finality.

" } }, "documentation":"

The container of the transaction output.

" diff -Nru awscli-2.15.9/awscli/botocore/data/marketplace-catalog/2018-09-17/service-2.json awscli-2.15.22/awscli/botocore/data/marketplace-catalog/2018-09-17/service-2.json --- awscli-2.15.9/awscli/botocore/data/marketplace-catalog/2018-09-17/service-2.json 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/awscli/botocore/data/marketplace-catalog/2018-09-17/service-2.json 2024-02-21 17:34:54.000000000 +0000 @@ -27,7 +27,7 @@ {"shape":"ThrottlingException"}, {"shape":"ValidationException"} ], - "documentation":"

Returns metadata and content for multiple entities.

" + "documentation":"

Returns metadata and content for multiple entities. This is the Batch version of the DescribeEntity API and uses the same IAM permission action as DescribeEntity API.

" }, "CancelChangeSet":{ "name":"CancelChangeSet", @@ -296,7 +296,7 @@ "documentation":"

The visibility of the AMI product.

" } }, - "documentation":"

Object containing all the filter fields for AMI products. Client can add a maximum of 8 filters in a single ListEntities request.

" + "documentation":"

Object containing all the filter fields for AMI products. Client can add only one wildcard filter and a maximum of 8 filters in a single ListEntities request.

" }, "AmiProductLastModifiedDateFilter":{ "type":"structure", @@ -685,7 +685,7 @@ "documentation":"

The visibility of the container product.

" } }, - "documentation":"

Object containing all the filter fields for container products. Client can add a maximum of 8 filters in a single ListEntities request.

" + "documentation":"

Object containing all the filter fields for container products. Client can add only one wildcard filter and a maximum of 8 filters in a single ListEntities request.

" }, "ContainerProductLastModifiedDateFilter":{ "type":"structure", @@ -841,7 +841,7 @@ "documentation":"

The last date on which the data product was modified.

" } }, - "documentation":"

Object containing all the filter fields for data products. Client can add a maximum of 8 filters in a single ListEntities request.

" + "documentation":"

Object containing all the filter fields for data products. Client can add only one wildcard filter and a maximum of 8 filters in a single ListEntities request.

" }, "DataProductLastModifiedDateFilter":{ "type":"structure", @@ -1015,6 +1015,10 @@ "shape":"ChangeSetName", "documentation":"

The optional name provided in the StartChangeSet request. If you do not provide a name, one is set by default.

" }, + "Intent":{ + "shape":"Intent", + "documentation":"

The optional intent provided in the StartChangeSet request. If you do not provide an intent, APPLY is set by default.

" + }, "StartTime":{ "shape":"DateTimeISO8601", "documentation":"

The date and time, in ISO 8601 format (2018-02-27T13:45:22Z), the request started.

" @@ -1400,6 +1404,13 @@ "min":1, "pattern":"^[\\w\\-@]+$" }, + "Intent":{ + "type":"string", + "enum":[ + "VALIDATE", + "APPLY" + ] + }, "InternalServiceException":{ "type":"structure", "members":{ @@ -1674,7 +1685,7 @@ "documentation":"

Allows filtering on the LastModifiedDate of an offer.

" } }, - "documentation":"

A filter for offers entity.

" + "documentation":"

Object containing all the filter fields for offers entity. Client can add only one wildcard filter and a maximum of 8 filters in a single ListEntities request.

" }, "OfferLastModifiedDateFilter":{ "type":"structure", @@ -2069,7 +2080,7 @@ "documentation":"

Allows filtering on the LastModifiedDate of a ResaleAuthorization.

" } }, - "documentation":"

A filter for ResaleAuthorization entity.

" + "documentation":"

Object containing all the filter fields for resale authorization entity. Client can add only one wildcard filter and a maximum of 8 filters in a single ListEntities request.

" }, "ResaleAuthorizationLastModifiedDateFilter":{ "type":"structure", @@ -2541,7 +2552,7 @@ "documentation":"

The last date on which the SaaS product was modified.

" } }, - "documentation":"

Object containing all the filter fields for SaaS products. Client can add a maximum of 8 filters in a single ListEntities request.

" + "documentation":"

Object containing all the filter fields for SaaS products. Client can add only one wildcard filter and a maximum of 8 filters in a single ListEntities request.

" }, "SaaSProductLastModifiedDateFilter":{ "type":"structure", @@ -2719,6 +2730,10 @@ "ChangeSetTags":{ "shape":"TagList", "documentation":"

A list of objects specifying each key name and value for the ChangeSetTags property.

" + }, + "Intent":{ + "shape":"Intent", + "documentation":"

The intent related to the request. The default is APPLY. To test your request before applying changes to your entities, use VALIDATE. This feature is currently available for adding versions to single-AMI products. For more information, see Add a new version.

" } } }, diff -Nru awscli-2.15.9/awscli/botocore/data/mediaconvert/2017-08-29/service-2.json awscli-2.15.22/awscli/botocore/data/mediaconvert/2017-08-29/service-2.json --- awscli-2.15.9/awscli/botocore/data/mediaconvert/2017-08-29/service-2.json 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/awscli/botocore/data/mediaconvert/2017-08-29/service-2.json 2024-02-21 17:34:54.000000000 +0000 @@ -461,7 +461,9 @@ "documentation": "ResourceInUseException" } ], - "documentation": "Send an request with an empty body to the regional API endpoint to get your account API endpoint." + "documentation": "Send an request with an empty body to the regional API endpoint to get your account API endpoint.", + "deprecated": true, + "deprecatedMessage": "DescribeEndpoints and account specific endpoints are no longer required. We recommend that you send your requests directly to the regional endpoint instead." }, "DisassociateCertificate": { "name": "DisassociateCertificate", @@ -1172,7 +1174,7 @@ }, "AacCodecProfile": { "type": "string", - "documentation": "Specify the AAC profile. For the widest player compatibility and where higher bitrates are acceptable: Keep the default profile, LC (AAC-LC) For improved audio performance at lower bitrates: Choose HEV1 or HEV2. HEV1 (AAC-HE v1) adds spectral band replication to improve speech audio at low bitrates. HEV2 (AAC-HE v2) adds parametric stereo, which optimizes for encoding stereo audio at very low bitrates.", + "documentation": "AAC Profile.", "enum": [ "LC", "HEV1", @@ -1192,7 +1194,7 @@ }, "AacRateControlMode": { "type": "string", - "documentation": "Specify the AAC rate control mode. For a constant bitrate: Choose CBR. Your AAC output bitrate will be equal to the value that you choose for Bitrate. For a variable bitrate: Choose VBR. 
Your AAC output bitrate will vary according to your audio content and the value that you choose for Bitrate quality.", + "documentation": "Rate Control Mode.", "enum": [ "CBR", "VBR" @@ -1222,7 +1224,7 @@ "CodecProfile": { "shape": "AacCodecProfile", "locationName": "codecProfile", - "documentation": "Specify the AAC profile. For the widest player compatibility and where higher bitrates are acceptable: Keep the default profile, LC (AAC-LC) For improved audio performance at lower bitrates: Choose HEV1 or HEV2. HEV1 (AAC-HE v1) adds spectral band replication to improve speech audio at low bitrates. HEV2 (AAC-HE v2) adds parametric stereo, which optimizes for encoding stereo audio at very low bitrates." + "documentation": "AAC Profile." }, "CodingMode": { "shape": "AacCodingMode", @@ -1232,7 +1234,7 @@ "RateControlMode": { "shape": "AacRateControlMode", "locationName": "rateControlMode", - "documentation": "Specify the AAC rate control mode. For a constant bitrate: Choose CBR. Your AAC output bitrate will be equal to the value that you choose for Bitrate. For a variable bitrate: Choose VBR. Your AAC output bitrate will vary according to your audio content and the value that you choose for Bitrate quality." + "documentation": "Rate Control Mode." }, "RawFormat": { "shape": "AacRawFormat", @@ -1242,7 +1244,7 @@ "SampleRate": { "shape": "__integerMin8000Max96000", "locationName": "sampleRate", - "documentation": "Specify the AAC sample rate in samples per second (Hz). Valid sample rates depend on the AAC profile and Coding mode that you select. For a list of supported sample rates, see: https://docs.aws.amazon.com/mediaconvert/latest/ug/aac-support.html" + "documentation": "Specify the Sample rate in Hz. Valid sample rates depend on the Profile and Coding mode that you select. The following list shows valid sample rates for each Profile and Coding mode. 
* LC Profile, Coding mode 1.0, 2.0, and Receiver Mix: 8000, 12000, 16000, 22050, 24000, 32000, 44100, 48000, 88200, 96000. * LC Profile, Coding mode 5.1: 32000, 44100, 48000, 96000. * HEV1 Profile, Coding mode 1.0 and Receiver Mix: 22050, 24000, 32000, 44100, 48000. * HEV1 Profile, Coding mode 2.0 and 5.1: 32000, 44100, 48000, 96000. * HEV2 Profile, Coding mode 2.0: 22050, 24000, 32000, 44100, 48000." }, "Specification": { "shape": "AacSpecification", @@ -1252,7 +1254,7 @@ "VbrQuality": { "shape": "AacVbrQuality", "locationName": "vbrQuality", - "documentation": "Specify the quality of your variable bitrate (VBR) AAC audio. For a list of approximate VBR bitrates, see: https://docs.aws.amazon.com/mediaconvert/latest/ug/aac-support.html#aac_vbr" + "documentation": "VBR Quality Level - Only used if rate_control_mode is VBR." } }, "documentation": "Required when you set Codec to the value AAC. The service accepts one of two mutually exclusive groups of AAC settings--VBR and CBR. To select one of these modes, set the value of Bitrate control mode to \"VBR\" or \"CBR\". In VBR mode, you control the audio quality with the setting VBR quality. In CBR mode, you use the setting Bitrate. Defaults and valid values depend on the rate control mode." @@ -1267,7 +1269,7 @@ }, "AacVbrQuality": { "type": "string", - "documentation": "Specify the quality of your variable bitrate (VBR) AAC audio. For a list of approximate VBR bitrates, see: https://docs.aws.amazon.com/mediaconvert/latest/ug/aac-support.html#aac_vbr", + "documentation": "VBR Quality Level - Only used if rate_control_mode is VBR.", "enum": [ "LOW", "MEDIUM_LOW", @@ -1775,7 +1777,7 @@ "documentation": "Settings related to audio encoding. The settings in this group vary depending on the value that you choose for your audio codec." 
}, "CustomLanguageCode": { - "shape": "__stringPatternAZaZ23AZaZ", + "shape": "__stringPatternAZaZ23AZaZ09", "locationName": "customLanguageCode", "documentation": "Specify the language for this audio output track. The service puts this language code into your output audio track when you set Language code control to Use configured. The service also uses your specified custom language code when you set Language code control to Follow input, but your input file doesn't specify a language code. For all outputs, you can use an ISO 639-2 or ISO 639-3 code. For streaming outputs, you can also use any other code in the full RFC-5646 specification. Streaming outputs are those that are in one of the following output groups: CMAF, DASH ISO, Apple HLS, or Microsoft Smooth Streaming." }, @@ -3500,13 +3502,6 @@ }, "documentation": "Custom 3D lut settings" }, - "ColorConversion3DLUTSettings": { - "type": "list", - "documentation": "Use 3D LUTs to specify custom color mapping behavior when you convert from one color space into another. You can include up to 8 different 3D LUTs.", - "member": { - "shape": "ColorConversion3DLUTSetting" - } - }, "ColorCorrector": { "type": "structure", "members": { @@ -4373,7 +4368,9 @@ "enum": [ "DEFAULT", "GET_ONLY" - ] + ], + "deprecated": true, + "deprecatedMessage": "DescribeEndpoints and account specific endpoints are no longer required. We recommend that you send your requests directly to the regional endpoint instead." }, "DescribeEndpointsRequest": { "type": "structure", @@ -4394,7 +4391,9 @@ "documentation": "Use this string, provided with the response to a previous request, to request the next batch of endpoints." } }, - "documentation": "DescribeEndpointsRequest" + "documentation": "DescribeEndpointsRequest", + "deprecated": true, + "deprecatedMessage": "DescribeEndpoints and account specific endpoints are no longer required. We recommend that you send your requests directly to the regional endpoint instead." 
}, "DescribeEndpointsResponse": { "type": "structure", @@ -4409,7 +4408,9 @@ "locationName": "nextToken", "documentation": "Use this string to request the next batch of endpoints." } - } + }, + "deprecated": true, + "deprecatedMessage": "DescribeEndpoints and account specific endpoints are no longer required. We recommend that you send your requests directly to the regional endpoint instead." }, "DestinationSettings": { "type": "structure", @@ -8024,7 +8025,7 @@ "documentation": "Settings for ad avail blanking. Video can be blanked or overlaid with an image, and audio muted during SCTE-35 triggered ad avails." }, "ColorConversion3DLUTSettings": { - "shape": "ColorConversion3DLUTSettings", + "shape": "__listOfColorConversion3DLUTSetting", "locationName": "colorConversion3DLUTSettings", "documentation": "Use 3D LUTs to specify custom color mapping behavior when you convert from one color space into another. You can include up to 8 different 3D LUTs." }, @@ -8195,7 +8196,7 @@ "documentation": "Settings for ad avail blanking. Video can be blanked or overlaid with an image, and audio muted during SCTE-35 triggered ad avails." }, "ColorConversion3DLUTSettings": { - "shape": "ColorConversion3DLUTSettings", + "shape": "__listOfColorConversion3DLUTSetting", "locationName": "colorConversion3DLUTSettings", "documentation": "Use 3D LUTs to specify custom color mapping behavior when you convert from one color space into another. You can include up to 8 different 3D LUTs." }, @@ -11095,6 +11096,16 @@ "RemixSettings": { "type": "structure", "members": { + "AudioDescriptionAudioChannel": { + "shape": "__integerMin1Max64", + "locationName": "audioDescriptionAudioChannel", + "documentation": "Optionally specify the channel in your input that contains your audio description audio signal. MediaConvert mixes your audio signal across all output channels, while reducing their volume according to your data stream. 
When you specify an audio description audio channel, you must also specify an audio description data channel. For more information about audio description signals, see the BBC WHP 198 and 051 white papers." + }, + "AudioDescriptionDataChannel": { + "shape": "__integerMin1Max64", + "locationName": "audioDescriptionDataChannel", + "documentation": "Optionally specify the channel in your input that contains your audio description data stream. MediaConvert mixes your audio signal across all output channels, while reducing their volume according to your data stream. When you specify an audio description data channel, you must also specify an audio description audio channel. For more information about audio description signals, see the BBC WHP 198 and 051 white papers." + }, "ChannelMapping": { "shape": "ChannelMapping", "locationName": "channelMapping", @@ -13816,6 +13827,12 @@ "shape": "CmafAdditionalManifest" } }, + "__listOfColorConversion3DLUTSetting": { + "type": "list", + "member": { + "shape": "ColorConversion3DLUTSetting" + } + }, "__listOfDashAdditionalManifest": { "type": "list", "member": { @@ -14235,6 +14252,10 @@ "type": "string", "pattern": "^[A-Za-z]{2,3}(-[A-Za-z-]+)?$" }, + "__stringPatternAZaZ23AZaZ09": { + "type": "string", + "pattern": "^[A-Za-z]{2,3}(-[A-Za-z0-9-]+)?$" + }, "__stringPatternArnAwsUsGovAcm": { "type": "string", "pattern": "^arn:aws(-us-gov)?:acm:" diff -Nru awscli-2.15.9/awscli/botocore/data/mediatailor/2018-04-23/service-2.json awscli-2.15.22/awscli/botocore/data/mediatailor/2018-04-23/service-2.json --- awscli-2.15.9/awscli/botocore/data/mediatailor/2018-04-23/service-2.json 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/awscli/botocore/data/mediatailor/2018-04-23/service-2.json 2024-02-21 17:34:54.000000000 +0000 @@ -555,6 +555,7 @@ }, "AdBreak":{ "type":"structure", + "required":["OffsetMillis"], "members":{ "AdBreakMetadata":{ "shape":"AdBreakMetadataList", diff -Nru 
awscli-2.15.9/awscli/botocore/data/mwaa/2020-07-01/service-2.json awscli-2.15.22/awscli/botocore/data/mwaa/2020-07-01/service-2.json --- awscli-2.15.9/awscli/botocore/data/mwaa/2020-07-01/service-2.json 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/awscli/botocore/data/mwaa/2020-07-01/service-2.json 2024-02-21 17:34:54.000000000 +0000 @@ -221,6 +221,11 @@ "value":{"shape":"ConfigValue"}, "sensitive":true }, + "AirflowIdentity":{ + "type":"string", + "max":64, + "min":1 + }, "AirflowVersion":{ "type":"string", "max":32, @@ -408,6 +413,14 @@ "CreateWebLoginTokenResponse":{ "type":"structure", "members":{ + "AirflowIdentity":{ + "shape":"AirflowIdentity", + "documentation":"

The user name of the Apache Airflow identity creating the web login token.

" + }, + "IamIdentity":{ + "shape":"IamIdentity", + "documentation":"

The name of the IAM identity creating the web login token. This might be an IAM user, or an assumed or federated identity. For example, assumed-role/Admin/your-name.

" + }, "WebServerHostname":{ "shape":"Hostname", "documentation":"

The Airflow web server hostname for the environment.

" @@ -518,7 +531,7 @@ }, "KmsKey":{ "shape":"KmsKey", - "documentation":"

The Amazon Web Services Key Management Service (KMS) encryption key used to encrypt the data in your environment.

" + "documentation":"

The KMS encryption key used to encrypt the data in your environment.

" }, "LastUpdate":{ "shape":"LastUpdate", @@ -582,7 +595,7 @@ }, "Status":{ "shape":"EnvironmentStatus", - "documentation":"

The status of the Amazon MWAA environment.

Valid values:

We recommend reviewing our troubleshooting guide for a list of common errors and their solutions. For more information, see Amazon MWAA troubleshooting.

" + "documentation":"

The status of the Amazon MWAA environment.

Valid values:

We recommend reviewing our troubleshooting guide for a list of common errors and their solutions. For more information, see Amazon MWAA troubleshooting.

" }, "Tags":{ "shape":"TagMap", @@ -594,7 +607,7 @@ }, "WebserverUrl":{ "shape":"WebserverUrl", - "documentation":"

The Apache Airflow Web server host name for the Amazon MWAA environment. For more information, see Accessing the Apache Airflow UI.

" + "documentation":"

The Apache Airflow web server host name for the Amazon MWAA environment. For more information, see Accessing the Apache Airflow UI.

" }, "WebserverVpcEndpointService":{ "shape":"VpcEndpointServiceName", @@ -641,7 +654,8 @@ "UPDATE_FAILED", "ROLLING_BACK", "CREATING_SNAPSHOT", - "PENDING" + "PENDING", + "MAINTENANCE" ] }, "ErrorCode":{"type":"string"}, @@ -678,6 +692,7 @@ "min":1, "pattern":"^(([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\\-]*[a-zA-Z0-9])\\.)*([A-Za-z0-9]|[A-Za-z0-9][A-Za-z0-9\\-]*[A-Za-z0-9])$" }, + "IamIdentity":{"type":"string"}, "IamRoleArn":{ "type":"string", "max":1224, diff -Nru awscli-2.15.9/awscli/botocore/data/neptune-graph/2023-11-29/service-2.json awscli-2.15.22/awscli/botocore/data/neptune-graph/2023-11-29/service-2.json --- awscli-2.15.9/awscli/botocore/data/neptune-graph/2023-11-29/service-2.json 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/awscli/botocore/data/neptune-graph/2023-11-29/service-2.json 2024-02-21 17:34:54.000000000 +0000 @@ -30,11 +30,35 @@ {"shape":"InternalServerException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Deletes the specified import task

", + "documentation":"

Deletes the specified import task.

", "staticContextParams":{ "ApiType":{"value":"ControlPlane"} } }, + "CancelQuery":{ + "name":"CancelQuery", + "http":{ + "method":"DELETE", + "requestUri":"/queries/{queryId}", + "responseCode":200 + }, + "input":{"shape":"CancelQueryInput"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Cancels a specified query.

", + "endpoint":{ + "hostPrefix":"{graphIdentifier}." + }, + "idempotent":true, + "staticContextParams":{ + "ApiType":{"value":"DataPlane"} + } + }, "CreateGraph":{ "name":"CreateGraph", "http":{ @@ -184,6 +208,31 @@ "ApiType":{"value":"ControlPlane"} } }, + "ExecuteQuery":{ + "name":"ExecuteQuery", + "http":{ + "method":"POST", + "requestUri":"/queries", + "responseCode":200 + }, + "input":{"shape":"ExecuteQueryInput"}, + "output":{"shape":"ExecuteQueryOutput"}, + "errors":[ + {"shape":"UnprocessableException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Execute an openCypher query. Currently, the SDK does not support parameterized queries. If you want to make a parameterized query call, you can use an HTTP request.

When invoking this operation in a Neptune Analytics cluster, the IAM user or role making the request must have a policy attached that allows one of the following IAM actions in that cluster, depending on the query:

Non-parametrized queries are not considered for plan caching. You can force plan caching with planCache=enabled. The plan cache will be reused only for the same exact query. Slight variations in the query will not be able to reuse the query plan cache.

", + "endpoint":{ + "hostPrefix":"{graphIdentifier}." + }, + "staticContextParams":{ + "ApiType":{"value":"DataPlane"} + } + }, "GetGraph":{ "name":"GetGraph", "http":{ @@ -224,6 +273,30 @@ "ApiType":{"value":"ControlPlane"} } }, + "GetGraphSummary":{ + "name":"GetGraphSummary", + "http":{ + "method":"GET", + "requestUri":"/summary", + "responseCode":200 + }, + "input":{"shape":"GetGraphSummaryInput"}, + "output":{"shape":"GetGraphSummaryOutput"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Gets a graph summary for a property graph.

", + "endpoint":{ + "hostPrefix":"{graphIdentifier}." + }, + "staticContextParams":{ + "ApiType":{"value":"DataPlane"} + } + }, "GetImportTask":{ "name":"GetImportTask", "http":{ @@ -264,6 +337,30 @@ "ApiType":{"value":"ControlPlane"} } }, + "GetQuery":{ + "name":"GetQuery", + "http":{ + "method":"GET", + "requestUri":"/queries/{queryId}", + "responseCode":200 + }, + "input":{"shape":"GetQueryInput"}, + "output":{"shape":"GetQueryOutput"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Retrieves the status of a specified query.

When invoking this operation in a Neptune Analytics cluster, the IAM user or role making the request must have the neptune-graph:GetQueryStatus IAM action attached.

", + "endpoint":{ + "hostPrefix":"{graphIdentifier}." + }, + "staticContextParams":{ + "ApiType":{"value":"DataPlane"} + } + }, "ListGraphSnapshots":{ "name":"ListGraphSnapshots", "http":{ @@ -343,6 +440,29 @@ "ApiType":{"value":"ControlPlane"} } }, + "ListQueries":{ + "name":"ListQueries", + "http":{ + "method":"GET", + "requestUri":"/queries", + "responseCode":200 + }, + "input":{"shape":"ListQueriesInput"}, + "output":{"shape":"ListQueriesOutput"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Lists active openCypher queries.

", + "endpoint":{ + "hostPrefix":"{graphIdentifier}." + }, + "staticContextParams":{ + "ApiType":{"value":"DataPlane"} + } + }, "ListTagsForResource":{ "name":"ListTagsForResource", "http":{ @@ -471,6 +591,22 @@ } }, "shapes":{ + "AccessDeniedException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{ + "shape":"String", + "documentation":"

A message describing the problem.

" + } + }, + "documentation":"

Raised in case of an authentication or authorization failure.

", + "error":{ + "httpStatusCode":403, + "senderFault":true + }, + "exception":true + }, "Arn":{ "type":"string", "max":1011, @@ -512,7 +648,7 @@ }, "source":{ "shape":"String", - "documentation":"

A URL identifying to the location of the data to be imported. This can be an Amazon S3 path, or can point to a Neptune database endpoint or snapshot

" + "documentation":"

A URL identifying to the location of the data to be imported. This can be an Amazon S3 path, or can point to a Neptune database endpoint or snapshot.

" }, "format":{ "shape":"Format", @@ -528,6 +664,28 @@ } } }, + "CancelQueryInput":{ + "type":"structure", + "required":[ + "graphIdentifier", + "queryId" + ], + "members":{ + "graphIdentifier":{ + "shape":"GraphIdentifier", + "documentation":"

The unique identifier of the Neptune Analytics graph.

", + "hostLabel":true, + "location":"header", + "locationName":"graphIdentifier" + }, + "queryId":{ + "shape":"String", + "documentation":"

The unique identifier of the query to cancel.

", + "location":"uri", + "locationName":"queryId" + } + } + }, "ConflictException":{ "type":"structure", "required":["message"], @@ -569,7 +727,7 @@ }, "publicConnectivity":{ "shape":"Boolean", - "documentation":"

Specifies whether or not the graph can be reachable over the internet. All access to graphs IAM authenticated. (true to enable, or false to disable.

" + "documentation":"

Specifies whether or not the graph can be reachable over the internet. All access to graphs is IAM authenticated. (true to enable, or false to disable.

" }, "kmsKeyIdentifier":{ "shape":"KmsKeyArn", @@ -581,7 +739,7 @@ }, "replicaCount":{ "shape":"ReplicaCount", - "documentation":"

The number of replicas in other AZs. Min =0, Max = 2, Default =1

" + "documentation":"

The number of replicas in other AZs. Min =0, Max = 2, Default = 1.

" }, "deletionProtection":{ "shape":"Boolean", @@ -635,7 +793,7 @@ }, "publicConnectivity":{ "shape":"Boolean", - "documentation":"

Specifies whether or not the graph can be reachable over the internet. All access to graphs IAM authenticated.

" + "documentation":"

Specifies whether or not the graph can be reachable over the internet. All access to graphs is IAM authenticated.

" }, "vectorSearchConfiguration":{ "shape":"VectorSearchConfiguration", @@ -740,7 +898,7 @@ }, "publicConnectivity":{ "shape":"Boolean", - "documentation":"

Specifies whether or not the graph can be reachable over the internet. All access to graphs IAM authenticated. (true to enable, or false to disable.

" + "documentation":"

Specifies whether or not the graph can be reachable over the internet. All access to graphs is IAM authenticated. (true to enable, or false to disable).

" }, "kmsKeyIdentifier":{ "shape":"KmsKeyArn", @@ -1062,6 +1220,104 @@ } } }, + "Document":{ + "type":"structure", + "members":{ + }, + "document":true + }, + "DocumentValuedMap":{ + "type":"map", + "key":{"shape":"String"}, + "value":{"shape":"Document"} + }, + "EdgeLabels":{ + "type":"list", + "member":{"shape":"String"} + }, + "EdgeProperties":{ + "type":"list", + "member":{"shape":"String"} + }, + "EdgeStructure":{ + "type":"structure", + "members":{ + "count":{ + "shape":"Long", + "documentation":"

The number of instances of the edge in the graph.

" + }, + "edgeProperties":{ + "shape":"EdgeProperties", + "documentation":"

A list of the properties associated with the edge.

" + } + }, + "documentation":"

Contains information about an edge in a Neptune Analytics graph.

" + }, + "EdgeStructures":{ + "type":"list", + "member":{"shape":"EdgeStructure"} + }, + "ExecuteQueryInput":{ + "type":"structure", + "required":[ + "graphIdentifier", + "queryString", + "language" + ], + "members":{ + "graphIdentifier":{ + "shape":"GraphIdentifier", + "documentation":"

The unique identifier of the Neptune Analytics graph.

", + "hostLabel":true, + "location":"header", + "locationName":"graphIdentifier" + }, + "queryString":{ + "shape":"String", + "documentation":"

The query string to be executed.

", + "locationName":"query" + }, + "language":{ + "shape":"QueryLanguage", + "documentation":"

The query language the query is written in. Currently only openCypher is supported.

" + }, + "parameters":{ + "shape":"DocumentValuedMap", + "documentation":"

The data parameters the query can use in JSON format. For example: {\"name\": \"john\", \"age\": 20}. (optional)

" + }, + "planCache":{ + "shape":"PlanCacheType", + "documentation":"

Query plan cache is a feature that saves the query plan and reuses it on successive executions of the same query. This reduces query latency, and works for both READ and UPDATE queries. The plan cache is an LRU cache with a 5 minute TTL and a capacity of 1000.

" + }, + "explainMode":{ + "shape":"ExplainMode", + "documentation":"

The explain mode parameter returns a query explain instead of the actual query results. A query explain can be used to gather insights about the query execution such as planning decisions, time spent on each operator, solutions flowing etc.

", + "locationName":"explain" + }, + "queryTimeoutMilliseconds":{ + "shape":"Integer", + "documentation":"

Specifies the query timeout duration, in milliseconds. (optional)

" + } + } + }, + "ExecuteQueryOutput":{ + "type":"structure", + "required":["payload"], + "members":{ + "payload":{ + "shape":"QueryResponseBlob", + "documentation":"

The query results.

" + } + }, + "payload":"payload" + }, + "ExplainMode":{ + "type":"string", + "enum":[ + "STATIC", + "DETAILS" + ] + }, "Format":{ "type":"string", "enum":[ @@ -1198,6 +1454,42 @@ } } }, + "GetGraphSummaryInput":{ + "type":"structure", + "required":["graphIdentifier"], + "members":{ + "graphIdentifier":{ + "shape":"GraphIdentifier", + "documentation":"

The unique identifier of the Neptune Analytics graph.

", + "hostLabel":true, + "location":"header", + "locationName":"graphIdentifier" + }, + "mode":{ + "shape":"GraphSummaryMode", + "documentation":"

The summary mode can take one of two values: basic (the default), and detailed.

", + "location":"querystring", + "locationName":"mode" + } + } + }, + "GetGraphSummaryOutput":{ + "type":"structure", + "members":{ + "version":{ + "shape":"String", + "documentation":"

Display the version of this tool.

" + }, + "lastStatisticsComputationTime":{ + "shape":"SyntheticTimestamp_date_time", + "documentation":"

The timestamp, in ISO 8601 format, of the time at which Neptune Analytics last computed statistics.

" + }, + "graphSummary":{ + "shape":"GraphDataSummary", + "documentation":"

The graph summary.

" + } + } + }, "GetImportTaskInput":{ "type":"structure", "required":["taskIdentifier"], @@ -1308,6 +1600,115 @@ } } }, + "GetQueryInput":{ + "type":"structure", + "required":[ + "graphIdentifier", + "queryId" + ], + "members":{ + "graphIdentifier":{ + "shape":"GraphIdentifier", + "documentation":"

The unique identifier of the Neptune Analytics graph.

", + "hostLabel":true, + "location":"header", + "locationName":"graphIdentifier" + }, + "queryId":{ + "shape":"String", + "documentation":"

The ID of the query in question.

", + "location":"uri", + "locationName":"queryId" + } + } + }, + "GetQueryOutput":{ + "type":"structure", + "members":{ + "id":{ + "shape":"String", + "documentation":"

The ID of the query in question.

" + }, + "queryString":{ + "shape":"String", + "documentation":"

The query in question.

" + }, + "waited":{ + "shape":"Integer", + "documentation":"

Indicates how long the query waited, in milliseconds.

" + }, + "elapsed":{ + "shape":"Integer", + "documentation":"

The number of milliseconds the query has been running.

" + }, + "state":{ + "shape":"QueryState", + "documentation":"

State of the query.

" + } + } + }, + "GraphDataSummary":{ + "type":"structure", + "members":{ + "numNodes":{ + "shape":"Long", + "documentation":"

The number of nodes in the graph.

" + }, + "numEdges":{ + "shape":"Long", + "documentation":"

The number of edges in the graph.

" + }, + "numNodeLabels":{ + "shape":"Long", + "documentation":"

The number of distinct node labels in the graph.

" + }, + "numEdgeLabels":{ + "shape":"Long", + "documentation":"

The number of unique edge labels in the graph.

" + }, + "nodeLabels":{ + "shape":"NodeLabels", + "documentation":"

A list of distinct node labels in the graph.

" + }, + "edgeLabels":{ + "shape":"EdgeLabels", + "documentation":"

A list of the edge labels in the graph.

" + }, + "numNodeProperties":{ + "shape":"Long", + "documentation":"

The number of distinct node properties in the graph.

" + }, + "numEdgeProperties":{ + "shape":"Long", + "documentation":"

The number of edge properties in the graph.

" + }, + "nodeProperties":{ + "shape":"LongValuedMapList", + "documentation":"

A list of the distinct node properties in the graph, along with the count of nodes where each property is used.

" + }, + "edgeProperties":{ + "shape":"LongValuedMapList", + "documentation":"

A list of the distinct edge properties in the graph, along with the count of edges where each property is used.

" + }, + "totalNodePropertyValues":{ + "shape":"Long", + "documentation":"

The total number of usages of all node properties.

" + }, + "totalEdgePropertyValues":{ + "shape":"Long", + "documentation":"

The total number of usages of all edge properties.

" + }, + "nodeStructures":{ + "shape":"NodeStructures", + "documentation":"

This field is only present when the requested mode is DETAILED. It contains a list of node structures.

" + }, + "edgeStructures":{ + "shape":"EdgeStructures", + "documentation":"

This field is only present when the requested mode is DETAILED. It contains a list of edge structures.

" + } + }, + "documentation":"

Summary information about the graph.

" + }, "GraphId":{ "type":"string", "pattern":"g-[a-z0-9]{10}" @@ -1432,6 +1833,13 @@ "type":"list", "member":{"shape":"GraphSummary"} }, + "GraphSummaryMode":{ + "type":"string", + "enum":[ + "BASIC", + "DETAILED" + ] + }, "ImportOptions":{ "type":"structure", "members":{ @@ -1707,6 +2115,44 @@ } } }, + "ListQueriesInput":{ + "type":"structure", + "required":[ + "graphIdentifier", + "maxResults" + ], + "members":{ + "graphIdentifier":{ + "shape":"GraphIdentifier", + "documentation":"

The unique identifier of the Neptune Analytics graph.

", + "hostLabel":true, + "location":"header", + "locationName":"graphIdentifier" + }, + "maxResults":{ + "shape":"Integer", + "documentation":"

The maximum number of results to be fetched by the API.

", + "location":"querystring", + "locationName":"maxResults" + }, + "state":{ + "shape":"QueryStateInput", + "documentation":"

Filtered list of queries based on state.

", + "location":"querystring", + "locationName":"state" + } + } + }, + "ListQueriesOutput":{ + "type":"structure", + "required":["queries"], + "members":{ + "queries":{ + "shape":"QuerySummaryList", + "documentation":"

A list of current openCypher queries.

" + } + } + }, "ListTagsForResourceInput":{ "type":"structure", "required":["resourceArn"], @@ -1732,6 +2178,15 @@ "type":"long", "box":true }, + "LongValuedMap":{ + "type":"map", + "key":{"shape":"String"}, + "value":{"shape":"Long"} + }, + "LongValuedMapList":{ + "type":"list", + "member":{"shape":"LongValuedMap"} + }, "MaxResults":{ "type":"integer", "box":true, @@ -1759,7 +2214,7 @@ }, "preserveEdgeIds":{ "shape":"Boolean", - "documentation":"

Neptune Analytics currently does not support user defined edge ids. The edge ids are not imported by default. They are imported if preserveEdgeIds is set to true, and ids are stored as properties on the relationships with the property name neptuneEdgeId.

" + "documentation":"

Neptune Analytics currently does not support user defined edge ids. The edge ids are not imported by default. They are imported if preserveEdgeIds is set to true, and ids are stored as properties on the relationships with the property name neptuneEdgeId.

" } }, "documentation":"

Options for how to import Neptune data.

" @@ -1774,11 +2229,53 @@ "max":1024, "min":1 }, + "NodeLabels":{ + "type":"list", + "member":{"shape":"String"} + }, + "NodeProperties":{ + "type":"list", + "member":{"shape":"String"} + }, + "NodeStructure":{ + "type":"structure", + "members":{ + "count":{ + "shape":"Long", + "documentation":"

The number of instances of this node.

" + }, + "nodeProperties":{ + "shape":"NodeProperties", + "documentation":"

Properties associated with this node.

" + }, + "distinctOutgoingEdgeLabels":{ + "shape":"OutgoingEdgeLabels", + "documentation":"

The outgoing edge labels associated with this node.

" + } + }, + "documentation":"

Information about a node.

" + }, + "NodeStructures":{ + "type":"list", + "member":{"shape":"NodeStructure"} + }, + "OutgoingEdgeLabels":{ + "type":"list", + "member":{"shape":"String"} + }, "PaginationToken":{ "type":"string", "max":8192, "min":1 }, + "PlanCacheType":{ + "type":"string", + "enum":[ + "ENABLED", + "DISABLED", + "AUTO" + ] + }, "PrivateGraphEndpointStatus":{ "type":"string", "enum":[ @@ -1825,6 +2322,61 @@ "max":24576, "min":128 }, + "QueryLanguage":{ + "type":"string", + "enum":["OPEN_CYPHER"] + }, + "QueryResponseBlob":{ + "type":"blob", + "streaming":true + }, + "QueryState":{ + "type":"string", + "enum":[ + "RUNNING", + "WAITING", + "CANCELLING" + ] + }, + "QueryStateInput":{ + "type":"string", + "enum":[ + "ALL", + "RUNNING", + "WAITING", + "CANCELLING" + ] + }, + "QuerySummary":{ + "type":"structure", + "members":{ + "id":{ + "shape":"String", + "documentation":"

A string representation of the id of the query.

" + }, + "queryString":{ + "shape":"String", + "documentation":"

The actual query text. The queryString may be truncated if the actual query string is too long.

" + }, + "waited":{ + "shape":"Integer", + "documentation":"

The amount of time, in milliseconds, the query has waited in the queue before being picked up by a worker thread.

" + }, + "elapsed":{ + "shape":"Integer", + "documentation":"

The running time of the query, in milliseconds.

" + }, + "state":{ + "shape":"QueryState", + "documentation":"

State of the query.

" + } + }, + "documentation":"

Details of the query listed.

" + }, + "QuerySummaryList":{ + "type":"list", + "member":{"shape":"QuerySummary"} + }, "ReplicaCount":{ "type":"integer", "box":true, @@ -1968,7 +2520,7 @@ }, "publicConnectivity":{ "shape":"Boolean", - "documentation":"

Specifies whether or not the graph can be reachable over the internet. All access to graphs IAM authenticated. (true to enable, or false to disable).

" + "documentation":"

Specifies whether or not the graph can be reachable over the internet. All access to graphs is IAM authenticated. (true to enable, or false to disable).

" } } }, @@ -2116,6 +2668,10 @@ "max":6, "min":1 }, + "SyntheticTimestamp_date_time":{ + "type":"timestamp", + "timestampFormat":"iso8601" + }, "TagKey":{ "type":"string", "max":128, @@ -2186,6 +2742,36 @@ "retryable":{"throttling":true} }, "Timestamp":{"type":"timestamp"}, + "UnprocessableException":{ + "type":"structure", + "required":[ + "message", + "reason" + ], + "members":{ + "message":{"shape":"String"}, + "reason":{ + "shape":"UnprocessableExceptionReason", + "documentation":"

The reason for the unprocessable exception.

" + } + }, + "documentation":"

Request cannot be processed due to known reasons. E.g., partition full.

", + "error":{ + "httpStatusCode":422, + "senderFault":true + }, + "exception":true + }, + "UnprocessableExceptionReason":{ + "type":"string", + "enum":[ + "QUERY_TIMEOUT", + "INTERNAL_LIMIT_EXCEEDED", + "MEMORY_LIMIT_EXCEEDED", + "STORAGE_LIMIT_EXCEEDED", + "PARTITION_FULL" + ] + }, "UntagResourceInput":{ "type":"structure", "required":[ @@ -2224,7 +2810,7 @@ }, "publicConnectivity":{ "shape":"Boolean", - "documentation":"

Specifies whether or not the graph can be reachable over the internet. All access to graphs IAM authenticated. (true to enable, or false to disable.

" + "documentation":"

Specifies whether or not the graph can be reachable over the internet. All access to graphs is IAM authenticated. (true to enable, or false to disable).

" }, "provisionedMemory":{ "shape":"ProvisionedMemory", @@ -2316,7 +2902,7 @@ "documentation":"

The reason that the resource could not be validated.

" } }, - "documentation":"

A resource could not be validated

", + "documentation":"

A resource could not be validated.

", "error":{ "httpStatusCode":400, "senderFault":true @@ -2361,5 +2947,5 @@ "pattern":"vpc-[a-z0-9]+" } }, - "documentation":"

Neptune Analytics is a serverless in-memory graph database service for analytics that delivers high-performance analytics and real-time queries for any graph type. It complements the Amazon Neptune Database, an industry-leading managed graph database.

" + "documentation":"

Neptune Analytics is a new analytics database engine for Amazon Neptune that helps customers get to insights faster by quickly processing large amounts of graph data, invoking popular graph analytic algorithms in low-latency queries, and getting analytics results in seconds.

" } diff -Nru awscli-2.15.9/awscli/botocore/data/opensearch/2021-01-01/service-2.json awscli-2.15.22/awscli/botocore/data/opensearch/2021-01-01/service-2.json --- awscli-2.15.9/awscli/botocore/data/opensearch/2021-01-01/service-2.json 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/awscli/botocore/data/opensearch/2021-01-01/service-2.json 2024-02-21 17:34:54.000000000 +0000 @@ -95,6 +95,23 @@ ], "documentation":"

Provides access to an Amazon OpenSearch Service domain through the use of an interface VPC endpoint.

" }, + "CancelDomainConfigChange":{ + "name":"CancelDomainConfigChange", + "http":{ + "method":"POST", + "requestUri":"/2021-01-01/opensearch/domain/{DomainName}/config/cancel" + }, + "input":{"shape":"CancelDomainConfigChangeRequest"}, + "output":{"shape":"CancelDomainConfigChangeResponse"}, + "errors":[ + {"shape":"BaseException"}, + {"shape":"InternalException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"DisabledOperationException"} + ], + "documentation":"

Cancels a pending configuration change on an Amazon OpenSearch Service domain.

" + }, "CancelServiceSoftwareUpdate":{ "name":"CancelServiceSoftwareUpdate", "http":{ @@ -1648,6 +1665,38 @@ "exception":true }, "Boolean":{"type":"boolean"}, + "CancelDomainConfigChangeRequest":{ + "type":"structure", + "required":["DomainName"], + "members":{ + "DomainName":{ + "shape":"DomainName", + "location":"uri", + "locationName":"DomainName" + }, + "DryRun":{ + "shape":"DryRun", + "documentation":"

When set to True, returns the list of change IDs and properties that will be cancelled without actually cancelling the change.

" + } + } + }, + "CancelDomainConfigChangeResponse":{ + "type":"structure", + "members":{ + "CancelledChangeIds":{ + "shape":"GUIDList", + "documentation":"

The unique identifiers of the changes that were cancelled.

" + }, + "CancelledChangeProperties":{ + "shape":"CancelledChangePropertyList", + "documentation":"

The domain change properties that were cancelled.

" + }, + "DryRun":{ + "shape":"DryRun", + "documentation":"

Whether or not the request was a dry run. If True, the changes were not actually cancelled.

" + } + } + }, "CancelServiceSoftwareUpdateRequest":{ "type":"structure", "required":["DomainName"], @@ -1669,6 +1718,28 @@ }, "documentation":"

Container for the response to a CancelServiceSoftwareUpdate operation. Contains the status of the update.

" }, + "CancelledChangeProperty":{ + "type":"structure", + "members":{ + "PropertyName":{ + "shape":"String", + "documentation":"

The name of the property whose change was cancelled.

" + }, + "CancelledValue":{ + "shape":"String", + "documentation":"

The pending value of the property that was cancelled. This would have been the eventual value of the property if the change had not been cancelled.

" + }, + "ActiveValue":{ + "shape":"String", + "documentation":"

The current value of the property, after the change was cancelled.

" + } + }, + "documentation":"

A property change that was cancelled for an Amazon OpenSearch Service domain.

" + }, + "CancelledChangePropertyList":{ + "type":"list", + "member":{"shape":"CancelledChangeProperty"} + }, "ChangeProgressDetails":{ "type":"structure", "members":{ @@ -1679,6 +1750,22 @@ "Message":{ "shape":"Message", "documentation":"

A message corresponding to the status of the configuration change.

" + }, + "ConfigChangeStatus":{ + "shape":"ConfigChangeStatus", + "documentation":"

The current status of the configuration change.

" + }, + "InitiatedBy":{ + "shape":"InitiatedBy", + "documentation":"

The IAM principal who initiated the configuration change.

" + }, + "StartTime":{ + "shape":"UpdateTimestamp", + "documentation":"

The time that the configuration change was initiated, in Universal Coordinated Time (UTC).

" + }, + "LastUpdatedTime":{ + "shape":"UpdateTimestamp", + "documentation":"

The last time that the configuration change was updated.

" } }, "documentation":"

Container for information about a configuration change happening on a domain.

" @@ -1750,6 +1837,18 @@ "ChangeProgressStages":{ "shape":"ChangeProgressStageList", "documentation":"

The specific stages that the domain is going through to perform the configuration change.

" + }, + "LastUpdatedTime":{ + "shape":"UpdateTimestamp", + "documentation":"

The last time that the status of the configuration change was updated.

" + }, + "ConfigChangeStatus":{ + "shape":"ConfigChangeStatus", + "documentation":"

The current status of the configuration change.

" + }, + "InitiatedBy":{ + "shape":"InitiatedBy", + "documentation":"

The IAM principal who initiated the configuration change.

" } }, "documentation":"

The progress details of a specific domain configuration change.

" @@ -1884,7 +1983,7 @@ "members":{ "Enabled":{ "shape":"Boolean", - "documentation":"

Whether to enable or disable cold storage on the domain.

" + "documentation":"

Whether to enable or disable cold storage on the domain. You must enable UltraWarm storage to enable cold storage.

" } }, "documentation":"

Container for the parameters required to enable cold storage for an OpenSearch Service domain. For more information, see Cold storage for Amazon OpenSearch Service.

" @@ -1911,6 +2010,19 @@ }, "documentation":"

A map of OpenSearch or Elasticsearch versions and the versions you can upgrade them to.

" }, + "ConfigChangeStatus":{ + "type":"string", + "enum":[ + "Pending", + "Initializing", + "Validating", + "ValidationFailed", + "ApplyingChanges", + "Completed", + "PendingUserInput", + "Cancelled" + ] + }, "ConflictException":{ "type":"structure", "members":{ @@ -2434,7 +2546,7 @@ }, "NextToken":{ "shape":"NextToken", - "documentation":"

When nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page.

" + "documentation":"

When nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Send the request again using the returned token to retrieve the next page.

" } }, "documentation":"

The result of a DescribeDomainAutoTunes request.

" @@ -2700,7 +2812,7 @@ }, "NextToken":{ "shape":"NextToken", - "documentation":"

When nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page.

" + "documentation":"

When nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Send the request again using the returned token to retrieve the next page.

" } }, "documentation":"

Contains a list of connections matching the filter criteria.

" @@ -2770,7 +2882,7 @@ }, "NextToken":{ "shape":"NextToken", - "documentation":"

When nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page.

" + "documentation":"

When nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Send the request again using the returned token to retrieve the next page.

" } }, "documentation":"

Contains a list of connections matching the filter criteria.

" @@ -2840,7 +2952,7 @@ }, "NextToken":{ "shape":"String", - "documentation":"

When nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page.

" + "documentation":"

When nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Send the request again using the returned token to retrieve the next page.

" } }, "documentation":"

Container for the response returned by the DescribePackages operation.

" @@ -2874,7 +2986,7 @@ "members":{ "NextToken":{ "shape":"NextToken", - "documentation":"

When nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page.

" + "documentation":"

When nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Send the request again using the returned token to retrieve the next page.

" }, "ReservedInstanceOfferings":{ "shape":"ReservedInstanceOfferingList", @@ -2912,7 +3024,7 @@ "members":{ "NextToken":{ "shape":"String", - "documentation":"

When nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page.

" + "documentation":"

When nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Send the request again using the returned token to retrieve the next page.

" }, "ReservedInstances":{ "shape":"ReservedInstanceList", @@ -3070,6 +3182,10 @@ "SoftwareUpdateOptions":{ "shape":"SoftwareUpdateOptionsStatus", "documentation":"

Software update options for the domain.

" + }, + "ModifyingProperties":{ + "shape":"ModifyingPropertiesList", + "documentation":"

Information about the domain properties that are currently being modified.

" } }, "documentation":"

Container for the configuration of an OpenSearch Service domain.

" @@ -3321,6 +3437,18 @@ "DISSOCIATION_FAILED" ] }, + "DomainProcessingStatusType":{ + "type":"string", + "enum":[ + "Creating", + "Active", + "Modifying", + "UpgradingEngineVersion", + "UpdatingServiceSoftware", + "Isolated", + "Deleting" + ] + }, "DomainState":{ "type":"string", "enum":[ @@ -3453,6 +3581,14 @@ "SoftwareUpdateOptions":{ "shape":"SoftwareUpdateOptions", "documentation":"

Service software update options for the domain.

" + }, + "DomainProcessingStatus":{ + "shape":"DomainProcessingStatusType", + "documentation":"

The status of any changes that are currently in progress for the domain.

" + }, + "ModifyingProperties":{ + "shape":"ModifyingPropertiesList", + "documentation":"

Information about the domain properties that are currently being modified.

" } }, "documentation":"

The current status of an OpenSearch Service domain.

" @@ -3593,7 +3729,7 @@ "documentation":"

The KMS key ID. Takes the form 1a2a3a4-1a2a-3a4a-5a6a-1a2a3a4a5a6a.

" } }, - "documentation":"

Specifies whether the domain should encrypt data at rest, and if so, the Key Management Service (KMS) key to use. Can be used only to create a new domain, not update an existing one.

" + "documentation":"

Specifies whether the domain should encrypt data at rest, and if so, the Key Management Service (KMS) key to use. Can only be used when creating a new domain or enabling encryption at rest for the first time on an existing domain. You can't modify this parameter after it's already been specified.

" }, "EncryptionAtRestOptionsStatus":{ "type":"structure", @@ -3687,6 +3823,10 @@ "min":36, "pattern":"\\p{XDigit}{8}-\\p{XDigit}{4}-\\p{XDigit}{4}-\\p{XDigit}{4}-\\p{XDigit}{12}" }, + "GUIDList":{ + "type":"list", + "member":{"shape":"GUID"} + }, "GetCompatibleVersionsRequest":{ "type":"structure", "members":{ @@ -3839,7 +3979,7 @@ }, "NextToken":{ "shape":"String", - "documentation":"

When nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page.

" + "documentation":"

When nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Send the request again using the returned token to retrieve the next page.

" } }, "documentation":"

Container for response returned by GetPackageVersionHistory operation.

" @@ -3878,7 +4018,7 @@ }, "NextToken":{ "shape":"String", - "documentation":"

When nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page.

" + "documentation":"

When nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Send the request again using the returned token to retrieve the next page.

" } }, "documentation":"

Container for the response returned by the GetUpgradeHistory operation.

" @@ -3999,6 +4139,13 @@ "type":"list", "member":{"shape":"InboundConnection"} }, + "InitiatedBy":{ + "type":"string", + "enum":[ + "CUSTOMER", + "SERVICE" + ] + }, "InstanceCount":{ "type":"integer", "documentation":"

Number of instances in an OpenSearch Service cluster.

", @@ -4226,7 +4373,7 @@ }, "NextToken":{ "shape":"NextToken", - "documentation":"

When nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page.

" + "documentation":"

When nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Send the request again using the returned token to retrieve the next page.

" } }, "documentation":"

The result of a ListDomainMaintenances request that contains information about the requested actions.

" @@ -4287,7 +4434,7 @@ }, "NextToken":{ "shape":"String", - "documentation":"

When nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page.

" + "documentation":"

When nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Send the request again using the returned token to retrieve the next page.

" } }, "documentation":"

Container for the response parameters to the ListDomainsForPackage operation.

" @@ -4343,7 +4490,7 @@ }, "NextToken":{ "shape":"NextToken", - "documentation":"

When nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page.

" + "documentation":"

When nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Send the request again using the returned token to retrieve the next page.

" } } }, @@ -4381,7 +4528,7 @@ }, "NextToken":{ "shape":"String", - "documentation":"

When nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page.

" + "documentation":"

When nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Send the request again using the returned token to retrieve the next page.

" } }, "documentation":"

Container for the response parameters to the ListPackagesForDomain operation.

" @@ -4419,7 +4566,7 @@ }, "NextToken":{ "shape":"NextToken", - "documentation":"

When nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page.

" + "documentation":"

When nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Send the request again using the returned token to retrieve the next page.

" } } }, @@ -4473,7 +4620,7 @@ }, "NextToken":{ "shape":"NextToken", - "documentation":"

When nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page.

" + "documentation":"

When nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Send the request again using the returned token to retrieve the next page.

" } }, "documentation":"

Container for the parameters for response received from the ListVersions operation.

" @@ -4509,7 +4656,7 @@ }, "NextToken":{ "shape":"NextToken", - "documentation":"

When nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page.

" + "documentation":"

When nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Send the request again using the returned token to retrieve the next page.

" } } }, @@ -4544,7 +4691,7 @@ }, "NextToken":{ "shape":"NextToken", - "documentation":"

When nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page.

" + "documentation":"

When nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Send the request again using the returned token to retrieve the next page.

" } } }, @@ -4572,7 +4719,7 @@ }, "NextToken":{ "shape":"NextToken", - "documentation":"

When nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page.

" + "documentation":"

When nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Send the request again using the returned token to retrieve the next page.

" } } }, @@ -4687,9 +4834,35 @@ "type":"integer", "documentation":"

Minimum number of instances that can be instantiated for a given instance type.

" }, + "ModifyingProperties":{ + "type":"structure", + "members":{ + "Name":{ + "shape":"String", + "documentation":"

The name of the property that is currently being modified.

" + }, + "ActiveValue":{ + "shape":"String", + "documentation":"

The current value of the domain property that is being modified.

" + }, + "PendingValue":{ + "shape":"String", + "documentation":"

The value that the property that is currently being modified will eventually have.

" + }, + "ValueType":{ + "shape":"PropertyValueType", + "documentation":"

The type of value that is currently being modified. Properties can have two types:

" + } + }, + "documentation":"

Information about the domain properties that are currently being modified.

" + }, + "ModifyingPropertiesList":{ + "type":"list", + "member":{"shape":"ModifyingProperties"} + }, "NextToken":{ "type":"string", - "documentation":"

When nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page.

" + "documentation":"

When nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Send the request again using the returned token to retrieve the next page.

" }, "NodeId":{ "type":"string", @@ -4833,6 +5006,14 @@ "t3.large.search", "t3.xlarge.search", "t3.2xlarge.search", + "or1.medium.search", + "or1.large.search", + "or1.xlarge.search", + "or1.2xlarge.search", + "or1.4xlarge.search", + "or1.8xlarge.search", + "or1.12xlarge.search", + "or1.16xlarge.search", "ultrawarm1.medium.search", "ultrawarm1.large.search", "ultrawarm1.xlarge.search", @@ -5219,6 +5400,13 @@ "AWS_SERVICE" ] }, + "PropertyValueType":{ + "type":"string", + "enum":[ + "PLAIN_TEXT", + "STRINGIFIED_JSON" + ] + }, "PurchaseReservedInstanceOfferingRequest":{ "type":"structure", "required":[ @@ -6686,5 +6874,5 @@ ] } }, - "documentation":"

Use the Amazon OpenSearch Service configuration API to create, configure, and manage OpenSearch Service domains.

For sample code that uses the configuration API, see the Amazon OpenSearch Service Developer Guide . The guide also contains sample code for sending signed HTTP requests to the OpenSearch APIs. The endpoint for configuration service requests is Region specific: es.region.amazonaws.com. For example, es.us-east-1.amazonaws.com. For a current list of supported Regions and endpoints, see Amazon Web Services service endpoints.

" + "documentation":"

Use the Amazon OpenSearch Service configuration API to create, configure, and manage OpenSearch Service domains. The endpoint for configuration service requests is Region specific: es.region.amazonaws.com. For example, es.us-east-1.amazonaws.com. For a current list of supported Regions and endpoints, see Amazon Web Services service endpoints.

" } diff -Nru awscli-2.15.9/awscli/botocore/data/organizations/2016-11-28/endpoint-rule-set-1.json awscli-2.15.22/awscli/botocore/data/organizations/2016-11-28/endpoint-rule-set-1.json --- awscli-2.15.9/awscli/botocore/data/organizations/2016-11-28/endpoint-rule-set-1.json 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/awscli/botocore/data/organizations/2016-11-28/endpoint-rule-set-1.json 2024-02-21 17:34:54.000000000 +0000 @@ -40,7 +40,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -83,7 +82,8 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -96,7 +96,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -110,7 +109,6 @@ "assign": "PartitionResult" } ], - "type": "tree", "rules": [ { "conditions": [ @@ -388,7 +386,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -423,7 +420,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -434,14 +430,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS and DualStack are enabled, but this partition does not support one or both", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -455,14 +453,12 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ - true, { "fn": "getAttr", "argv": [ @@ -471,11 +467,11 @@ }, "supportsFIPS" ] - } + }, + true ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -486,14 +482,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS is enabled but this partition does not support FIPS", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -507,7 +505,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -527,7 +524,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -538,14 +534,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "DualStack is enabled but this partition does not support DualStack", "type": 
"error" } - ] + ], + "type": "tree" }, { "conditions": [], @@ -556,9 +554,11 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" } - ] + ], + "type": "tree" }, { "conditions": [], diff -Nru awscli-2.15.9/awscli/botocore/data/organizations/2016-11-28/service-2.json awscli-2.15.22/awscli/botocore/data/organizations/2016-11-28/service-2.json --- awscli-2.15.9/awscli/botocore/data/organizations/2016-11-28/service-2.json 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/awscli/botocore/data/organizations/2016-11-28/service-2.json 2024-02-21 17:34:54.000000000 +0000 @@ -100,7 +100,7 @@ {"shape":"TooManyRequestsException"}, {"shape":"UnsupportedAPIEndpointException"} ], - "documentation":"

Closes an Amazon Web Services member account within an organization. You can close an account when all features are enabled . You can't close the management account with this API. This is an asynchronous request that Amazon Web Services performs in the background. Because CloseAccount operates asynchronously, it can return a successful completion message even though account closure might still be in progress. You need to wait a few minutes before the account is fully closed. To check the status of the request, do one of the following:

  • You can close only 10% of member accounts, between 10 and 200, within a rolling 30 day period. This quota is not bound by a calendar month, but starts when you close an account. After you reach this limit, you can close additional accounts. For more information, see Closing a member account in your organization in the Organizations User Guide.

  • To reinstate a closed account, contact Amazon Web Services Support within the 90-day grace period while the account is in SUSPENDED status.

  • If the Amazon Web Services account you attempt to close is linked to an Amazon Web Services GovCloud (US) account, the CloseAccount request will close both accounts. To learn important pre-closure details, see Closing an Amazon Web Services GovCloud (US) account in the Amazon Web Services GovCloud User Guide.

" + "documentation":"

Closes an Amazon Web Services member account within an organization. You can close an account when all features are enabled . You can't close the management account with this API. This is an asynchronous request that Amazon Web Services performs in the background. Because CloseAccount operates asynchronously, it can return a successful completion message even though account closure might still be in progress. You need to wait a few minutes before the account is fully closed. To check the status of the request, do one of the following:

  • You can close only 10% of member accounts, between 10 and 1000, within a rolling 30 day period. This quota is not bound by a calendar month, but starts when you close an account. After you reach this limit, you can close additional accounts. For more information, see Closing a member account in your organization and Quotas for Organizationsin the Organizations User Guide.

  • To reinstate a closed account, contact Amazon Web Services Support within the 90-day grace period while the account is in SUSPENDED status.

  • If the Amazon Web Services account you attempt to close is linked to an Amazon Web Services GovCloud (US) account, the CloseAccount request will close both accounts. To learn important pre-closure details, see Closing an Amazon Web Services GovCloud (US) account in the Amazon Web Services GovCloud User Guide.

" }, "CreateAccount":{ "name":"CreateAccount", @@ -1378,7 +1378,7 @@ "Message":{"shape":"ExceptionMessage"}, "Reason":{"shape":"ConstraintViolationExceptionReason"} }, - "documentation":"

Performing this operation violates a minimum or maximum value limit. For example, attempting to remove the last service control policy (SCP) from an OU or root, inviting or creating too many accounts to the organization, or attaching too many policies to an account, OU, or root. This exception includes a reason that contains additional information about the violated limit:

Some of the reasons in the following list might not be applicable to this specific API or operation.

", + "documentation":"

Performing this operation violates a minimum or maximum value limit. For example, attempting to remove the last service control policy (SCP) from an OU or root, inviting or creating too many accounts to the organization, or attaching too many policies to an account, OU, or root. This exception includes a reason that contains additional information about the violated limit:

Some of the reasons in the following list might not be applicable to this specific API or operation.

", "exception":true }, "ConstraintViolationExceptionReason":{ @@ -1646,7 +1646,7 @@ "members":{ "Content":{ "shape":"PolicyContent", - "documentation":"

The policy text content to add to the new policy. The text that you supply must adhere to the rules of the policy type you specify in the Type parameter.

" + "documentation":"

The policy text content to add to the new policy. The text that you supply must adhere to the rules of the policy type you specify in the Type parameter.

The maximum size of a policy document depends on the policy's type. For more information, see Maximum and minimum values in the Organizations User Guide.

" }, "Description":{ "shape":"PolicyDescription", @@ -3650,7 +3650,7 @@ }, "Content":{ "shape":"PolicyContent", - "documentation":"

If provided, the new content for the policy. The text must be correctly formatted JSON that complies with the syntax for the policy's type. For more information, see SCP syntax in the Organizations User Guide.

" + "documentation":"

If provided, the new content for the policy. The text must be correctly formatted JSON that complies with the syntax for the policy's type. For more information, see SCP syntax in the Organizations User Guide.

The maximum size of a policy document depends on the policy's type. For more information, see Maximum and minimum values in the Organizations User Guide.

" } } }, diff -Nru awscli-2.15.9/awscli/botocore/data/outposts/2019-12-03/endpoint-rule-set-1.json awscli-2.15.22/awscli/botocore/data/outposts/2019-12-03/endpoint-rule-set-1.json --- awscli-2.15.9/awscli/botocore/data/outposts/2019-12-03/endpoint-rule-set-1.json 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/awscli/botocore/data/outposts/2019-12-03/endpoint-rule-set-1.json 2024-02-21 17:34:54.000000000 +0000 @@ -40,7 +40,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -83,7 +82,8 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -96,7 +96,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -110,7 +109,6 @@ "assign": "PartitionResult" } ], - "type": "tree", "rules": [ { "conditions": [ @@ -133,7 +131,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -168,7 +165,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -179,14 +175,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS and DualStack are enabled, but this partition does not support one or both", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -200,14 +198,12 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ - true, { "fn": "getAttr", "argv": [ @@ -216,18 +212,17 @@ }, "supportsFIPS" ] - } + }, + true ] } ], - "type": "tree", "rules": [ { "conditions": [ { "fn": "stringEquals", "argv": [ - "aws-us-gov", { "fn": "getAttr", "argv": [ @@ -236,7 +231,8 @@ }, "name" ] - } + }, + "aws-us-gov" ] } ], @@ -256,14 +252,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS is enabled but this partition does not support FIPS", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -277,7 +275,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -297,7 +294,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -308,14 +304,16 @@ }, "type": "endpoint" } - ] + ], + 
"type": "tree" }, { "conditions": [], "error": "DualStack is enabled but this partition does not support DualStack", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [], @@ -326,9 +324,11 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" } - ] + ], + "type": "tree" }, { "conditions": [], diff -Nru awscli-2.15.9/awscli/botocore/data/outposts/2019-12-03/service-2.json awscli-2.15.22/awscli/botocore/data/outposts/2019-12-03/service-2.json --- awscli-2.15.9/awscli/botocore/data/outposts/2019-12-03/service-2.json 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/awscli/botocore/data/outposts/2019-12-03/service-2.json 2024-02-21 17:34:54.000000000 +0000 @@ -1309,7 +1309,6 @@ }, "LineItemQuantity":{ "type":"integer", - "max":20, "min":1 }, "LineItemRequest":{ @@ -2048,7 +2047,6 @@ "StartConnectionRequest":{ "type":"structure", "required":[ - "DeviceSerialNumber", "AssetId", "ClientPublicKey", "NetworkInterfaceDeviceIndex" diff -Nru awscli-2.15.9/awscli/botocore/data/payment-cryptography/2021-09-14/service-2.json awscli-2.15.22/awscli/botocore/data/payment-cryptography/2021-09-14/service-2.json --- awscli-2.15.9/awscli/botocore/data/payment-cryptography/2021-09-14/service-2.json 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/awscli/botocore/data/payment-cryptography/2021-09-14/service-2.json 2024-02-21 17:34:54.000000000 +0000 @@ -111,7 +111,7 @@ {"shape":"ThrottlingException"}, {"shape":"InternalServerException"} ], - "documentation":"

Exports a key from Amazon Web Services Payment Cryptography.

Amazon Web Services Payment Cryptography simplifies key exchange by replacing the existing paper-based approach with a modern electronic approach. With ExportKey you can export symmetric keys using either symmetric and asymmetric key exchange mechanisms. Using this operation, you can share your Amazon Web Services Payment Cryptography generated keys with other service partners to perform cryptographic operations outside of Amazon Web Services Payment Cryptography

For symmetric key exchange, Amazon Web Services Payment Cryptography uses the ANSI X9 TR-31 norm in accordance with PCI PIN guidelines. And for asymmetric key exchange, Amazon Web Services Payment Cryptography supports ANSI X9 TR-34 norm . Asymmetric key exchange methods are typically used to establish bi-directional trust between the two parties exhanging keys and are used for initial key exchange such as Key Encryption Key (KEK). After which you can export working keys using symmetric method to perform various cryptographic operations within Amazon Web Services Payment Cryptography.

The TR-34 norm is intended for exchanging 3DES keys only and keys are imported in a WrappedKeyBlock format. Key attributes (such as KeyUsage, KeyAlgorithm, KeyModesOfUse, Exportability) are contained within the key block.

You can also use ExportKey functionality to generate and export an IPEK (Initial Pin Encryption Key) from Amazon Web Services Payment Cryptography using either TR-31 or TR-34 export key exchange. IPEK is generated from BDK (Base Derivation Key) and ExportDukptInitialKey attribute KSN (KeySerialNumber). The generated IPEK does not persist within Amazon Web Services Payment Cryptography and has to be re-generated each time during export.

To export KEK or IPEK using TR-34

Using this operation, you can export initial key using TR-34 asymmetric key exchange. You can only export KEK generated within Amazon Web Services Payment Cryptography. In TR-34 terminology, the sending party of the key is called Key Distribution Host (KDH) and the receiving party of the key is called Key Receiving Device (KRD). During key export process, KDH is Amazon Web Services Payment Cryptography which initiates key export and KRD is the user receiving the key.

To initiate TR-34 key export, the KRD must obtain an export token by calling GetParametersForExport. This operation also generates a key pair for the purpose of key export, signs the key and returns back the signing public key certificate (also known as KDH signing certificate) and root certificate chain. The KDH uses the private key to sign the the export payload and the signing public key certificate is provided to KRD to verify the signature. The KRD can import the root certificate into its Hardware Security Module (HSM), as required. The export token and the associated KDH signing certificate expires after 7 days.

Next the KRD generates a key pair for the the purpose of encrypting the KDH key and provides the public key cerificate (also known as KRD wrapping certificate) back to KDH. The KRD will also import the root cerificate chain into Amazon Web Services Payment Cryptography by calling ImportKey for RootCertificatePublicKey. The KDH, Amazon Web Services Payment Cryptography, will use the KRD wrapping cerificate to encrypt (wrap) the key under export and signs it with signing private key to generate a TR-34 WrappedKeyBlock. For more information on TR-34 key export, see section Exporting symmetric keys in the Amazon Web Services Payment Cryptography User Guide.

Set the following parameters:

When this operation is successful, Amazon Web Services Payment Cryptography returns the KEK or IPEK as a TR-34 WrappedKeyBlock.

To export WK (Working Key) or IPEK using TR-31

Using this operation, you can export working keys or IPEK using TR-31 symmetric key exchange. In TR-31, you must use an initial key such as KEK to encrypt or wrap the key under export. To establish a KEK, you can use CreateKey or ImportKey.

Set the following parameters:

When this operation is successful, Amazon Web Services Payment Cryptography returns the WK or IPEK as a TR-31 WrappedKeyBlock.

Cross-account use: This operation can't be used across different Amazon Web Services accounts.

Related operations:

" + "documentation":"

Exports a key from Amazon Web Services Payment Cryptography.

Amazon Web Services Payment Cryptography simplifies key exchange by replacing the existing paper-based approach with a modern electronic approach. With ExportKey you can export symmetric keys using either symmetric and asymmetric key exchange mechanisms. Using this operation, you can share your Amazon Web Services Payment Cryptography generated keys with other service partners to perform cryptographic operations outside of Amazon Web Services Payment Cryptography

For symmetric key exchange, Amazon Web Services Payment Cryptography uses the ANSI X9 TR-31 norm in accordance with PCI PIN guidelines. And for asymmetric key exchange, Amazon Web Services Payment Cryptography supports ANSI X9 TR-34 norm and RSA wrap and unwrap key exchange mechanism. Asymmetric key exchange methods are typically used to establish bi-directional trust between the two parties exhanging keys and are used for initial key exchange such as Key Encryption Key (KEK). After which you can export working keys using symmetric method to perform various cryptographic operations within Amazon Web Services Payment Cryptography.

The TR-34 norm is intended for exchanging 3DES keys only and keys are imported in a WrappedKeyBlock format. Key attributes (such as KeyUsage, KeyAlgorithm, KeyModesOfUse, Exportability) are contained within the key block. With RSA wrap and unwrap, you can exchange both 3DES and AES-128 keys. The keys are imported in a WrappedKeyCryptogram format and you will need to specify the key attributes during import.

You can also use ExportKey functionality to generate and export an IPEK (Initial Pin Encryption Key) from Amazon Web Services Payment Cryptography using either TR-31 or TR-34 export key exchange. IPEK is generated from BDK (Base Derivation Key) and ExportDukptInitialKey attribute KSN (KeySerialNumber). The generated IPEK does not persist within Amazon Web Services Payment Cryptography and has to be re-generated each time during export.

To export initial keys (KEK) or IPEK using TR-34

Using this operation, you can export initial key using TR-34 asymmetric key exchange. You can only export KEK generated within Amazon Web Services Payment Cryptography. In TR-34 terminology, the sending party of the key is called Key Distribution Host (KDH) and the receiving party of the key is called Key Receiving Device (KRD). During key export process, KDH is Amazon Web Services Payment Cryptography which initiates key export and KRD is the user receiving the key.

To initiate TR-34 key export, the KRD must obtain an export token by calling GetParametersForExport. This operation also generates a key pair for the purpose of key export, signs the key and returns back the signing public key certificate (also known as KDH signing certificate) and root certificate chain. The KDH uses the private key to sign the the export payload and the signing public key certificate is provided to KRD to verify the signature. The KRD can import the root certificate into its Hardware Security Module (HSM), as required. The export token and the associated KDH signing certificate expires after 7 days.

Next the KRD generates a key pair for the the purpose of encrypting the KDH key and provides the public key cerificate (also known as KRD wrapping certificate) back to KDH. The KRD will also import the root cerificate chain into Amazon Web Services Payment Cryptography by calling ImportKey for RootCertificatePublicKey. The KDH, Amazon Web Services Payment Cryptography, will use the KRD wrapping cerificate to encrypt (wrap) the key under export and signs it with signing private key to generate a TR-34 WrappedKeyBlock. For more information on TR-34 key export, see section Exporting symmetric keys in the Amazon Web Services Payment Cryptography User Guide.

Set the following parameters:

When this operation is successful, Amazon Web Services Payment Cryptography returns the KEK or IPEK as a TR-34 WrappedKeyBlock.

To export initial keys (KEK) or IPEK using RSA Wrap and Unwrap

Using this operation, you can export initial key using asymmetric RSA wrap and unwrap key exchange method. To initiate export, generate an asymmetric key pair on the receiving HSM and obtain the public key certificate in PEM format (base64 encoded) for the purpose of wrapping and the root certifiate chain. Import the root certificate into Amazon Web Services Payment Cryptography by calling ImportKey for RootCertificatePublicKey.

Next call ExportKey and set the following parameters:

When this operation is successful, Amazon Web Services Payment Cryptography returns the WrappedKeyCryptogram.

To export working keys or IPEK using TR-31

Using this operation, you can export working keys or IPEK using TR-31 symmetric key exchange. In TR-31, you must use an initial key such as KEK to encrypt or wrap the key under export. To establish a KEK, you can use CreateKey or ImportKey.

Set the following parameters:

When this operation is successful, Amazon Web Services Payment Cryptography returns the working key or IPEK as a TR-31 WrappedKeyBlock.

Cross-account use: This operation can't be used across different Amazon Web Services accounts.

Related operations:

" }, "GetAlias":{ "name":"GetAlias", @@ -187,7 +187,7 @@ {"shape":"ThrottlingException"}, {"shape":"InternalServerException"} ], - "documentation":"

Gets the import token and the wrapping key certificate in PEM format (base64 encoded) to initiate a TR-34 WrappedKeyBlock.

The wrapping key certificate wraps the key under import. The import token and wrapping key certificate must be in place and operational before calling ImportKey. The import token expires in 7 days. You can use the same import token to import multiple keys into your service account.

Cross-account use: This operation can't be used across different Amazon Web Services accounts.

Related operations:

" + "documentation":"

Gets the import token and the wrapping key certificate in PEM format (base64 encoded) to initiate a TR-34 WrappedKeyBlock or a RSA WrappedKeyCryptogram import into Amazon Web Services Payment Cryptography.

The wrapping key certificate wraps the key under import. The import token and wrapping key certificate must be in place and operational before calling ImportKey. The import token expires in 7 days. You can use the same import token to import multiple keys into your service account.

Cross-account use: This operation can't be used across different Amazon Web Services accounts.

Related operations:

" }, "GetPublicKeyCertificate":{ "name":"GetPublicKeyCertificate", @@ -225,7 +225,7 @@ {"shape":"ThrottlingException"}, {"shape":"InternalServerException"} ], - "documentation":"

Imports symmetric keys and public key certificates in PEM format (base64 encoded) into Amazon Web Services Payment Cryptography.

Amazon Web Services Payment Cryptography simplifies key exchange by replacing the existing paper-based approach with a modern electronic approach. With ImportKey you can import symmetric keys using either symmetric and asymmetric key exchange mechanisms.

For symmetric key exchange, Amazon Web Services Payment Cryptography uses the ANSI X9 TR-31 norm in accordance with PCI PIN guidelines. And for asymmetric key exchange, Amazon Web Services Payment Cryptography supports ANSI X9 TR-34 norm . Asymmetric key exchange methods are typically used to establish bi-directional trust between the two parties exhanging keys and are used for initial key exchange such as Key Encryption Key (KEK) or Zone Master Key (ZMK). After which you can import working keys using symmetric method to perform various cryptographic operations within Amazon Web Services Payment Cryptography.

The TR-34 norm is intended for exchanging 3DES keys only and keys are imported in a WrappedKeyBlock format. Key attributes (such as KeyUsage, KeyAlgorithm, KeyModesOfUse, Exportability) are contained within the key block.

You can also import a root public key certificate, used to sign other public key certificates, or a trusted public key certificate under an already established root public key certificate.

To import a public root key certificate

You can also import a root public key certificate, used to sign other public key certificates, or a trusted public key certificate under an already established root public key certificate.

To import a public root key certificate

Using this operation, you can import the public component (in PEM cerificate format) of your private root key. You can use the imported public root key certificate for digital signatures, for example signing wrapping key or signing key in TR-34, within your Amazon Web Services Payment Cryptography account.

Set the following parameters:

To import a trusted public key certificate

The root public key certificate must be in place and operational before you import a trusted public key certificate. Set the following parameters:

To import KEK or ZMK using TR-34

Using this operation, you can import initial key using TR-34 asymmetric key exchange. In TR-34 terminology, the sending party of the key is called Key Distribution Host (KDH) and the receiving party of the key is called Key Receiving Device (KRD). During the key import process, KDH is the user who initiates the key import and KRD is Amazon Web Services Payment Cryptography who receives the key.

To initiate TR-34 key import, the KDH must obtain an import token by calling GetParametersForImport. This operation generates an encryption keypair for the purpose of key import, signs the key and returns back the wrapping key certificate (also known as KRD wrapping certificate) and the root certificate chain. The KDH must trust and install the KRD wrapping certificate on its HSM and use it to encrypt (wrap) the KDH key during TR-34 WrappedKeyBlock generation. The import token and associated KRD wrapping certificate expires after 7 days.

Next the KDH generates a key pair for the purpose of signing the encrypted KDH key and provides the public certificate of the signing key to Amazon Web Services Payment Cryptography. The KDH will also need to import the root certificate chain of the KDH signing certificate by calling ImportKey for RootCertificatePublicKey. For more information on TR-34 key import, see section Importing symmetric keys in the Amazon Web Services Payment Cryptography User Guide.

Set the following parameters:

To import WK (Working Key) using TR-31

Amazon Web Services Payment Cryptography uses TR-31 symmetric key exchange norm to import working keys. A KEK must be established within Amazon Web Services Payment Cryptography by using TR-34 key import or by using CreateKey. To initiate a TR-31 key import, set the following parameters:

Cross-account use: This operation can't be used across different Amazon Web Services accounts.

Related operations:

" + "documentation":"

Imports symmetric keys and public key certificates in PEM format (base64 encoded) into Amazon Web Services Payment Cryptography.

Amazon Web Services Payment Cryptography simplifies key exchange by replacing the existing paper-based approach with a modern electronic approach. With ImportKey you can import symmetric keys using either symmetric and asymmetric key exchange mechanisms.

For symmetric key exchange, Amazon Web Services Payment Cryptography uses the ANSI X9 TR-31 norm in accordance with PCI PIN guidelines. And for asymmetric key exchange, Amazon Web Services Payment Cryptography supports ANSI X9 TR-34 norm and RSA wrap and unwrap key exchange mechanisms. Asymmetric key exchange methods are typically used to establish bi-directional trust between the two parties exhanging keys and are used for initial key exchange such as Key Encryption Key (KEK) or Zone Master Key (ZMK). After which you can import working keys using symmetric method to perform various cryptographic operations within Amazon Web Services Payment Cryptography.

The TR-34 norm is intended for exchanging 3DES keys only and keys are imported in a WrappedKeyBlock format. Key attributes (such as KeyUsage, KeyAlgorithm, KeyModesOfUse, Exportability) are contained within the key block. With RSA wrap and unwrap, you can exchange both 3DES and AES-128 keys. The keys are imported in a WrappedKeyCryptogram format and you will need to specify the key attributes during import.

You can also import a root public key certificate, used to sign other public key certificates, or a trusted public key certificate under an already established root public key certificate.

To import a public root key certificate

You can also import a root public key certificate, used to sign other public key certificates, or a trusted public key certificate under an already established root public key certificate.

To import a public root key certificate

Using this operation, you can import the public component (in PEM cerificate format) of your private root key. You can use the imported public root key certificate for digital signatures, for example signing wrapping key or signing key in TR-34, within your Amazon Web Services Payment Cryptography account.

Set the following parameters:

To import a trusted public key certificate

The root public key certificate must be in place and operational before you import a trusted public key certificate. Set the following parameters:

To import initial keys (KEK or ZMK or similar) using TR-34

Using this operation, you can import initial key using TR-34 asymmetric key exchange. In TR-34 terminology, the sending party of the key is called Key Distribution Host (KDH) and the receiving party of the key is called Key Receiving Device (KRD). During the key import process, KDH is the user who initiates the key import and KRD is Amazon Web Services Payment Cryptography who receives the key.

To initiate TR-34 key import, the KDH must obtain an import token by calling GetParametersForImport. This operation generates an encryption keypair for the purpose of key import, signs the key and returns back the wrapping key certificate (also known as KRD wrapping certificate) and the root certificate chain. The KDH must trust and install the KRD wrapping certificate on its HSM and use it to encrypt (wrap) the KDH key during TR-34 WrappedKeyBlock generation. The import token and associated KRD wrapping certificate expires after 7 days.

Next the KDH generates a key pair for the purpose of signing the encrypted KDH key and provides the public certificate of the signing key to Amazon Web Services Payment Cryptography. The KDH will also need to import the root certificate chain of the KDH signing certificate by calling ImportKey for RootCertificatePublicKey. For more information on TR-34 key import, see section Importing symmetric keys in the Amazon Web Services Payment Cryptography User Guide.

Set the following parameters:

To import initial keys (KEK or ZMK or similar) using RSA Wrap and Unwrap

Using this operation, you can import initial key using asymmetric RSA wrap and unwrap key exchange method. To initiate import, call GetParametersForImport with KeyMaterial set to KEY_CRYPTOGRAM to generate an import token. This operation also generates an encryption keypair for the purpose of key import, signs the key and returns back the wrapping key certificate in PEM format (base64 encoded) and its root certificate chain. The import token and associated KRD wrapping certificate expires after 7 days.

You must trust and install the wrapping certificate and its certificate chain on the sending HSM and use it to wrap the key under export for WrappedKeyCryptogram generation. Next call ImportKey with KeyMaterial set to KEY_CRYPTOGRAM and provide the ImportToken and KeyAttributes for the key under import.

To import working keys using TR-31

Amazon Web Services Payment Cryptography uses TR-31 symmetric key exchange norm to import working keys. A KEK must be established within Amazon Web Services Payment Cryptography by using TR-34 key import or by using CreateKey. To initiate a TR-31 key import, set the following parameters:

Cross-account use: This operation can't be used across different Amazon Web Services accounts.

Related operations:

" }, "ListAliases":{ "name":"ListAliases", @@ -586,6 +586,28 @@ }, "documentation":"

Parameter information for IPEK generation during export.

" }, + "ExportKeyCryptogram":{ + "type":"structure", + "required":[ + "CertificateAuthorityPublicKeyIdentifier", + "WrappingKeyCertificate" + ], + "members":{ + "CertificateAuthorityPublicKeyIdentifier":{ + "shape":"KeyArnOrKeyAliasType", + "documentation":"

The KeyARN of the certificate chain that signs the wrapping key certificate during RSA wrap and unwrap key export.

" + }, + "WrappingKeyCertificate":{ + "shape":"CertificateType", + "documentation":"

The wrapping key certificate in PEM format (base64 encoded). Amazon Web Services Payment Cryptography uses this certificate to wrap the key under export.

" + }, + "WrappingSpec":{ + "shape":"WrappingKeySpec", + "documentation":"

The wrapping spec for the key under export.

" + } + }, + "documentation":"

Parameter information for key material export using asymmetric RSA wrap and unwrap key exchange method.

" + }, "ExportKeyInput":{ "type":"structure", "required":[ @@ -610,6 +632,10 @@ "ExportKeyMaterial":{ "type":"structure", "members":{ + "KeyCryptogram":{ + "shape":"ExportKeyCryptogram", + "documentation":"

Parameter information for key material export using asymmetric RSA wrap and unwrap key exchange method

" + }, "Tr31KeyBlock":{ "shape":"ExportTr31KeyBlock", "documentation":"

Parameter information for key material export using symmetric TR-31 key exchange method.

" @@ -619,7 +645,7 @@ "documentation":"

Parameter information for key material export using the asymmetric TR-34 key exchange method.

" } }, - "documentation":"

Parameter information for key material export from Amazon Web Services Payment Cryptography using TR-31 or TR-34 key exchange method.

", + "documentation":"

Parameter information for key material export from Amazon Web Services Payment Cryptography using TR-31 or TR-34 or RSA wrap and unwrap key exchange method.

", "union":true }, "ExportKeyOutput":{ @@ -627,7 +653,7 @@ "members":{ "WrappedKey":{ "shape":"WrappedKey", - "documentation":"

The key material under export as a TR-34 WrappedKeyBlock or a TR-31 WrappedKeyBlock.

" + "documentation":"

The key material under export as a TR-34 WrappedKeyBlock or a TR-31 WrappedKeyBlock. or a RSA WrappedKeyCryptogram.

" } } }, @@ -776,11 +802,11 @@ "members":{ "KeyMaterialType":{ "shape":"KeyMaterialType", - "documentation":"

The method to use for key material import. Import token is only required for TR-34 WrappedKeyBlock (TR34_KEY_BLOCK).

Import token is not required for TR-31, root public key cerificate or trusted public key certificate.

" + "documentation":"

The method to use for key material import. Import token is only required for TR-34 WrappedKeyBlock (TR34_KEY_BLOCK) and RSA WrappedKeyCryptogram (KEY_CRYPTOGRAM).

Import token is not required for TR-31, root public key certificate or trusted public key certificate.

" }, "WrappingKeyAlgorithm":{ "shape":"KeyAlgorithm", - "documentation":"

The wrapping key algorithm to generate a wrapping key certificate. This certificate wraps the key under import.

At this time, RSA_2048, RSA_3072, RSA_4096 are the only allowed algorithms for TR-34 WrappedKeyBlock import.

" + "documentation":"

The wrapping key algorithm to generate a wrapping key certificate. This certificate wraps the key under import.

At this time, RSA_2048 is the allowed algorithm for TR-34 WrappedKeyBlock import. Additionally, RSA_2048, RSA_3072, RSA_4096 are the allowed algorithms for RSA WrappedKeyCryptogram import.

" } } }, @@ -804,7 +830,7 @@ }, "WrappingKeyAlgorithm":{ "shape":"KeyAlgorithm", - "documentation":"

The algorithm of the wrapping key for use within TR-34 WrappedKeyBlock.

" + "documentation":"

The algorithm of the wrapping key for use within TR-34 WrappedKeyBlock or RSA WrappedKeyCryptogram.

" }, "WrappingKeyCertificate":{ "shape":"CertificateType", @@ -855,6 +881,35 @@ "min":20, "pattern":"^[0-9A-F]{20}$|^[0-9A-F]{24}$" }, + "ImportKeyCryptogram":{ + "type":"structure", + "required":[ + "Exportable", + "ImportToken", + "KeyAttributes", + "WrappedKeyCryptogram" + ], + "members":{ + "Exportable":{ + "shape":"Boolean", + "documentation":"

Specifies whether the key is exportable from the service.

" + }, + "ImportToken":{ + "shape":"ImportTokenId", + "documentation":"

The import token that initiates key import using the asymmetric RSA wrap and unwrap key exchange method into Amazon Web Services Payment Cryptography. It expires after 7 days. You can use the same import token to import multiple keys to the same service account.

" + }, + "KeyAttributes":{"shape":"KeyAttributes"}, + "WrappedKeyCryptogram":{ + "shape":"WrappedKeyCryptogram", + "documentation":"

The RSA wrapped key cryptogram under import.

" + }, + "WrappingSpec":{ + "shape":"WrappingKeySpec", + "documentation":"

The wrapping spec for the wrapped key cryptogram.

" + } + }, + "documentation":"

Parameter information for key material import using asymmetric RSA wrap and unwrap key exchange method.

" + }, "ImportKeyInput":{ "type":"structure", "required":["KeyMaterial"], @@ -880,6 +935,10 @@ "ImportKeyMaterial":{ "type":"structure", "members":{ + "KeyCryptogram":{ + "shape":"ImportKeyCryptogram", + "documentation":"

Parameter information for key material import using asymmetric RSA wrap and unwrap key exchange method.

" + }, "RootCertificatePublicKey":{ "shape":"RootCertificatePublicKey", "documentation":"

Parameter information for root public key certificate import.

" @@ -897,7 +956,7 @@ "documentation":"

Parameter information for trusted public key certificate import.

" } }, - "documentation":"

Parameter information for key material import into Amazon Web Services Payment Cryptography using TR-31 or TR-34 key exchange method.

", + "documentation":"

Parameter information for key material import into Amazon Web Services Payment Cryptography using TR-31 or TR-34 or RSA wrap and unwrap key exchange method.

", "union":true }, "ImportKeyOutput":{ @@ -1134,7 +1193,8 @@ "TR34_KEY_BLOCK", "TR31_KEY_BLOCK", "ROOT_PUBLIC_KEY_CERTIFICATE", - "TRUSTED_PUBLIC_KEY_CERTIFICATE" + "TRUSTED_PUBLIC_KEY_CERTIFICATE", + "KEY_CRYPTOGRAM" ] }, "KeyModesOfUse":{ @@ -1256,6 +1316,7 @@ "TR31_K1_KEY_BLOCK_PROTECTION_KEY", "TR31_K3_ASYMMETRIC_KEY_FOR_KEY_AGREEMENT", "TR31_M3_ISO_9797_3_MAC_KEY", + "TR31_M1_ISO_9797_1_MAC_KEY", "TR31_M6_ISO_9797_5_CMAC_KEY", "TR31_M7_HMAC_KEY", "TR31_P0_PIN_ENCRYPTION_KEY", @@ -1673,6 +1734,12 @@ }, "documentation":"

Parameter information for generating a WrappedKeyBlock for key exchange.

" }, + "WrappedKeyCryptogram":{ + "type":"string", + "max":4096, + "min":16, + "pattern":"^[0-9A-F]+$" + }, "WrappedKeyMaterialFormat":{ "type":"string", "enum":[ @@ -1680,6 +1747,13 @@ "TR31_KEY_BLOCK", "TR34_KEY_BLOCK" ] + }, + "WrappingKeySpec":{ + "type":"string", + "enum":[ + "RSA_OAEP_SHA_256", + "RSA_OAEP_SHA_512" + ] } }, "documentation":"

Amazon Web Services Payment Cryptography Control Plane APIs manage encryption keys for use during payment-related cryptographic operations. You can create, import, export, share, manage, and delete keys. You can also manage Identity and Access Management (IAM) policies for keys. For more information, see Identity and access management in the Amazon Web Services Payment Cryptography User Guide.

To use encryption keys for payment-related transaction processing and associated cryptographic operations, you use the Amazon Web Services Payment Cryptography Data Plane. You can perform actions like encrypt, decrypt, generate, and verify payment-related data.

All Amazon Web Services Payment Cryptography API calls must be signed and transmitted using Transport Layer Security (TLS). We recommend you always use the latest supported TLS version for logging API requests.

Amazon Web Services Payment Cryptography supports CloudTrail for control plane operations, a service that logs Amazon Web Services API calls and related events for your Amazon Web Services account and delivers them to an Amazon S3 bucket you specify. By using the information collected by CloudTrail, you can determine what requests were made to Amazon Web Services Payment Cryptography, who made the request, when it was made, and so on. If you don't configure a trail, you can still view the most recent events in the CloudTrail console. For more information, see the CloudTrail User Guide.

" diff -Nru awscli-2.15.9/awscli/botocore/data/personalize/2018-05-22/service-2.json awscli-2.15.22/awscli/botocore/data/personalize/2018-05-22/service-2.json --- awscli-2.15.9/awscli/botocore/data/personalize/2018-05-22/service-2.json 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/awscli/botocore/data/personalize/2018-05-22/service-2.json 2024-02-21 17:34:54.000000000 +0000 @@ -65,7 +65,7 @@ {"shape":"ResourceInUseException"}, {"shape":"TooManyTagsException"} ], - "documentation":"

Creates a campaign that deploys a solution version. When a client calls the GetRecommendations and GetPersonalizedRanking APIs, a campaign is specified in the request.

Minimum Provisioned TPS and Auto-Scaling

A high minProvisionedTPS will increase your bill. We recommend starting with 1 for minProvisionedTPS (the default). Track your usage using Amazon CloudWatch metrics, and increase the minProvisionedTPS as necessary.

A transaction is a single GetRecommendations or GetPersonalizedRanking call. Transactions per second (TPS) is the throughput and unit of billing for Amazon Personalize. The minimum provisioned TPS (minProvisionedTPS) specifies the baseline throughput provisioned by Amazon Personalize, and thus, the minimum billing charge.

If your TPS increases beyond minProvisionedTPS, Amazon Personalize auto-scales the provisioned capacity up and down, but never below minProvisionedTPS. There's a short time delay while the capacity is increased that might cause loss of transactions.

The actual TPS used is calculated as the average requests/second within a 5-minute window. You pay for maximum of either the minimum provisioned TPS or the actual TPS. We recommend starting with a low minProvisionedTPS, track your usage using Amazon CloudWatch metrics, and then increase the minProvisionedTPS as necessary.

Status

A campaign can be in one of the following states:

To get the campaign status, call DescribeCampaign.

Wait until the status of the campaign is ACTIVE before asking the campaign for recommendations.

Related APIs

", + "documentation":"

Creates a campaign that deploys a solution version. When a client calls the GetRecommendations and GetPersonalizedRanking APIs, a campaign is specified in the request.

Minimum Provisioned TPS and Auto-Scaling

A high minProvisionedTPS will increase your cost. We recommend starting with 1 for minProvisionedTPS (the default). Track your usage using Amazon CloudWatch metrics, and increase the minProvisionedTPS as necessary.

When you create an Amazon Personalize campaign, you can specify the minimum provisioned transactions per second (minProvisionedTPS) for the campaign. This is the baseline transaction throughput for the campaign provisioned by Amazon Personalize. It sets the minimum billing charge for the campaign while it is active. A transaction is a single GetRecommendations or GetPersonalizedRanking request. The default minProvisionedTPS is 1.

If your TPS increases beyond the minProvisionedTPS, Amazon Personalize auto-scales the provisioned capacity up and down, but never below minProvisionedTPS. There's a short time delay while the capacity is increased that might cause loss of transactions. When your traffic reduces, capacity returns to the minProvisionedTPS.

You are charged for the minimum provisioned TPS or, if your requests exceed the minProvisionedTPS, the actual TPS. The actual TPS is the total number of recommendation requests you make. We recommend starting with a low minProvisionedTPS, tracking your usage using Amazon CloudWatch metrics, and then increasing the minProvisionedTPS as necessary.

For more information about campaign costs, see Amazon Personalize pricing.

Status

A campaign can be in one of the following states:

To get the campaign status, call DescribeCampaign.

Wait until the status of the campaign is ACTIVE before asking the campaign for recommendations.

Related APIs

", "idempotent":true }, "CreateDataset":{ @@ -1480,7 +1480,7 @@ }, "enableMetadataWithRecommendations":{ "shape":"Boolean", - "documentation":"

Whether metadata with recommendations is enabled for the campaign. If enabled, you can specify the columns from your Items dataset in your request for recommendations. Amazon Personalize returns this data for each item in the recommendation response.

If you enable metadata in recommendations, you will incur additional costs. For more information, see Amazon Personalize pricing.

" + "documentation":"

Whether metadata with recommendations is enabled for the campaign. If enabled, you can specify the columns from your Items dataset in your request for recommendations. Amazon Personalize returns this data for each item in the recommendation response. For information about enabling metadata for a campaign, see Enabling metadata in recommendations for a campaign.

If you enable metadata in recommendations, you will incur additional costs. For more information, see Amazon Personalize pricing.

" } }, "documentation":"

The configuration details of a campaign.

" @@ -2137,11 +2137,11 @@ }, "performAutoML":{ "shape":"PerformAutoML", - "documentation":"

We don't recommend enabling automated machine learning. Instead, match your use case to the available Amazon Personalize recipes. For more information, see Determining your use case.

Whether to perform automated machine learning (AutoML). The default is false. For this case, you must specify recipeArn.

When set to true, Amazon Personalize analyzes your training data and selects the optimal USER_PERSONALIZATION recipe and hyperparameters. In this case, you must omit recipeArn. Amazon Personalize determines the optimal recipe by running tests with different values for the hyperparameters. AutoML lengthens the training process as compared to selecting a specific recipe.

" + "documentation":"

We don't recommend enabling automated machine learning. Instead, match your use case to the available Amazon Personalize recipes. For more information, see Choosing a recipe.

Whether to perform automated machine learning (AutoML). The default is false. For this case, you must specify recipeArn.

When set to true, Amazon Personalize analyzes your training data and selects the optimal USER_PERSONALIZATION recipe and hyperparameters. In this case, you must omit recipeArn. Amazon Personalize determines the optimal recipe by running tests with different values for the hyperparameters. AutoML lengthens the training process as compared to selecting a specific recipe.

" }, "recipeArn":{ "shape":"Arn", - "documentation":"

The ARN of the recipe to use for model training. This is required when performAutoML is false.

" + "documentation":"

The Amazon Resource Name (ARN) of the recipe to use for model training. This is required when performAutoML is false. For information about different Amazon Personalize recipes and their ARNs, see Choosing a recipe.

" }, "datasetGroupArn":{ "shape":"Arn", @@ -4397,7 +4397,7 @@ }, "enableMetadataWithRecommendations":{ "shape":"Boolean", - "documentation":"

Whether metadata with recommendations is enabled for the recommender. If enabled, you can specify the columns from your Items dataset in your request for recommendations. Amazon Personalize returns this data for each item in the recommendation response.

If you enable metadata in recommendations, you will incur additional costs. For more information, see Amazon Personalize pricing.

" + "documentation":"

Whether metadata with recommendations is enabled for the recommender. If enabled, you can specify the columns from your Items dataset in your request for recommendations. Amazon Personalize returns this data for each item in the recommendation response. For information about enabling metadata for a recommender, see Enabling metadata in recommendations for a recommender.

If you enable metadata in recommendations, you will incur additional costs. For more information, see Amazon Personalize pricing.

" } }, "documentation":"

The configuration details of the recommender.

" diff -Nru awscli-2.15.9/awscli/botocore/data/personalize-runtime/2018-05-22/service-2.json awscli-2.15.22/awscli/botocore/data/personalize-runtime/2018-05-22/service-2.json --- awscli-2.15.9/awscli/botocore/data/personalize-runtime/2018-05-22/service-2.json 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/awscli/botocore/data/personalize-runtime/2018-05-22/service-2.json 2024-02-21 17:34:54.000000000 +0000 @@ -194,7 +194,7 @@ }, "metadataColumns":{ "shape":"MetadataColumns", - "documentation":"

If you enabled metadata in recommendations when you created or updated the campaign, specify metadata columns from your Items dataset to include in the personalized ranking. The map key is ITEMS and the value is a list of column names from your Items dataset. The maximum number of columns you can provide is 10.

For information about enabling metadata for a campaign, see Enabling metadata in recommendations for a campaign.

" + "documentation":"

If you enabled metadata in recommendations when you created or updated the campaign, specify metadata columns from your Items dataset to include in the personalized ranking. The map key is ITEMS and the value is a list of column names from your Items dataset. The maximum number of columns you can provide is 10.

For information about enabling metadata for a campaign, see Enabling metadata in recommendations for a campaign.

" } } }, @@ -252,7 +252,7 @@ }, "metadataColumns":{ "shape":"MetadataColumns", - "documentation":"

If you enabled metadata in recommendations when you created or updated the campaign or recommender, specify the metadata columns from your Items dataset to include in item recommendations. The map key is ITEMS and the value is a list of column names from your Items dataset. The maximum number of columns you can provide is 10.

For information about enabling metadata for a campaign, see Enabling metadata in recommendations for a campaign. For information about enabling metadata for a recommender, see Enabling metadata in recommendations for a recommender.

" + "documentation":"

If you enabled metadata in recommendations when you created or updated the campaign or recommender, specify the metadata columns from your Items dataset to include in item recommendations. The map key is ITEMS and the value is a list of column names from your Items dataset. The maximum number of columns you can provide is 10.

For information about enabling metadata for a campaign, see Enabling metadata in recommendations for a campaign. For information about enabling metadata for a recommender, see Enabling metadata in recommendations for a recommender.

" } } }, diff -Nru awscli-2.15.9/awscli/botocore/data/polly/2016-06-10/service-2.json awscli-2.15.22/awscli/botocore/data/polly/2016-06-10/service-2.json --- awscli-2.15.9/awscli/botocore/data/polly/2016-06-10/service-2.json 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/awscli/botocore/data/polly/2016-06-10/service-2.json 2024-02-21 17:34:54.000000000 +0000 @@ -1112,7 +1112,8 @@ "Isabelle", "Zayd", "Danielle", - "Gregory" + "Gregory", + "Burcu" ] }, "VoiceList":{ diff -Nru awscli-2.15.9/awscli/botocore/data/pricing/2017-10-15/endpoint-rule-set-1.json awscli-2.15.22/awscli/botocore/data/pricing/2017-10-15/endpoint-rule-set-1.json --- awscli-2.15.9/awscli/botocore/data/pricing/2017-10-15/endpoint-rule-set-1.json 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/awscli/botocore/data/pricing/2017-10-15/endpoint-rule-set-1.json 2024-02-21 17:34:54.000000000 +0000 @@ -40,7 +40,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -83,7 +82,8 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -96,7 +96,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -110,7 +109,6 @@ "assign": "PartitionResult" } ], - "type": "tree", "rules": [ { "conditions": [ @@ -133,7 +131,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -168,7 +165,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -179,14 +175,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS and DualStack are enabled, but this partition does not support one or both", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -200,14 +198,12 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ - true, { "fn": "getAttr", "argv": [ @@ -216,11 +212,11 @@ }, "supportsFIPS" ] - } + }, + true ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -231,14 +227,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS is 
enabled but this partition does not support FIPS", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -252,7 +250,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -272,7 +269,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -283,14 +279,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "DualStack is enabled but this partition does not support DualStack", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -326,9 +324,11 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" } - ] + ], + "type": "tree" }, { "conditions": [], diff -Nru awscli-2.15.9/awscli/botocore/data/pricing/2017-10-15/service-2.json awscli-2.15.22/awscli/botocore/data/pricing/2017-10-15/service-2.json --- awscli-2.15.9/awscli/botocore/data/pricing/2017-10-15/service-2.json 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/awscli/botocore/data/pricing/2017-10-15/service-2.json 2024-02-21 17:34:54.000000000 +0000 @@ -27,6 +27,7 @@ {"shape":"InvalidNextTokenException"}, {"shape":"NotFoundException"}, {"shape":"InternalErrorException"}, + {"shape":"ThrottlingException"}, {"shape":"ExpiredNextTokenException"} ], "documentation":"

Returns the metadata for one service or a list of the metadata for all services. Use this without a service code to get the service codes for all services. Use it with a service code, such as AmazonEC2, to get information specific to that service, such as the attribute names available for that service. For example, some of the attribute names available for EC2 are volumeType, maxIopsVolume, operation, locationType, and instanceCapacity10xlarge.

" @@ -44,6 +45,7 @@ {"shape":"InvalidNextTokenException"}, {"shape":"NotFoundException"}, {"shape":"InternalErrorException"}, + {"shape":"ThrottlingException"}, {"shape":"ExpiredNextTokenException"} ], "documentation":"

Returns a list of attribute values. Attributes are similar to the details in a Price List API offer file. For a list of available attributes, see Offer File Definitions in the Billing and Cost Management User Guide.

" @@ -60,7 +62,8 @@ {"shape":"InvalidParameterException"}, {"shape":"NotFoundException"}, {"shape":"AccessDeniedException"}, - {"shape":"InternalErrorException"} + {"shape":"InternalErrorException"}, + {"shape":"ThrottlingException"} ], "documentation":"

This feature is in preview release and is subject to change. Your use of Amazon Web Services Price List API is subject to the Beta Service Participation terms of the Amazon Web Services Service Terms (Section 1.10).

This returns the URL that you can retrieve your Price List file from. This URL is based on the PriceListArn and FileFormat that you retrieve from the ListPriceLists response.

" }, @@ -77,6 +80,7 @@ {"shape":"InvalidNextTokenException"}, {"shape":"NotFoundException"}, {"shape":"InternalErrorException"}, + {"shape":"ThrottlingException"}, {"shape":"ExpiredNextTokenException"} ], "documentation":"

Returns a list of all products that match the filter criteria.

" @@ -95,6 +99,7 @@ {"shape":"NotFoundException"}, {"shape":"AccessDeniedException"}, {"shape":"InternalErrorException"}, + {"shape":"ThrottlingException"}, {"shape":"ExpiredNextTokenException"} ], "documentation":"

This feature is in preview release and is subject to change. Your use of Amazon Web Services Price List API is subject to the Beta Service Participation terms of the Amazon Web Services Service Terms (Section 1.10).

This returns a list of Price List references that the requester if authorized to view, given a ServiceCode, CurrencyCode, and an EffectiveDate. Use without a RegionCode filter to list Price List references from all available Amazon Web Services Regions. Use with a RegionCode filter to get the Price List reference that's specific to a specific Amazon Web Services Region. You can use the PriceListArn from the response to get your preferred Price List files through the GetPriceListFileUrl API.

" @@ -338,7 +343,8 @@ }, "documentation":"

An error on the server occurred during the processing of your request. Try again later.

", "exception":true, - "fault":true + "fault":true, + "retryable":{"throttling":false} }, "InvalidNextTokenException":{ "type":"structure", @@ -443,7 +449,7 @@ "type":"string", "max":2048, "min":18, - "pattern":"arn:[A-Za-z0-9][-.A-Za-z0-9]{0,62}:pricing:::price-list/[A-Za-z0-9_/.-]{1,1023}" + "pattern":"arn:[A-Za-z0-9][-.A-Za-z0-9]{0,62}:pricing:::price-list/[A-Za-z0-9+_/.-]{1,1023}" }, "PriceListJsonItems":{ "type":"list", @@ -487,6 +493,15 @@ }, "String":{"type":"string"}, "SynthesizedJsonPriceListJsonItem":{"type":"string"}, + "ThrottlingException":{ + "type":"structure", + "members":{ + "Message":{"shape":"errorMessage"} + }, + "documentation":"

You've made too many requests exceeding service quotas.

", + "exception":true, + "retryable":{"throttling":true} + }, "errorMessage":{"type":"string"} }, "documentation":"

The Amazon Web Services Price List API is a centralized and convenient way to programmatically query Amazon Web Services for services, products, and pricing information. The Amazon Web Services Price List uses standardized product attributes such as Location, Storage Class, and Operating System, and provides prices at the SKU level. You can use the Amazon Web Services Price List to do the following:

Use GetServices without a service code to retrieve the service codes for all Amazon Web Services, then GetServices with a service code to retrieve the attribute names for that service. After you have the service code and attribute names, you can use GetAttributeValues to see what values are available for an attribute. With the service code and an attribute name and value, you can use GetProducts to find specific products that you're interested in, such as an AmazonEC2 instance, with a Provisioned IOPS volumeType.

For more information, see Using the Amazon Web Services Price List API in the Billing User Guide.

" diff -Nru awscli-2.15.9/awscli/botocore/data/qbusiness/2023-11-27/service-2.json awscli-2.15.22/awscli/botocore/data/qbusiness/2023-11-27/service-2.json --- awscli-2.15.9/awscli/botocore/data/qbusiness/2023-11-27/service-2.json 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/awscli/botocore/data/qbusiness/2023-11-27/service-2.json 2024-02-21 17:34:54.000000000 +0000 @@ -131,7 +131,7 @@ {"shape":"AccessDeniedException"}, {"shape":"ServiceQuotaExceededException"} ], - "documentation":"

Creates an Amazon Q index.

To determine if index creation has completed, check the Status field returned from a call to DescribeIndex. The Status field is set to ACTIVE when the index is ready to use.

Once the index is active, you can index your documents using the BatchPutDocument API or the CreateDataSource API.

" + "documentation":"

Creates an Amazon Q index.

To determine if index creation has completed, check the Status field returned from a call to DescribeIndex. The Status field is set to ACTIVE when the index is ready to use.

Once the index is active, you can index your documents using the BatchPutDocument API or the CreateDataSource API.

" }, "CreatePlugin":{ "name":"CreatePlugin", @@ -1455,7 +1455,7 @@ "documentation":"

Performs a logical OR operation on all supplied filters.

" } }, - "documentation":"

Enables filtering of Amazon Q web experience responses based on document attributes or metadata fields.

" + "documentation":"

Enables filtering of responses based on document attributes or metadata fields.

" }, "AttributeFilters":{ "type":"list", @@ -1623,6 +1623,12 @@ "type":"boolean", "box":true }, + "BoostingDurationInSeconds":{ + "type":"long", + "box":true, + "max":999999999, + "min":0 + }, "ChatSyncInput":{ "type":"structure", "required":[ @@ -2351,6 +2357,21 @@ "type":"list", "member":{"shape":"DataSource"} }, + "DateAttributeBoostingConfiguration":{ + "type":"structure", + "required":["boostingLevel"], + "members":{ + "boostingDurationInSeconds":{ + "shape":"BoostingDurationInSeconds", + "documentation":"

Specifies the duration, in seconds, of a boost that applies to a DATE type document attribute.

" + }, + "boostingLevel":{ + "shape":"DocumentAttributeBoostingLevel", + "documentation":"

Specifies how much a document attribute is boosted.

" + } + }, + "documentation":"

Provides information on boosting DATE type document attributes.

For more information on how boosting document attributes work in Amazon Q, see Boosting using document attributes.

" + }, "DeleteApplicationRequest":{ "type":"structure", "required":["applicationId"], @@ -2694,6 +2715,45 @@ }, "documentation":"

A document attribute or metadata field.

" }, + "DocumentAttributeBoostingConfiguration":{ + "type":"structure", + "members":{ + "dateConfiguration":{ + "shape":"DateAttributeBoostingConfiguration", + "documentation":"

Provides information on boosting DATE type document attributes.

" + }, + "numberConfiguration":{ + "shape":"NumberAttributeBoostingConfiguration", + "documentation":"

Provides information on boosting NUMBER type document attributes.

" + }, + "stringConfiguration":{ + "shape":"StringAttributeBoostingConfiguration", + "documentation":"

Provides information on boosting STRING type document attributes.

" + }, + "stringListConfiguration":{ + "shape":"StringListAttributeBoostingConfiguration", + "documentation":"

Provides information on boosting STRING_LIST type document attributes.

" + } + }, + "documentation":"

Provides information on boosting supported Amazon Q document attribute types. When an end user chat query matches document attributes that have been boosted, Amazon Q prioritizes generating responses from content that matches the boosted document attributes.

For STRING and STRING_LIST type document attributes to be used for boosting on the console and the API, they must be enabled for search using the DocumentAttributeConfiguration object of the UpdateIndex API. If you haven't enabled searching on these attributes, you can't boost attributes of these data types on either the console or the API.

For more information on how boosting document attributes work in Amazon Q, see Boosting using document attributes.

", + "union":true + }, + "DocumentAttributeBoostingLevel":{ + "type":"string", + "enum":[ + "NONE", + "LOW", + "MEDIUM", + "HIGH", + "VERY_HIGH" + ] + }, + "DocumentAttributeBoostingOverrideMap":{ + "type":"map", + "key":{"shape":"DocumentAttributeKey"}, + "value":{"shape":"DocumentAttributeBoostingConfiguration"}, + "min":1 + }, "DocumentAttributeCondition":{ "type":"structure", "required":[ @@ -2711,7 +2771,7 @@ }, "value":{"shape":"DocumentAttributeValue"} }, - "documentation":"

The condition used for the target document attribute or metadata field when ingesting documents into Amazon Q. You use this with DocumentAttributeTarget to apply the condition.

For example, you can create the 'Department' target field and have it prefill department names associated with the documents based on information in the 'Source_URI' field. Set the condition that if the 'Source_URI' field contains 'financial' in its URI value, then prefill the target field 'Department' with the target value 'Finance' for the document.

Amazon Q can't create a target field if it has not already been created as an index field. After you create your index field, you can create a document metadata field using DocumentAttributeTarget. Amazon Q then will map your newly created metadata field to your index field.

" + "documentation":"

The condition used for the target document attribute or metadata field when ingesting documents into Amazon Q. You use this with DocumentAttributeTarget to apply the condition.

For example, you can create the 'Department' target field and have it prefill department names associated with the documents based on information in the 'Source_URI' field. Set the condition that if the 'Source_URI' field contains 'financial' in its URI value, then prefill the target field 'Department' with the target value 'Finance' for the document.

Amazon Q can't create a target field if it has not already been created as an index field. After you create your index field, you can create a document metadata field using DocumentAttributeTarget. Amazon Q then will map your newly created metadata field to your index field.

" }, "DocumentAttributeConfiguration":{ "type":"structure", @@ -2761,7 +2821,7 @@ }, "value":{"shape":"DocumentAttributeValue"} }, - "documentation":"

The target document attribute or metadata field you want to alter when ingesting documents into Amazon Q.

For example, you can delete all customer identification numbers associated with the documents, stored in the document metadata field called 'Customer_ID' by setting the target key as 'Customer_ID' and the deletion flag to TRUE. This removes all customer ID values in the field 'Customer_ID'. This would scrub personally identifiable information from each document's metadata.

Amazon Q can't create a target field if it has not already been created as an index field. After you create your index field, you can create a document metadata field using DocumentAttributeTarget . Amazon Q will then map your newly created document attribute to your index field.

You can also use this with DocumentAttributeCondition .

" + "documentation":"

The target document attribute or metadata field you want to alter when ingesting documents into Amazon Q.

For example, you can delete all customer identification numbers associated with the documents, stored in the document metadata field called 'Customer_ID' by setting the target key as 'Customer_ID' and the deletion flag to TRUE. This removes all customer ID values in the field 'Customer_ID'. This would scrub personally identifiable information from each document's metadata.

Amazon Q can't create a target field if it has not already been created as an index field. After you create your index field, you can create a document metadata field using DocumentAttributeTarget . Amazon Q will then map your newly created document attribute to your index field.

You can also use this with DocumentAttributeCondition .

" }, "DocumentAttributeValue":{ "type":"structure", @@ -3639,7 +3699,7 @@ "documentation":"

Stores the original, raw documents or the structured, parsed documents before and after altering them. For more information, see Data contracts for Lambda functions.

" } }, - "documentation":"

Provides the configuration information for invoking a Lambda function in Lambda to alter document metadata and content when ingesting documents into Amazon Q.

You can configure your Lambda function using PreExtractionHookConfiguration if you want to apply advanced alterations on the original or raw documents.

If you want to apply advanced alterations on the Amazon Q structured documents, you must configure your Lambda function using PostExtractionHookConfiguration.

You can only invoke one Lambda function. However, this function can invoke other functions it requires.

For more information, see Custom document enrichment.

" + "documentation":"

Provides the configuration information for invoking a Lambda function in Lambda to alter document metadata and content when ingesting documents into Amazon Q.

You can configure your Lambda function using PreExtractionHookConfiguration if you want to apply advanced alterations on the original or raw documents.

If you want to apply advanced alterations on the Amazon Q structured documents, you must configure your Lambda function using PostExtractionHookConfiguration.

You can only invoke one Lambda function. However, this function can invoke other functions it requires.

For more information, see Custom document enrichment.

" }, "Index":{ "type":"structure", @@ -3744,7 +3804,7 @@ }, "target":{"shape":"DocumentAttributeTarget"} }, - "documentation":"

Provides the configuration information for applying basic logic to alter document metadata and content when ingesting documents into Amazon Q.

To apply advanced logic, to go beyond what you can do with basic logic, see HookConfiguration .

For more information, see Custom document enrichment.

" + "documentation":"

Provides the configuration information for applying basic logic to alter document metadata and content when ingesting documents into Amazon Q.

To apply advanced logic, to go beyond what you can do with basic logic, see HookConfiguration .

For more information, see Custom document enrichment.

" }, "InlineDocumentEnrichmentConfigurations":{ "type":"list", @@ -4567,7 +4627,11 @@ "FACTUALLY_CORRECT", "COMPLETE", "RELEVANT_SOURCES", - "HELPFUL" + "HELPFUL", + "NOT_BASED_ON_DOCUMENTS", + "NOT_COMPLETE", + "NOT_CONCISE", + "OTHER" ] }, "Messages":{ @@ -4582,6 +4646,10 @@ "type":"structure", "required":["indexId"], "members":{ + "boostingOverride":{ + "shape":"DocumentAttributeBoostingOverrideMap", + "documentation":"

Overrides the default boosts applied by Amazon Q to supported document attribute data types.

" + }, "indexId":{ "shape":"IndexId", "documentation":"

The identifier for the Amazon Q index.

" @@ -4594,6 +4662,28 @@ "max":800, "min":1 }, + "NumberAttributeBoostingConfiguration":{ + "type":"structure", + "required":["boostingLevel"], + "members":{ + "boostingLevel":{ + "shape":"DocumentAttributeBoostingLevel", + "documentation":"

Specifies the duration, in seconds, of a boost applied to a NUMBER type document attribute.

" + }, + "boostingType":{ + "shape":"NumberAttributeBoostingType", + "documentation":"

Specifies how much a document attribute is boosted.

" + } + }, + "documentation":"

Provides information on boosting NUMBER type document attributes.

For more information on how boosting document attributes work in Amazon Q, see Boosting using document attributes.

" + }, + "NumberAttributeBoostingType":{ + "type":"string", + "enum":[ + "PRIORITIZE_LARGER_VALUES", + "PRIORITIZE_SMALLER_VALUES" + ] + }, "OAuth2ClientCredentialConfiguration":{ "type":"structure", "required":[ @@ -4985,7 +5075,7 @@ }, "ruleType":{ "shape":"RuleType", - "documentation":"

The type fo rule.

" + "documentation":"

The type of rule.

" } }, "documentation":"

Guardrail rules for an Amazon Q application. Amazon Q supports only one rule at a time.

" @@ -5244,6 +5334,48 @@ "max":2048, "min":1 }, + "StringAttributeBoostingConfiguration":{ + "type":"structure", + "required":["boostingLevel"], + "members":{ + "attributeValueBoosting":{ + "shape":"StringAttributeValueBoosting", + "documentation":"

Specifies specific values of a STRING type document attribute being boosted.

" + }, + "boostingLevel":{ + "shape":"DocumentAttributeBoostingLevel", + "documentation":"

Specifies how much a document attribute is boosted.

" + } + }, + "documentation":"

Provides information on boosting STRING type document attributes.

For STRING and STRING_LIST type document attributes to be used for boosting on the console and the API, they must be enabled for search using the DocumentAttributeConfiguration object of the UpdateIndex API. If you haven't enabled searching on these attributes, you can't boost attributes of these data types on either the console or the API.

For more information on how boosting document attributes work in Amazon Q, see Boosting using document attributes.

" + }, + "StringAttributeValueBoosting":{ + "type":"map", + "key":{"shape":"String"}, + "value":{"shape":"StringAttributeValueBoostingLevel"}, + "max":10, + "min":1 + }, + "StringAttributeValueBoostingLevel":{ + "type":"string", + "enum":[ + "LOW", + "MEDIUM", + "HIGH", + "VERY_HIGH" + ] + }, + "StringListAttributeBoostingConfiguration":{ + "type":"structure", + "required":["boostingLevel"], + "members":{ + "boostingLevel":{ + "shape":"DocumentAttributeBoostingLevel", + "documentation":"

Specifies how much a document attribute is boosted.

" + } + }, + "documentation":"

Provides information on boosting STRING_LIST type document attributes.

For STRING and STRING_LIST type document attributes to be used for boosting on the console and the API, they must be enabled for search using the DocumentAttributeConfiguration object of the UpdateIndex API. If you haven't enabled searching on these attributes, you can't boost attributes of these data types on either the console or the API.

For more information on how boosting document attributes work in Amazon Q, see Boosting using document attributes.

" + }, "SubnetId":{ "type":"string", "max":200, @@ -5396,7 +5528,7 @@ "members":{ "description":{ "shape":"TopicDescription", - "documentation":"

A description for your topic control configuration. Use this outline how the large language model (LLM) should use this topic control configuration.

" + "documentation":"

A description for your topic control configuration. Use this to outline how the large language model (LLM) should use this topic control configuration.

" }, "exampleChatMessages":{ "shape":"ExampleChatMessages", @@ -5995,5 +6127,5 @@ "member":{"shape":"WebExperience"} } }, - "documentation":"

" + "documentation":"

Amazon Q is in preview release and is subject to change.

This is the Amazon Q (for business use) API Reference. Amazon Q is a fully managed, generative-AI powered enterprise chat assistant that you can deploy within your organization. Amazon Q enhances employee productivity by supporting key tasks such as question-answering, knowledge discovery, writing email messages, summarizing text, drafting document outlines, and brainstorming ideas. Users ask questions of Amazon Q and get answers that are presented in a conversational manner. For an introduction to the service, see the Amazon Q (for business use) Developer Guide .

For an overview of the Amazon Q APIs, see Overview of Amazon Q API operations.

For information about the IAM access control permissions you need to use this API, see IAM roles for Amazon Q in the Amazon Q (for business use) Developer Guide.

You can use the following AWS SDKs to access Amazon Q APIs:

The following resources provide additional information about using the Amazon Q API:

" } diff -Nru awscli-2.15.9/awscli/botocore/data/qconnect/2020-10-19/service-2.json awscli-2.15.22/awscli/botocore/data/qconnect/2020-10-19/service-2.json --- awscli-2.15.9/awscli/botocore/data/qconnect/2020-10-19/service-2.json 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/awscli/botocore/data/qconnect/2020-10-19/service-2.json 2024-02-21 17:34:54.000000000 +0000 @@ -352,7 +352,9 @@ {"shape":"AccessDeniedException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Retrieves recommendations for the specified session. To avoid retrieving the same recommendations in subsequent calls, use NotifyRecommendationsReceived. This API supports long-polling behavior with the waitTimeSeconds parameter. Short poll is the default behavior and only returns recommendations already available. To perform a manual query against an assistant, use QueryAssistant.

" + "documentation":"

Retrieves recommendations for the specified session. To avoid retrieving the same recommendations in subsequent calls, use NotifyRecommendationsReceived. This API supports long-polling behavior with the waitTimeSeconds parameter. Short poll is the default behavior and only returns recommendations already available. To perform a manual query against an assistant, use QueryAssistant.

", + "deprecated":true, + "deprecatedMessage":"GetRecommendations API will be discontinued starting June 1, 2024. To receive generative responses after March 1, 2024 you will need to create a new Assistant in the Connect console and integrate the Amazon Q in Connect JavaScript library (amazon-q-connectjs) into your applications." }, "GetSession":{ "name":"GetSession", @@ -526,7 +528,9 @@ {"shape":"AccessDeniedException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Performs a manual search against the specified assistant. To retrieve recommendations for an assistant, use GetRecommendations.

" + "documentation":"

Performs a manual search against the specified assistant. To retrieve recommendations for an assistant, use GetRecommendations.

", + "deprecated":true, + "deprecatedMessage":"QueryAssistant API will be discontinued starting June 1, 2024. To receive generative responses after March 1, 2024 you will need to create a new Assistant in the Connect console and integrate the Amazon Q in Connect JavaScript library (amazon-q-connectjs) into your applications." }, "RemoveKnowledgeBaseTemplateUri":{ "name":"RemoveKnowledgeBaseTemplateUri", @@ -3274,7 +3278,7 @@ }, "QuickResponseName":{ "type":"string", - "max":40, + "max":100, "min":1 }, "QuickResponseOrderField":{ diff -Nru awscli-2.15.9/awscli/botocore/data/quicksight/2018-04-01/service-2.json awscli-2.15.22/awscli/botocore/data/quicksight/2018-04-01/service-2.json --- awscli-2.15.9/awscli/botocore/data/quicksight/2018-04-01/service-2.json 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/awscli/botocore/data/quicksight/2018-04-01/service-2.json 2024-02-21 17:34:54.000000000 +0000 @@ -5437,6 +5437,10 @@ "ContributionAnalysisDefaults":{ "shape":"ContributionAnalysisDefaultList", "documentation":"

The contribution analysis (anomaly configuration) setup of the visual.

" + }, + "Interactions":{ + "shape":"VisualInteractionOptions", + "documentation":"

The general visual interactions setup for a visual.

" } }, "documentation":"

The configuration of a BarChartVisual.

" @@ -5710,6 +5714,10 @@ "VisualPalette":{ "shape":"VisualPalette", "documentation":"

The palette (chart color) display setup of the visual.

" + }, + "Interactions":{ + "shape":"VisualInteractionOptions", + "documentation":"

The general visual interactions setup for a visual.

" } }, "documentation":"

The configuration of a BoxPlotVisual.

" @@ -6688,6 +6696,10 @@ "VisualPalette":{ "shape":"VisualPalette", "documentation":"

The palette (chart color) display setup of the visual.

" + }, + "Interactions":{ + "shape":"VisualInteractionOptions", + "documentation":"

The general visual interactions setup for a visual.

" } }, "documentation":"

The configuration of a ComboChartVisual.

" @@ -7037,6 +7049,16 @@ "COLLECTIVE" ] }, + "ContextMenuOption":{ + "type":"structure", + "members":{ + "AvailabilityStatus":{ + "shape":"DashboardBehavior", + "documentation":"

The availability status of the context menu options. If the value of this property is set to ENABLED, dashboard readers can interact with the context menu.

" + } + }, + "documentation":"

The context menu options for a visual's interactions.

" + }, "ContributionAnalysisDefault":{ "type":"structure", "required":[ @@ -8743,6 +8765,10 @@ "ImageScaling":{ "shape":"CustomContentImageScalingConfiguration", "documentation":"

The sizing options for the size of the custom content visual. This structure is required when the ContentType of the visual is 'IMAGE'.

" + }, + "Interactions":{ + "shape":"VisualInteractionOptions", + "documentation":"

The general visual interactions setup for a visual.

" } }, "documentation":"

The configuration of a CustomContentVisual.

" @@ -14937,6 +14963,10 @@ "MapStyleOptions":{ "shape":"GeospatialMapStyleOptions", "documentation":"

The map style options of the filled map visual.

" + }, + "Interactions":{ + "shape":"VisualInteractionOptions", + "documentation":"

The general visual interactions setup for a visual.

" } }, "documentation":"

The configuration for a FilledMapVisual.

" @@ -16033,6 +16063,10 @@ "VisualPalette":{ "shape":"VisualPalette", "documentation":"

The visual palette configuration of a FunnelChartVisual.

" + }, + "Interactions":{ + "shape":"VisualInteractionOptions", + "documentation":"

The general visual interactions setup for a visual.

" } }, "documentation":"

The configuration of a FunnelChartVisual.

" @@ -16207,6 +16241,10 @@ "VisualPalette":{ "shape":"VisualPalette", "documentation":"

The visual palette configuration of a GaugeChartVisual.

" + }, + "Interactions":{ + "shape":"VisualInteractionOptions", + "documentation":"

The general visual interactions setup for a visual.

" } }, "documentation":"

The configuration of a GaugeChartVisual.

" @@ -16568,7 +16606,11 @@ "shape":"GeospatialPointStyleOptions", "documentation":"

The point style options of the geospatial map.

" }, - "VisualPalette":{"shape":"VisualPalette"} + "VisualPalette":{"shape":"VisualPalette"}, + "Interactions":{ + "shape":"VisualInteractionOptions", + "documentation":"

The general visual interactions setup for a visual.

" + } }, "documentation":"

The configuration of a GeospatialMapVisual.

" }, @@ -17171,6 +17213,10 @@ "Tooltip":{ "shape":"TooltipOptions", "documentation":"

The tooltip display setup of the visual.

" + }, + "Interactions":{ + "shape":"VisualInteractionOptions", + "documentation":"

The general visual interactions setup for a visual.

" } }, "documentation":"

The configuration of a heat map.

" @@ -17335,6 +17381,10 @@ "VisualPalette":{ "shape":"VisualPalette", "documentation":"

The visual palette configuration of a histogram.

" + }, + "Interactions":{ + "shape":"VisualInteractionOptions", + "documentation":"

The general visual interactions setup for a visual.

" } }, "documentation":"

The configuration for a HistogramVisual.

" @@ -17747,6 +17797,10 @@ "CustomNarrative":{ "shape":"CustomNarrativeOptions", "documentation":"

The custom narrative of the insight visual.

" + }, + "Interactions":{ + "shape":"VisualInteractionOptions", + "documentation":"

The general visual interactions setup for a visual.

" } }, "documentation":"

The configuration of an insight visual.

" @@ -18146,6 +18200,10 @@ "KPIOptions":{ "shape":"KPIOptions", "documentation":"

The options that determine the presentation of a KPI visual.

" + }, + "Interactions":{ + "shape":"VisualInteractionOptions", + "documentation":"

The general visual interactions setup for a visual.

" } }, "documentation":"

The configuration of a KPI visual.

" @@ -18565,6 +18623,10 @@ "VisualPalette":{ "shape":"VisualPalette", "documentation":"

The visual palette configuration of a line chart.

" + }, + "Interactions":{ + "shape":"VisualInteractionOptions", + "documentation":"

The general visual interactions setup for a visual.

" } }, "documentation":"

The configuration of a line chart.

" @@ -22143,6 +22205,10 @@ "ContributionAnalysisDefaults":{ "shape":"ContributionAnalysisDefaultList", "documentation":"

The contribution analysis (anomaly configuration) setup of the visual.

" + }, + "Interactions":{ + "shape":"VisualInteractionOptions", + "documentation":"

The general visual interactions setup for a visual.

" } }, "documentation":"

The configuration of a pie chart.

" @@ -22353,6 +22419,10 @@ "PaginatedReportOptions":{ "shape":"PivotTablePaginatedReportOptions", "documentation":"

The paginated report options for a pivot table visual.

" + }, + "Interactions":{ + "shape":"VisualInteractionOptions", + "documentation":"

The general visual interactions setup for a visual.

" } }, "documentation":"

The configuration for a PivotTableVisual.

" @@ -23095,6 +23165,10 @@ "AxesRangeScale":{ "shape":"RadarChartAxesRangeScale", "documentation":"

The axis behavior options of a radar chart.

" + }, + "Interactions":{ + "shape":"VisualInteractionOptions", + "documentation":"

The general visual interactions setup for a visual.

" } }, "documentation":"

The configuration of a RadarChartVisual.

" @@ -24375,6 +24449,10 @@ "DataLabels":{ "shape":"DataLabelOptions", "documentation":"

The data label configuration of a sankey diagram.

" + }, + "Interactions":{ + "shape":"VisualInteractionOptions", + "documentation":"

The general visual interactions setup for a visual.

" } }, "documentation":"

The configuration of a sankey diagram.

" @@ -24502,6 +24580,10 @@ "VisualPalette":{ "shape":"VisualPalette", "documentation":"

The palette (chart color) display setup of the visual.

" + }, + "Interactions":{ + "shape":"VisualInteractionOptions", + "documentation":"

The general visual interactions setup for a visual.

" } }, "documentation":"

The configuration of a scatter plot.

" @@ -26838,6 +26920,10 @@ "TableInlineVisualizations":{ "shape":"TableInlineVisualizationList", "documentation":"

A collection of inline visualizations to display within a chart.

" + }, + "Interactions":{ + "shape":"VisualInteractionOptions", + "documentation":"

The general visual interactions setup for a visual.

" } }, "documentation":"

The configuration for a TableVisual.

" @@ -29052,6 +29138,10 @@ "Tooltip":{ "shape":"TooltipOptions", "documentation":"

The tooltip display setup of the visual.

" + }, + "Interactions":{ + "shape":"VisualInteractionOptions", + "documentation":"

The general visual interactions setup for a visual.

" } }, "documentation":"

The configuration of a tree map.

" @@ -30171,11 +30261,11 @@ }, "GrantPermissions":{ "shape":"ResourcePermissionList", - "documentation":"

The permissions that you want to grant on a resource.

" + "documentation":"

The permissions that you want to grant on a resource. Namespace ARNs are not supported as Principal values for folder permissions.

" }, "RevokePermissions":{ "shape":"ResourcePermissionList", - "documentation":"

The permissions that you want to revoke from a resource.

" + "documentation":"

The permissions that you want to revoke from a resource. Namespace ARNs are not supported as Principal values for folder permissions.

" } } }, @@ -31752,6 +31842,20 @@ "DATA_POINT_MENU" ] }, + "VisualInteractionOptions":{ + "type":"structure", + "members":{ + "VisualMenuOption":{ + "shape":"VisualMenuOption", + "documentation":"

The on-visual menu options for a visual.

" + }, + "ContextMenuOption":{ + "shape":"ContextMenuOption", + "documentation":"

The context menu options for a visual.

" + } + }, + "documentation":"

The general visual interactions setup for visual publish options.

" + }, "VisualList":{ "type":"list", "member":{"shape":"Visual"}, @@ -31842,6 +31946,16 @@ }, "documentation":"

The field well configuration of a waterfall visual.

" }, + "WaterfallChartColorConfiguration":{ + "type":"structure", + "members":{ + "GroupColorConfiguration":{ + "shape":"WaterfallChartGroupColorConfiguration", + "documentation":"

The color configuration for individual groups within a waterfall visual.

" + } + }, + "documentation":"

The color configuration of a waterfall visual.

" + }, "WaterfallChartConfiguration":{ "type":"structure", "members":{ @@ -31884,6 +31998,14 @@ "VisualPalette":{ "shape":"VisualPalette", "documentation":"

The visual palette configuration of a waterfall visual.

" + }, + "ColorConfiguration":{ + "shape":"WaterfallChartColorConfiguration", + "documentation":"

The color configuration of a waterfall visual.

" + }, + "Interactions":{ + "shape":"VisualInteractionOptions", + "documentation":"

The general visual interactions setup for a visual.

" } }, "documentation":"

The configuration for a waterfall visual.

" @@ -31898,6 +32020,24 @@ }, "documentation":"

The field well configuration of a waterfall visual.

" }, + "WaterfallChartGroupColorConfiguration":{ + "type":"structure", + "members":{ + "PositiveBarColor":{ + "shape":"HexColor", + "documentation":"

Defines the color for the positive bars of a waterfall chart.

" + }, + "NegativeBarColor":{ + "shape":"HexColor", + "documentation":"

Defines the color for the negative bars of a waterfall chart.

" + }, + "TotalBarColor":{ + "shape":"HexColor", + "documentation":"

Defines the color for the total bars of a waterfall chart.

" + } + }, + "documentation":"

The color configuration for individual groups within a waterfall visual.

" + }, "WaterfallChartOptions":{ "type":"structure", "members":{ @@ -32033,6 +32173,10 @@ "WordCloudOptions":{ "shape":"WordCloudOptions", "documentation":"

The options for a word cloud visual.

" + }, + "Interactions":{ + "shape":"VisualInteractionOptions", + "documentation":"

The general visual interactions setup for a visual.

" } }, "documentation":"

The configuration of a word cloud visual.

" diff -Nru awscli-2.15.9/awscli/botocore/data/rds/2014-10-31/service-2.json awscli-2.15.22/awscli/botocore/data/rds/2014-10-31/service-2.json --- awscli-2.15.9/awscli/botocore/data/rds/2014-10-31/service-2.json 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/awscli/botocore/data/rds/2014-10-31/service-2.json 2024-02-21 17:34:54.000000000 +0000 @@ -305,6 +305,7 @@ }, "errors":[ {"shape":"DBClusterAlreadyExistsFault"}, + {"shape":"InsufficientDBInstanceCapacityFault"}, {"shape":"InsufficientStorageClusterCapacityFault"}, {"shape":"DBClusterQuotaExceededFault"}, {"shape":"StorageQuotaExceededFault"}, @@ -533,6 +534,28 @@ ], "documentation":"

Creates a new DB security group. DB security groups control access to a DB instance.

A DB security group controls access to EC2-Classic DB instances that are not in a VPC.

EC2-Classic was retired on August 15, 2022. If you haven't migrated from EC2-Classic to a VPC, we recommend that you migrate as soon as possible. For more information, see Migrate from EC2-Classic to a VPC in the Amazon EC2 User Guide, the blog EC2-Classic Networking is Retiring – Here’s How to Prepare, and Moving a DB instance not in a VPC into a VPC in the Amazon RDS User Guide.

" }, + "CreateDBShardGroup":{ + "name":"CreateDBShardGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateDBShardGroupMessage"}, + "output":{ + "shape":"DBShardGroup", + "resultWrapper":"CreateDBShardGroupResult" + }, + "errors":[ + {"shape":"DBShardGroupAlreadyExistsFault"}, + {"shape":"DBClusterNotFoundFault"}, + {"shape":"MaxDBShardGroupLimitReached"}, + {"shape":"InvalidDBClusterStateFault"}, + {"shape":"InvalidMaxAcuFault"}, + {"shape":"UnsupportedDBEngineVersionFault"}, + {"shape":"InvalidVPCNetworkStateFault"} + ], + "documentation":"

Creates a new DB shard group for Aurora Limitless Database. You must enable Aurora Limitless Database to create a DB shard group.

Valid for: Aurora DB clusters only

" + }, "CreateDBSnapshot":{ "name":"CreateDBSnapshot", "http":{ @@ -888,6 +911,24 @@ ], "documentation":"

Deletes a DB security group.

The specified DB security group must not be associated with any DB instances.

EC2-Classic was retired on August 15, 2022. If you haven't migrated from EC2-Classic to a VPC, we recommend that you migrate as soon as possible. For more information, see Migrate from EC2-Classic to a VPC in the Amazon EC2 User Guide, the blog EC2-Classic Networking is Retiring – Here’s How to Prepare, and Moving a DB instance not in a VPC into a VPC in the Amazon RDS User Guide.

" }, + "DeleteDBShardGroup":{ + "name":"DeleteDBShardGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteDBShardGroupMessage"}, + "output":{ + "shape":"DBShardGroup", + "resultWrapper":"DeleteDBShardGroupResult" + }, + "errors":[ + {"shape":"DBShardGroupNotFoundFault"}, + {"shape":"InvalidDBShardGroupStateFault"}, + {"shape":"InvalidDBClusterStateFault"} + ], + "documentation":"

Deletes an Aurora Limitless Database DB shard group.

" + }, "DeleteDBSnapshot":{ "name":"DeleteDBSnapshot", "http":{ @@ -1387,6 +1428,23 @@ ], "documentation":"

Returns a list of DBSecurityGroup descriptions. If a DBSecurityGroupName is specified, the list will contain only the descriptions of the specified DB security group.

EC2-Classic was retired on August 15, 2022. If you haven't migrated from EC2-Classic to a VPC, we recommend that you migrate as soon as possible. For more information, see Migrate from EC2-Classic to a VPC in the Amazon EC2 User Guide, the blog EC2-Classic Networking is Retiring – Here’s How to Prepare, and Moving a DB instance not in a VPC into a VPC in the Amazon RDS User Guide.

" }, + "DescribeDBShardGroups":{ + "name":"DescribeDBShardGroups", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDBShardGroupsMessage"}, + "output":{ + "shape":"DescribeDBShardGroupsResponse", + "resultWrapper":"DescribeDBShardGroupsResult" + }, + "errors":[ + {"shape":"DBShardGroupNotFoundFault"}, + {"shape":"DBClusterNotFoundFault"} + ], + "documentation":"

Describes existing Aurora Limitless Database DB shard groups.

" + }, "DescribeDBSnapshotAttributes":{ "name":"DescribeDBSnapshotAttributes", "http":{ @@ -1770,7 +1828,7 @@ {"shape":"InvalidDBClusterStateFault"}, {"shape":"InvalidDBInstanceStateFault"} ], - "documentation":"

Forces a failover for a DB cluster.

For an Aurora DB cluster, failover for a DB cluster promotes one of the Aurora Replicas (read-only instances) in the DB cluster to be the primary DB instance (the cluster writer).

For a Multi-AZ DB cluster, failover for a DB cluster promotes one of the readable standby DB instances (read-only instances) in the DB cluster to be the primary DB instance (the cluster writer).

An Amazon Aurora DB cluster automatically fails over to an Aurora Replica, if one exists, when the primary DB instance fails. A Multi-AZ DB cluster automatically fails over to a readable standby DB instance when the primary DB instance fails.

To simulate a failure of a primary instance for testing, you can force a failover. Because each instance in a DB cluster has its own endpoint address, make sure to clean up and re-establish any existing connections that use those endpoint addresses when the failover is complete.

For more information on Amazon Aurora DB clusters, see What is Amazon Aurora? in the Amazon Aurora User Guide.

For more information on Multi-AZ DB clusters, see Multi-AZ DB cluster deployments in the Amazon RDS User Guide.

" + "documentation":"

Forces a failover for a DB cluster.

For an Aurora DB cluster, failover for a DB cluster promotes one of the Aurora Replicas (read-only instances) in the DB cluster to be the primary DB instance (the cluster writer).

For a Multi-AZ DB cluster, after RDS terminates the primary DB instance, the internal monitoring system detects that the primary DB instance is unhealthy and promotes a readable standby (read-only instances) in the DB cluster to be the primary DB instance (the cluster writer). Failover times are typically less than 35 seconds.

An Amazon Aurora DB cluster automatically fails over to an Aurora Replica, if one exists, when the primary DB instance fails. A Multi-AZ DB cluster automatically fails over to a readable standby DB instance when the primary DB instance fails.

To simulate a failure of a primary instance for testing, you can force a failover. Because each instance in a DB cluster has its own endpoint address, make sure to clean up and re-establish any existing connections that use those endpoint addresses when the failover is complete.

For more information on Amazon Aurora DB clusters, see What is Amazon Aurora? in the Amazon Aurora User Guide.

For more information on Multi-AZ DB clusters, see Multi-AZ DB cluster deployments in the Amazon RDS User Guide.

" }, "FailoverGlobalCluster":{ "name":"FailoverGlobalCluster", @@ -2091,6 +2149,25 @@ "errors":[], "documentation":"

Updates the recommendation status and recommended action status for the specified recommendation.

" }, + "ModifyDBShardGroup":{ + "name":"ModifyDBShardGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ModifyDBShardGroupMessage"}, + "output":{ + "shape":"DBShardGroup", + "resultWrapper":"ModifyDBShardGroupResult" + }, + "errors":[ + {"shape":"InvalidDBClusterStateFault"}, + {"shape":"DBShardGroupAlreadyExistsFault"}, + {"shape":"DBShardGroupNotFoundFault"}, + {"shape":"InvalidMaxAcuFault"} + ], + "documentation":"

Modifies the settings of an Aurora Limitless Database DB shard group. You can change one or more settings by specifying these parameters and the new values in the request.

" + }, "ModifyDBSnapshot":{ "name":"ModifyDBSnapshot", "http":{ @@ -2308,6 +2385,23 @@ ], "documentation":"

You might need to reboot your DB instance, usually for maintenance reasons. For example, if you make certain modifications, or if you change the DB parameter group associated with the DB instance, you must reboot the instance for the changes to take effect.

Rebooting a DB instance restarts the database engine service. Rebooting a DB instance results in a momentary outage, during which the DB instance status is set to rebooting.

For more information about rebooting, see Rebooting a DB Instance in the Amazon RDS User Guide.

This command doesn't apply to RDS Custom.

If your DB instance is part of a Multi-AZ DB cluster, you can reboot the DB cluster with the RebootDBCluster operation.

" }, + "RebootDBShardGroup":{ + "name":"RebootDBShardGroup", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RebootDBShardGroupMessage"}, + "output":{ + "shape":"DBShardGroup", + "resultWrapper":"RebootDBShardGroupResult" + }, + "errors":[ + {"shape":"DBShardGroupNotFoundFault"}, + {"shape":"InvalidDBShardGroupStateFault"} + ], + "documentation":"

You might need to reboot your DB shard group, usually for maintenance reasons. For example, if you make certain modifications, reboot the DB shard group for the changes to take effect.

This operation applies only to Aurora Limitless Database DB shard groups.

" + }, "RegisterDBProxyTargets":{ "name":"RegisterDBProxyTargets", "http":{ @@ -2511,7 +2605,8 @@ {"shape":"KMSKeyNotAccessibleFault"}, {"shape":"DomainNotFoundFault"}, {"shape":"DBClusterParameterGroupNotFoundFault"}, - {"shape":"InvalidDBInstanceStateFault"} + {"shape":"InvalidDBInstanceStateFault"}, + {"shape":"InsufficientDBInstanceCapacityFault"} ], "documentation":"

Creates a new DB cluster from a DB snapshot or DB cluster snapshot.

The target DB cluster is created from the source snapshot with a default configuration. If you don't specify a security group, the new DB cluster is associated with the default security group.

This operation only restores the DB cluster, not the DB instances for that DB cluster. You must invoke the CreateDBInstance operation to create DB instances for the restored DB cluster, specifying the identifier of the restored DB cluster in DBClusterIdentifier. You can create DB instances only after the RestoreDBClusterFromSnapshot operation has completed and the DB cluster is available.

For more information on Amazon Aurora DB clusters, see What is Amazon Aurora? in the Amazon Aurora User Guide.

For more information on Multi-AZ DB clusters, see Multi-AZ DB cluster deployments in the Amazon RDS User Guide.

" }, @@ -2545,7 +2640,8 @@ {"shape":"StorageQuotaExceededFault"}, {"shape":"DomainNotFoundFault"}, {"shape":"DBClusterParameterGroupNotFoundFault"}, - {"shape":"DBClusterAutomatedBackupNotFoundFault"} + {"shape":"DBClusterAutomatedBackupNotFoundFault"}, + {"shape":"InsufficientDBInstanceCapacityFault"} ], "documentation":"

Restores a DB cluster to an arbitrary point in time. Users can restore to any point in time before LatestRestorableTime for up to BackupRetentionPeriod days. The target DB cluster is created from the source DB cluster with the same configuration as the original DB cluster, except that the new DB cluster is created with the default DB security group.

For Aurora, this operation only restores the DB cluster, not the DB instances for that DB cluster. You must invoke the CreateDBInstance operation to create DB instances for the restored DB cluster, specifying the identifier of the restored DB cluster in DBClusterIdentifier. You can create DB instances only after the RestoreDBClusterToPointInTime operation has completed and the DB cluster is available.

For more information on Amazon Aurora DB clusters, see What is Amazon Aurora? in the Amazon Aurora User Guide.

For more information on Multi-AZ DB clusters, see Multi-AZ DB cluster deployments in the Amazon RDS User Guide.

" }, @@ -3876,7 +3972,7 @@ }, "TargetDBInstanceClass":{ "shape":"TargetDBInstanceClass", - "documentation":"

Specify the DB instance class for the databases in the green environment.

" + "documentation":"

Specify the DB instance class for the databases in the green environment.

This parameter only applies to RDS DB instances, because DB instances within an Aurora DB cluster can have multiple different instance classes. If you're creating a blue/green deployment from an Aurora DB cluster, don't specify this parameter. After the green environment is created, you can individually modify the instance classes of the DB instances within the green DB cluster.

" }, "UpgradeTargetStorageConfig":{ "shape":"BooleanOptional", @@ -4011,7 +4107,7 @@ }, "DBClusterIdentifier":{ "shape":"String", - "documentation":"

The identifier for this DB cluster. This parameter is stored as a lowercase string.

Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters

Constraints:

Example: my-cluster1

" + "documentation":"

The identifier for this DB cluster. This parameter is stored as a lowercase string.

Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters

Constraints:

Example: my-cluster1

" }, "DBClusterParameterGroupName":{ "shape":"String", @@ -4173,6 +4269,10 @@ "shape":"IntegerOptional", "documentation":"

The number of days to retain Performance Insights data.

Valid for Cluster Type: Multi-AZ DB clusters only

Valid Values:

Default: 7 days

If you specify a retention period that isn't valid, such as 94, Amazon RDS issues an error.

" }, + "EnableLimitlessDatabase":{ + "shape":"BooleanOptional", + "documentation":"

Specifies whether to enable Aurora Limitless Database. You must enable Aurora Limitless Database to create a DB shard group.

Valid for: Aurora DB clusters only

" + }, "ServerlessV2ScalingConfiguration":{"shape":"ServerlessV2ScalingConfiguration"}, "NetworkType":{ "shape":"String", @@ -4211,7 +4311,7 @@ }, "DBParameterGroupFamily":{ "shape":"String", - "documentation":"

The DB cluster parameter group family name. A DB cluster parameter group can be associated with one and only one DB cluster parameter group family, and can be applied only to a DB cluster running a database engine and engine version compatible with that DB cluster parameter group family.

Aurora MySQL

Example: aurora-mysql5.7, aurora-mysql8.0

Aurora PostgreSQL

Example: aurora-postgresql14

RDS for MySQL

Example: mysql8.0

RDS for PostgreSQL

Example: postgres12

To list all of the available parameter group families for a DB engine, use the following command:

aws rds describe-db-engine-versions --query \"DBEngineVersions[].DBParameterGroupFamily\" --engine <engine>

For example, to list all of the available parameter group families for the Aurora PostgreSQL DB engine, use the following command:

aws rds describe-db-engine-versions --query \"DBEngineVersions[].DBParameterGroupFamily\" --engine aurora-postgresql

The output contains duplicates.

The following are the valid DB engine values:

" + "documentation":"

The DB cluster parameter group family name. A DB cluster parameter group can be associated with one and only one DB cluster parameter group family, and can be applied only to a DB cluster running a database engine and engine version compatible with that DB cluster parameter group family.

Aurora MySQL

Example: aurora-mysql5.7, aurora-mysql8.0

Aurora PostgreSQL

Example: aurora-postgresql14

RDS for MySQL

Example: mysql8.0

RDS for PostgreSQL

Example: postgres13

To list all of the available parameter group families for a DB engine, use the following command:

aws rds describe-db-engine-versions --query \"DBEngineVersions[].DBParameterGroupFamily\" --engine <engine>

For example, to list all of the available parameter group families for the Aurora PostgreSQL DB engine, use the following command:

aws rds describe-db-engine-versions --query \"DBEngineVersions[].DBParameterGroupFamily\" --engine aurora-postgresql

The output contains duplicates.

The following are the valid DB engine values:

" }, "Description":{ "shape":"String", @@ -4876,6 +4976,36 @@ "DBSecurityGroup":{"shape":"DBSecurityGroup"} } }, + "CreateDBShardGroupMessage":{ + "type":"structure", + "required":[ + "DBShardGroupIdentifier", + "DBClusterIdentifier", + "MaxACU" + ], + "members":{ + "DBShardGroupIdentifier":{ + "shape":"String", + "documentation":"

The name of the DB shard group.

" + }, + "DBClusterIdentifier":{ + "shape":"String", + "documentation":"

The name of the primary DB cluster for the DB shard group.

" + }, + "ComputeRedundancy":{ + "shape":"IntegerOptional", + "documentation":"

Specifies whether to create standby instances for the DB shard group. Valid values are the following:

" + }, + "MaxACU":{ + "shape":"DoubleOptional", + "documentation":"

The maximum capacity of the DB shard group in Aurora capacity units (ACUs).

" + }, + "PubliclyAccessible":{ + "shape":"BooleanOptional", + "documentation":"

Specifies whether the DB shard group is publicly accessible.

When the DB shard group is publicly accessible, its Domain Name System (DNS) endpoint resolves to the private IP address from within the DB shard group's virtual private cloud (VPC). It resolves to the public IP address from outside of the DB shard group's VPC. Access to the DB shard group is ultimately controlled by the security group it uses. That public access is not permitted if the security group assigned to the DB shard group doesn't permit it.

When the DB shard group isn't publicly accessible, it is an internal DB shard group with a DNS name that resolves to a private IP address.

Default: The default behavior varies depending on whether DBSubnetGroupName is specified.

If DBSubnetGroupName isn't specified, and PubliclyAccessible isn't specified, the following applies:

If DBSubnetGroupName is specified, and PubliclyAccessible isn't specified, the following applies:

" + } + } + }, "CreateDBSnapshotMessage":{ "type":"structure", "required":[ @@ -4947,7 +5077,7 @@ }, "SnsTopicArn":{ "shape":"String", - "documentation":"

The Amazon Resource Name (ARN) of the SNS topic created for event notification. The ARN is created by Amazon SNS when you create a topic and subscribe to it.

" + "documentation":"

The Amazon Resource Name (ARN) of the SNS topic created for event notification. SNS automatically creates the ARN when you create a topic and subscribe to it.

RDS doesn't support FIFO (first in, first out) topics. For more information, see Message ordering and deduplication (FIFO topics) in the Amazon Simple Notification Service Developer Guide.

" }, "SourceType":{ "shape":"String", @@ -5510,6 +5640,10 @@ "AwsBackupRecoveryPointArn":{ "shape":"String", "documentation":"

The Amazon Resource Name (ARN) of the recovery point in Amazon Web Services Backup.

" + }, + "LimitlessDatabase":{ + "shape":"LimitlessDatabase", + "documentation":"

The details for Aurora Limitless Database.

" } }, "documentation":"

Contains the details of an Amazon Aurora DB cluster or Multi-AZ DB cluster.

For an Amazon Aurora DB cluster, this data type is used as a response element in the operations CreateDBCluster, DeleteDBCluster, DescribeDBClusters, FailoverDBCluster, ModifyDBCluster, PromoteReadReplicaDBCluster, RestoreDBClusterFromS3, RestoreDBClusterFromSnapshot, RestoreDBClusterToPointInTime, StartDBCluster, and StopDBCluster.

For a Multi-AZ DB cluster, this data type is used as a response element in the operations CreateDBCluster, DeleteDBCluster, DescribeDBClusters, FailoverDBCluster, ModifyDBCluster, RebootDBCluster, RestoreDBClusterFromSnapshot, and RestoreDBClusterToPointInTime.

For more information on Amazon Aurora DB clusters, see What is Amazon Aurora? in the Amazon Aurora User Guide.

For more information on Multi-AZ DB clusters, see Multi-AZ deployments with two readable standby DB instances in the Amazon RDS User Guide.

", @@ -7876,6 +8010,74 @@ "locationName":"DBSecurityGroup" } }, + "DBShardGroup":{ + "type":"structure", + "members":{ + "DBShardGroupResourceId":{ + "shape":"String", + "documentation":"

The Amazon Web Services Region-unique, immutable identifier for the DB shard group.

" + }, + "DBShardGroupIdentifier":{ + "shape":"String", + "documentation":"

The name of the DB shard group.

" + }, + "DBClusterIdentifier":{ + "shape":"String", + "documentation":"

The name of the primary DB cluster for the DB shard group.

" + }, + "MaxACU":{ + "shape":"DoubleOptional", + "documentation":"

The maximum capacity of the DB shard group in Aurora capacity units (ACUs).

" + }, + "ComputeRedundancy":{ + "shape":"IntegerOptional", + "documentation":"

Specifies whether to create standby instances for the DB shard group. Valid values are the following:

" + }, + "Status":{ + "shape":"String", + "documentation":"

The status of the DB shard group.

" + }, + "PubliclyAccessible":{ + "shape":"BooleanOptional", + "documentation":"

Indicates whether the DB shard group is publicly accessible.

When the DB shard group is publicly accessible, its Domain Name System (DNS) endpoint resolves to the private IP address from within the DB shard group's virtual private cloud (VPC). It resolves to the public IP address from outside of the DB shard group's VPC. Access to the DB shard group is ultimately controlled by the security group it uses. That public access isn't permitted if the security group assigned to the DB shard group doesn't permit it.

When the DB shard group isn't publicly accessible, it is an internal DB shard group with a DNS name that resolves to a private IP address.

For more information, see CreateDBShardGroup.

This setting is only for Aurora Limitless Database.

" + }, + "Endpoint":{ + "shape":"String", + "documentation":"

The connection endpoint for the DB shard group.

" + } + } + }, + "DBShardGroupAlreadyExistsFault":{ + "type":"structure", + "members":{ + }, + "documentation":"

The specified DB shard group name must be unique in your Amazon Web Services account in the specified Amazon Web Services Region.

", + "error":{ + "code":"DBShardGroupAlreadyExists", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DBShardGroupNotFoundFault":{ + "type":"structure", + "members":{ + }, + "documentation":"

The specified DB shard group name wasn't found.

", + "error":{ + "code":"DBShardGroupNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "DBShardGroupsList":{ + "type":"list", + "member":{ + "shape":"DBShardGroup", + "locationName":"DBShardGroup" + } + }, "DBSnapshot":{ "type":"structure", "members":{ @@ -8569,6 +8771,16 @@ }, "documentation":"

" }, + "DeleteDBShardGroupMessage":{ + "type":"structure", + "required":["DBShardGroupIdentifier"], + "members":{ + "DBShardGroupIdentifier":{ + "shape":"String", + "documentation":"

The name of the DB shard group to delete.

" + } + } + }, "DeleteDBSnapshotMessage":{ "type":"structure", "required":["DBSnapshotIdentifier"], @@ -8969,7 +9181,7 @@ }, "MaxRecords":{ "shape":"IntegerOptional", - "documentation":"

The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so you can retrieve the remaining results.

Default: 100

Constraints: Minimum 20, maximum 100.

" + "documentation":"

The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so you can retrieve the remaining results.

Default: 100

Constraints: Minimum 20, maximum 100

" }, "Marker":{ "shape":"String", @@ -9402,6 +9614,40 @@ }, "documentation":"

" }, + "DescribeDBShardGroupsMessage":{ + "type":"structure", + "members":{ + "DBShardGroupIdentifier":{ + "shape":"String", + "documentation":"

The user-supplied DB shard group identifier or the Amazon Resource Name (ARN) of the DB shard group. If this parameter is specified, information for only the specific DB shard group is returned. This parameter isn't case-sensitive.

Constraints:

" + }, + "Filters":{ + "shape":"FilterList", + "documentation":"

A filter that specifies one or more DB shard groups to describe.

" + }, + "Marker":{ + "shape":"String", + "documentation":"

An optional pagination token provided by a previous DescribeDBShardGroups request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.

" + }, + "MaxRecords":{ + "shape":"MaxRecords", + "documentation":"

The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so you can retrieve the remaining results.

Default: 100

Constraints: Minimum 20, maximum 100

" + } + } + }, + "DescribeDBShardGroupsResponse":{ + "type":"structure", + "members":{ + "DBShardGroups":{ + "shape":"DBShardGroupsList", + "documentation":"

Contains a list of DB shard groups for the user.

" + }, + "Marker":{ + "shape":"String", + "documentation":"

A pagination token that can be used in a later DescribeDBShardGroups request.

" + } + } + }, "DescribeDBSnapshotAttributesMessage":{ "type":"structure", "required":["DBSnapshotIdentifier"], @@ -10693,7 +10939,7 @@ "documentation":"

One or more filter values. Filter values are case-sensitive.

" } }, - "documentation":"

A filter name and value pair that is used to return a more specific list of results from a describe operation. Filters can be used to match a set of resources by specific criteria, such as IDs. The filters supported by a describe operation are documented with the describe operation.

Currently, wildcards are not supported in filters.

The following actions can be filtered:

" + "documentation":"

A filter name and value pair that is used to return a more specific list of results from a describe operation. Filters can be used to match a set of resources by specific criteria, such as IDs. The filters supported by a describe operation are documented with the describe operation.

Currently, wildcards are not supported in filters.

The following actions can be filtered:

" }, "FilterList":{ "type":"list", @@ -11284,6 +11530,18 @@ }, "exception":true }, + "InvalidDBShardGroupStateFault":{ + "type":"structure", + "members":{ + }, + "documentation":"

The DB shard group must be in the available state.

", + "error":{ + "code":"InvalidDBShardGroupState", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, "InvalidDBSnapshotStateFault":{ "type":"structure", "members":{ @@ -11404,6 +11662,18 @@ }, "exception":true }, + "InvalidMaxAcuFault":{ + "type":"structure", + "members":{ + }, + "documentation":"

The maximum capacity of the DB shard group must be 48-7168 Aurora capacity units (ACUs).

", + "error":{ + "code":"InvalidMaxAcu", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, "InvalidOptionGroupStateFault":{ "type":"structure", "members":{ @@ -11508,6 +11778,33 @@ "min":1, "pattern":"[a-zA-Z0-9_:\\-\\/]+" }, + "LimitlessDatabase":{ + "type":"structure", + "members":{ + "Status":{ + "shape":"LimitlessDatabaseStatus", + "documentation":"

The status of Aurora Limitless Database.

" + }, + "MinRequiredACU":{ + "shape":"DoubleOptional", + "documentation":"

The minimum required capacity for Aurora Limitless Database in Aurora capacity units (ACUs).

" + } + }, + "documentation":"

Contains details for Aurora Limitless Database.

" + }, + "LimitlessDatabaseStatus":{ + "type":"string", + "enum":[ + "active", + "not-in-use", + "enabled", + "disabled", + "enabling", + "disabling", + "modifying-max-capacity", + "error" + ] + }, "ListTagsForResourceMessage":{ "type":"structure", "required":["ResourceName"], @@ -11562,6 +11859,18 @@ }, "documentation":"

Contains the secret managed by RDS in Amazon Web Services Secrets Manager for the master user password.

For more information, see Password management with Amazon Web Services Secrets Manager in the Amazon RDS User Guide and Password management with Amazon Web Services Secrets Manager in the Amazon Aurora User Guide.

" }, + "MaxDBShardGroupLimitReached":{ + "type":"structure", + "members":{ + }, + "documentation":"

The maximum number of DB shard groups for your Amazon Web Services account in the specified Amazon Web Services Region has been reached.

", + "error":{ + "code":"MaxDBShardGroupLimitReached", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, "MaxRecords":{ "type":"integer", "max":100, @@ -11944,6 +12253,10 @@ "AwsBackupRecoveryPointArn":{ "shape":"AwsBackupRecoveryPointArn", "documentation":"

The Amazon Resource Name (ARN) of the recovery point in Amazon Web Services Backup.

" + }, + "EnableLimitlessDatabase":{ + "shape":"BooleanOptional", + "documentation":"

Specifies whether to enable Aurora Limitless Database. You must enable Aurora Limitless Database to create a DB shard group.

Valid for: Aurora DB clusters only

" } }, "documentation":"

" @@ -12405,6 +12718,20 @@ } } }, + "ModifyDBShardGroupMessage":{ + "type":"structure", + "required":["DBShardGroupIdentifier"], + "members":{ + "DBShardGroupIdentifier":{ + "shape":"String", + "documentation":"

The name of the DB shard group to modify.

" + }, + "MaxACU":{ + "shape":"DoubleOptional", + "documentation":"

The maximum capacity of the DB shard group in Aurora capacity units (ACUs).

" + } + } + }, "ModifyDBSnapshotAttributeMessage":{ "type":"structure", "required":[ @@ -13748,6 +14075,16 @@ "DBInstance":{"shape":"DBInstance"} } }, + "RebootDBShardGroupMessage":{ + "type":"structure", + "required":["DBShardGroupIdentifier"], + "members":{ + "DBShardGroupIdentifier":{ + "shape":"String", + "documentation":"

The name of the DB shard group to reboot.

" + } + } + }, "RecommendedAction":{ "type":"structure", "members":{ @@ -16302,6 +16639,18 @@ }, "documentation":"

A time zone associated with a DBInstance or a DBSnapshot. This data type is an element in the response to the DescribeDBInstances, the DescribeDBSnapshots, and the DescribeDBEngineVersions actions.

" }, + "UnsupportedDBEngineVersionFault":{ + "type":"structure", + "members":{ + }, + "documentation":"

The specified DB engine version isn't supported for Aurora Limitless Database.

", + "error":{ + "code":"UnsupportedDBEngineVersion", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, "UpgradeTarget":{ "type":"structure", "members":{ diff -Nru awscli-2.15.9/awscli/botocore/data/redshift/2012-12-01/paginators-1.json awscli-2.15.22/awscli/botocore/data/redshift/2012-12-01/paginators-1.json --- awscli-2.15.9/awscli/botocore/data/redshift/2012-12-01/paginators-1.json 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/awscli/botocore/data/redshift/2012-12-01/paginators-1.json 2024-02-21 17:34:54.000000000 +0000 @@ -209,6 +209,12 @@ "limit_key": "MaxRecords", "output_token": "Marker", "result_key": "RedshiftIdcApplications" + }, + "ListRecommendations": { + "input_token": "Marker", + "limit_key": "MaxRecords", + "output_token": "Marker", + "result_key": "Recommendations" } } } diff -Nru awscli-2.15.9/awscli/botocore/data/redshift/2012-12-01/service-2.json awscli-2.15.22/awscli/botocore/data/redshift/2012-12-01/service-2.json --- awscli-2.15.9/awscli/botocore/data/redshift/2012-12-01/service-2.json 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/awscli/botocore/data/redshift/2012-12-01/service-2.json 2024-02-21 17:34:54.000000000 +0000 @@ -1760,6 +1760,23 @@ ], "documentation":"

Get the resource policy for a specified resource.

" }, + "ListRecommendations":{ + "name":"ListRecommendations", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListRecommendationsMessage"}, + "output":{ + "shape":"ListRecommendationsResult", + "resultWrapper":"ListRecommendationsResult" + }, + "errors":[ + {"shape":"ClusterNotFoundFault"}, + {"shape":"UnsupportedOperationFault"} + ], + "documentation":"

List the Amazon Redshift Advisor recommendations for one or multiple Amazon Redshift clusters in an Amazon Web Services account.

" + }, "ModifyAquaConfiguration":{ "name":"ModifyAquaConfiguration", "http":{ @@ -7264,6 +7281,14 @@ "min":1, "pattern":"^[a-zA-Z0-9_+.#@$-]+$" }, + "ImpactRankingType":{ + "type":"string", + "enum":[ + "HIGH", + "MEDIUM", + "LOW" + ] + }, "ImportTablesCompleted":{ "type":"list", "member":{"shape":"String"} @@ -7832,6 +7857,40 @@ }, "exception":true }, + "ListRecommendationsMessage":{ + "type":"structure", + "members":{ + "ClusterIdentifier":{ + "shape":"String", + "documentation":"

The unique identifier of the Amazon Redshift cluster for which the list of Advisor recommendations is returned. If neither the cluster identifier nor the cluster namespace ARN parameter is specified, then recommendations for all clusters in the account are returned.

" + }, + "NamespaceArn":{ + "shape":"String", + "documentation":"

The Amazon Redshift cluster namespace Amazon Resource Name (ARN) for which the list of Advisor recommendations is returned. If neither the cluster identifier nor the cluster namespace ARN parameter is specified, then recommendations for all clusters in the account are returned.

" + }, + "MaxRecords":{ + "shape":"IntegerOptional", + "documentation":"

The maximum number of response records to return in each call. If the number of remaining response records exceeds the specified MaxRecords value, a value is returned in a marker field of the response. You can retrieve the next set of records by retrying the command with the returned marker value.

" + }, + "Marker":{ + "shape":"String", + "documentation":"

A value that indicates the starting point for the next set of response records in a subsequent request. If a value is returned in a response, you can retrieve the next set of records by providing this returned marker value in the Marker parameter and retrying the command. If the Marker field is empty, all response records have been retrieved for the request.

" + } + } + }, + "ListRecommendationsResult":{ + "type":"structure", + "members":{ + "Recommendations":{ + "shape":"RecommendationList", + "documentation":"

The Advisor recommendations for action on the Amazon Redshift cluster.

" + }, + "Marker":{ + "shape":"String", + "documentation":"

A value that indicates the starting point for the next set of response records in a subsequent request. If a value is returned in a response, you can retrieve the next set of records by providing this returned marker value in the Marker parameter and retrying the command. If the Marker field is empty, all response records have been retrieved for the request.

" + } + } + }, "LogDestinationType":{ "type":"string", "enum":[ @@ -9013,6 +9072,103 @@ "Cluster":{"shape":"Cluster"} } }, + "Recommendation":{ + "type":"structure", + "members":{ + "Id":{ + "shape":"String", + "documentation":"

A unique identifier of the Advisor recommendation.

" + }, + "ClusterIdentifier":{ + "shape":"String", + "documentation":"

The unique identifier of the cluster for which the recommendation is returned.

" + }, + "NamespaceArn":{ + "shape":"String", + "documentation":"

The Amazon Redshift cluster namespace ARN for which the recommendations are returned.

" + }, + "CreatedAt":{ + "shape":"TStamp", + "documentation":"

The date and time (UTC) that the recommendation was created.

" + }, + "RecommendationType":{ + "shape":"String", + "documentation":"

The type of Advisor recommendation.

" + }, + "Title":{ + "shape":"String", + "documentation":"

The title of the recommendation.

" + }, + "Description":{ + "shape":"String", + "documentation":"

The description of the recommendation.

" + }, + "Observation":{ + "shape":"String", + "documentation":"

The description of what was observed about your cluster.

" + }, + "ImpactRanking":{ + "shape":"ImpactRankingType", + "documentation":"

The scale of the impact that the Advisor recommendation has to the performance and cost of the cluster.

" + }, + "RecommendationText":{ + "shape":"String", + "documentation":"

The description of the recommendation.

" + }, + "RecommendedActions":{ + "shape":"RecommendedActionList", + "documentation":"

List of Amazon Redshift recommended actions.

" + }, + "ReferenceLinks":{ + "shape":"ReferenceLinkList", + "documentation":"

List of helpful links for more information about the Advisor recommendation.

" + } + }, + "documentation":"

An Amazon Redshift Advisor recommended action on the Amazon Redshift cluster.

" + }, + "RecommendationList":{ + "type":"list", + "member":{ + "shape":"Recommendation", + "locationName":"Recommendation" + } + }, + "RecommendedAction":{ + "type":"structure", + "members":{ + "Text":{ + "shape":"String", + "documentation":"

The specific instruction about the command.

" + }, + "Database":{ + "shape":"String", + "documentation":"

The database name to perform the action on. Only applicable if the type of command is SQL.

" + }, + "Command":{ + "shape":"String", + "documentation":"

The command to run.

" + }, + "Type":{ + "shape":"RecommendedActionType", + "documentation":"

The type of command.

" + } + }, + "documentation":"

The recommended action from the Amazon Redshift Advisor recommendation.

" + }, + "RecommendedActionList":{ + "type":"list", + "member":{ + "shape":"RecommendedAction", + "locationName":"RecommendedAction" + } + }, + "RecommendedActionType":{ + "type":"string", + "enum":[ + "SQL", + "CLI" + ] + }, "RecurringCharge":{ "type":"structure", "members":{ @@ -9128,6 +9284,27 @@ }, "exception":true }, + "ReferenceLink":{ + "type":"structure", + "members":{ + "Text":{ + "shape":"String", + "documentation":"

The hyperlink text that describes the link to more information.

" + }, + "Link":{ + "shape":"String", + "documentation":"

The URL address to find more information.

" + } + }, + "documentation":"

A link to an Amazon Redshift Advisor reference for more information about a recommendation.

" + }, + "ReferenceLinkList":{ + "type":"list", + "member":{ + "shape":"ReferenceLink", + "locationName":"ReferenceLink" + } + }, "RejectDataShareMessage":{ "type":"structure", "required":["DataShareArn"], diff -Nru awscli-2.15.9/awscli/botocore/data/redshift-serverless/2021-04-21/service-2.json awscli-2.15.22/awscli/botocore/data/redshift-serverless/2021-04-21/service-2.json --- awscli-2.15.9/awscli/botocore/data/redshift-serverless/2021-04-21/service-2.json 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/awscli/botocore/data/redshift-serverless/2021-04-21/service-2.json 2024-02-21 17:34:54.000000000 +0000 @@ -975,7 +975,7 @@ "members":{ "parameterKey":{ "shape":"ParameterKey", - "documentation":"

The key of the parameter. The options are auto_mv, datestyle, enable_case_sensitive_identifier, enable_user_activity_logging, query_group, search_path, and query monitoring metrics that let you define performance boundaries. For more information about query monitoring rules and available metrics, see Query monitoring metrics for Amazon Redshift Serverless.

" + "documentation":"

The key of the parameter. The options are auto_mv, datestyle, enable_case_sensitive_identifier, enable_user_activity_logging, query_group, search_path, require_ssl, and query monitoring metrics that let you define performance boundaries. For more information about query monitoring rules and available metrics, see Query monitoring metrics for Amazon Redshift Serverless.

" }, "parameterValue":{ "shape":"ParameterValue", @@ -1377,7 +1377,7 @@ }, "configParameters":{ "shape":"ConfigParameterList", - "documentation":"

An array of parameters to set for advanced control over a database. The options are auto_mv, datestyle, enable_case_sensitive_identifier, enable_user_activity_logging, query_group, search_path, require_ssl, use_fips_ssl, and query monitoring metrics that let you define performance boundaries. For more information about query monitoring rules and available metrics, see Query monitoring metrics for Amazon Redshift Serverless.

" + "documentation":"

An array of parameters to set for advanced control over a database. The options are auto_mv, datestyle, enable_case_sensitive_identifier, enable_user_activity_logging, query_group, search_path, require_ssl, and query monitoring metrics that let you define performance boundaries. For more information about query monitoring rules and available metrics, see Query monitoring metrics for Amazon Redshift Serverless.

" }, "enhancedVpcRouting":{ "shape":"Boolean", @@ -3561,7 +3561,7 @@ }, "configParameters":{ "shape":"ConfigParameterList", - "documentation":"

An array of parameters to set for advanced control over a database. The options are auto_mv, datestyle, enable_case_sensitive_identifier, enable_user_activity_logging, query_group, search_path, require_ssl, use_fips_ssl, and query monitoring metrics that let you define performance boundaries. For more information about query monitoring rules and available metrics, see Query monitoring metrics for Amazon Redshift Serverless.

" + "documentation":"

An array of parameters to set for advanced control over a database. The options are auto_mv, datestyle, enable_case_sensitive_identifier, enable_user_activity_logging, query_group, search_path, require_ssl, and query monitoring metrics that let you define performance boundaries. For more information about query monitoring rules and available metrics, see Query monitoring metrics for Amazon Redshift Serverless.

" }, "enhancedVpcRouting":{ "shape":"Boolean", @@ -3733,7 +3733,7 @@ }, "configParameters":{ "shape":"ConfigParameterList", - "documentation":"

An array of parameters to set for advanced control over a database. The options are auto_mv, datestyle, enable_case_sensitive_identifier, enable_user_activity_logging, query_group, search_path, require_ssl, use_fips_ssl, and query monitoring metrics that let you define performance boundaries. For more information about query monitoring rules and available metrics, see Query monitoring metrics for Amazon Redshift Serverless.

" + "documentation":"

An array of parameters to set for advanced control over a database. The options are auto_mv, datestyle, enable_case_sensitive_identifier, enable_user_activity_logging, query_group, search_path, require_ssl, and query monitoring metrics that let you define performance boundaries. For more information about query monitoring rules and available metrics, see Query monitoring metrics for Amazon Redshift Serverless.

" }, "creationDate":{ "shape":"SyntheticTimestamp_date_time", diff -Nru awscli-2.15.9/awscli/botocore/data/rekognition/2016-06-27/service-2.json awscli-2.15.22/awscli/botocore/data/rekognition/2016-06-27/service-2.json --- awscli-2.15.9/awscli/botocore/data/rekognition/2016-06-27/service-2.json 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/awscli/botocore/data/rekognition/2016-06-27/service-2.json 2024-02-21 17:34:54.000000000 +0000 @@ -477,7 +477,7 @@ {"shape":"ProvisionedThroughputExceededException"}, {"shape":"InvalidImageFormatException"} ], - "documentation":"

This operation applies only to Amazon Rekognition Custom Labels.

Detects custom labels in a supplied image by using an Amazon Rekognition Custom Labels model.

You specify which version of a model version to use by using the ProjectVersionArn input parameter.

You pass the input image as base64-encoded image bytes or as a reference to an image in an Amazon S3 bucket. If you use the AWS CLI to call Amazon Rekognition operations, passing image bytes is not supported. The image must be either a PNG or JPEG formatted file.

For each object that the model version detects on an image, the API returns a (CustomLabel) object in an array (CustomLabels). Each CustomLabel object provides the label name (Name), the level of confidence that the image contains the object (Confidence), and object location information, if it exists, for the label on the image (Geometry).

To filter labels that are returned, specify a value for MinConfidence. DetectCustomLabelsLabels only returns labels with a confidence that's higher than the specified value. The value of MinConfidence maps to the assumed threshold values created during training. For more information, see Assumed threshold in the Amazon Rekognition Custom Labels Developer Guide. Amazon Rekognition Custom Labels metrics expresses an assumed threshold as a floating point value between 0-1. The range of MinConfidence normalizes the threshold value to a percentage value (0-100). Confidence responses from DetectCustomLabels are also returned as a percentage. You can use MinConfidence to change the precision and recall or your model. For more information, see Analyzing an image in the Amazon Rekognition Custom Labels Developer Guide.

If you don't specify a value for MinConfidence, DetectCustomLabels returns labels based on the assumed threshold of each label.

This is a stateless API operation. That is, the operation does not persist any data.

This operation requires permissions to perform the rekognition:DetectCustomLabels action.

For more information, see Analyzing an image in the Amazon Rekognition Custom Labels Developer Guide.

" + "documentation":"

This operation applies only to Amazon Rekognition Custom Labels.

Detects custom labels in a supplied image by using an Amazon Rekognition Custom Labels model.

You specify which version of a model version to use by using the ProjectVersionArn input parameter.

You pass the input image as base64-encoded image bytes or as a reference to an image in an Amazon S3 bucket. If you use the AWS CLI to call Amazon Rekognition operations, passing image bytes is not supported. The image must be either a PNG or JPEG formatted file.

For each object that the model version detects on an image, the API returns a (CustomLabel) object in an array (CustomLabels). Each CustomLabel object provides the label name (Name), the level of confidence that the image contains the object (Confidence), and object location information, if it exists, for the label on the image (Geometry). Note that for the DetectCustomLabelsLabels operation, Polygons are not returned in the Geometry section of the response.

To filter labels that are returned, specify a value for MinConfidence. DetectCustomLabelsLabels only returns labels with a confidence that's higher than the specified value. The value of MinConfidence maps to the assumed threshold values created during training. For more information, see Assumed threshold in the Amazon Rekognition Custom Labels Developer Guide. Amazon Rekognition Custom Labels metrics expresses an assumed threshold as a floating point value between 0-1. The range of MinConfidence normalizes the threshold value to a percentage value (0-100). Confidence responses from DetectCustomLabels are also returned as a percentage. You can use MinConfidence to change the precision and recall or your model. For more information, see Analyzing an image in the Amazon Rekognition Custom Labels Developer Guide.

If you don't specify a value for MinConfidence, DetectCustomLabels returns labels based on the assumed threshold of each label.

This is a stateless API operation. That is, the operation does not persist any data.

This operation requires permissions to perform the rekognition:DetectCustomLabels action.

For more information, see Analyzing an image in the Amazon Rekognition Custom Labels Developer Guide.

" }, "DetectFaces":{ "name":"DetectFaces", @@ -1567,7 +1567,7 @@ "members":{ "AssociatedFaces":{ "shape":"AssociatedFacesList", - "documentation":"

An array of AssociatedFace objects containing FaceIDs that are successfully associated with the UserID is returned. Returned if the AssociateFaces action is successful.

" + "documentation":"

An array of AssociatedFace objects containing FaceIDs that have been successfully associated with the UserID. Returned if the AssociateFaces action is successful.

" }, "UnsuccessfulFaceAssociations":{ "shape":"UnsuccessfulFaceAssociationList", @@ -2068,6 +2068,26 @@ "TIMESTAMP" ] }, + "ContentType":{ + "type":"structure", + "members":{ + "Confidence":{ + "shape":"Percent", + "documentation":"

The confidence level of the label given

" + }, + "Name":{ + "shape":"String", + "documentation":"

The name of the label

" + } + }, + "documentation":"

Contains information regarding the confidence and name of a detected content type.

" + }, + "ContentTypes":{ + "type":"list", + "member":{"shape":"ContentType"}, + "max":50, + "min":0 + }, "CopyProjectVersionRequest":{ "type":"structure", "required":[ @@ -3272,6 +3292,10 @@ "ProjectVersion":{ "shape":"ProjectVersionId", "documentation":"

Identifier of the custom adapter that was used during inference. If during inference the adapter was EXPIRED, then the parameter will not be returned, indicating that a base moderation detection project version was used.

" + }, + "ContentTypes":{ + "shape":"ContentTypes", + "documentation":"

A list of predicted results for the type of content an image contains. For example, the image content might be from animation, sports, or a video game.

" } } }, @@ -5657,6 +5681,16 @@ }, "documentation":"

Summary that provides statistics on input manifest and errors identified in the input manifest.

" }, + "MediaAnalysisModelVersions":{ + "type":"structure", + "members":{ + "Moderation":{ + "shape":"String", + "documentation":"

The Moderation base model version.

" + } + }, + "documentation":"

Object containing information about the model versions of selected features in a given job.

" + }, "MediaAnalysisOperationsConfig":{ "type":"structure", "members":{ @@ -5685,7 +5719,11 @@ "MediaAnalysisResults":{ "type":"structure", "members":{ - "S3Object":{"shape":"S3Object"} + "S3Object":{"shape":"S3Object"}, + "ModelVersions":{ + "shape":"MediaAnalysisModelVersions", + "documentation":"

Information about the model versions for the features selected in a given job.

" + } }, "documentation":"

Contains the results for a media analysis job created with StartMediaAnalysisJob.

" }, @@ -5713,6 +5751,10 @@ "ParentName":{ "shape":"String", "documentation":"

The name for the parent label. Labels at the top level of the hierarchy have the parent label \"\".

" + }, + "TaxonomyLevel":{ + "shape":"UInteger", + "documentation":"

The level of the moderation label with regard to its taxonomy, from 1 to 3.

" } }, "documentation":"

Provides information about a single type of inappropriate, unwanted, or offensive content found in an image or video. Each type of moderated content has a label within a hierarchical taxonomy. For more information, see Content moderation in the Amazon Rekognition Developer Guide.

" @@ -8071,5 +8113,5 @@ "exception":true } }, - "documentation":"

This is the API Reference for Amazon Rekognition Image, Amazon Rekognition Custom Labels, Amazon Rekognition Stored Video, Amazon Rekognition Streaming Video. It provides descriptions of actions, data types, common parameters, and common errors.

Amazon Rekognition Image

Amazon Rekognition Custom Labels

Amazon Rekognition Video Stored Video

Amazon Rekognition Video Streaming Video

" + "documentation":"

This is the API Reference for Amazon Rekognition Image, Amazon Rekognition Custom Labels, Amazon Rekognition Stored Video, Amazon Rekognition Streaming Video. It provides descriptions of actions, data types, common parameters, and common errors.

Amazon Rekognition Image

Amazon Rekognition Custom Labels

Amazon Rekognition Video Stored Video

Amazon Rekognition Video Streaming Video

" } diff -Nru awscli-2.15.9/awscli/botocore/data/resource-explorer-2/2022-07-28/endpoint-rule-set-1.json awscli-2.15.22/awscli/botocore/data/resource-explorer-2/2022-07-28/endpoint-rule-set-1.json --- awscli-2.15.9/awscli/botocore/data/resource-explorer-2/2022-07-28/endpoint-rule-set-1.json 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/awscli/botocore/data/resource-explorer-2/2022-07-28/endpoint-rule-set-1.json 2024-02-21 17:34:54.000000000 +0000 @@ -7,6 +7,13 @@ "documentation": "The AWS region used to dispatch the request.", "type": "String" }, + "UseDualStack": { + "builtIn": "AWS::UseDualStack", + "required": true, + "default": false, + "documentation": "When true, use the dual-stack endpoint. If the configured endpoint does not support dual-stack, dispatching the request MAY return an error.", + "type": "Boolean" + }, "UseFIPS": { "builtIn": "AWS::UseFIPS", "required": true, @@ -50,6 +57,21 @@ "type": "error" }, { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { "conditions": [], "endpoint": { "url": { @@ -93,16 +115,19 @@ { "fn": "booleanEquals", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true ] } ], @@ -112,61 +137,51 @@ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } ] - } - ], - "rules": [ + }, { - "conditions": [ + "fn": "booleanEquals", + "argv": [ + true, { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] + "ref": "PartitionResult" }, - true + "supportsDualStack" ] } - ], - 
"rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://resource-explorer-2-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ], - "type": "tree" - }, + ] + } + ], + "rules": [ { "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" + "endpoint": { + "url": "https://resource-explorer-2-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ], "type": "tree" }, { "conditions": [], - "endpoint": { - "url": "https://resource-explorer-2.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" } ], "type": "tree" @@ -221,6 +236,58 @@ "type": "error" } ], + "type": "tree" + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://resource-explorer-2.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" + } + ], "type": "tree" }, { diff -Nru awscli-2.15.9/awscli/botocore/data/route53/2013-04-01/service-2.json awscli-2.15.22/awscli/botocore/data/route53/2013-04-01/service-2.json --- awscli-2.15.9/awscli/botocore/data/route53/2013-04-01/service-2.json 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/awscli/botocore/data/route53/2013-04-01/service-2.json 2024-02-21 
17:34:54.000000000 +0000 @@ -1196,6 +1196,11 @@ "pattern":".*\\S.*" }, "AWSAccountID":{"type":"string"}, + "AWSRegion":{ + "type":"string", + "max":64, + "min":1 + }, "AccountLimit":{ "type":"structure", "required":[ @@ -1286,7 +1291,7 @@ "members":{ "HostedZoneId":{ "shape":"ResourceId", - "documentation":"

Alias resource records sets only: The value used depends on where you want to route traffic:

Amazon API Gateway custom regional APIs and edge-optimized APIs

Specify the hosted zone ID for your API. You can get the applicable value using the CLI command get-domain-names:

  • For regional APIs, specify the value of regionalHostedZoneId.

  • For edge-optimized APIs, specify the value of distributionHostedZoneId.

Amazon Virtual Private Cloud interface VPC endpoint

Specify the hosted zone ID for your interface endpoint. You can get the value of HostedZoneId using the CLI command describe-vpc-endpoints.

CloudFront distribution

Specify Z2FDTNDATAQYW2.

Alias resource record sets for CloudFront can't be created in a private zone.

Elastic Beanstalk environment

Specify the hosted zone ID for the region that you created the environment in. The environment must have a regionalized subdomain. For a list of regions and the corresponding hosted zone IDs, see Elastic Beanstalk endpoints and quotas in the the Amazon Web Services General Reference.

ELB load balancer

Specify the value of the hosted zone ID for the load balancer. Use the following methods to get the hosted zone ID:

  • Elastic Load Balancing endpoints and quotas topic in the Amazon Web Services General Reference: Use the value that corresponds with the region that you created your load balancer in. Note that there are separate columns for Application and Classic Load Balancers and for Network Load Balancers.

  • Amazon Web Services Management Console: Go to the Amazon EC2 page, choose Load Balancers in the navigation pane, select the load balancer, and get the value of the Hosted zone field on the Description tab.

  • Elastic Load Balancing API: Use DescribeLoadBalancers to get the applicable value. For more information, see the applicable guide:

  • CLI: Use describe-load-balancers to get the applicable value. For more information, see the applicable guide:

Global Accelerator accelerator

Specify Z2BJ6XQ5FK7U4H.

An Amazon S3 bucket configured as a static website

Specify the hosted zone ID for the region that you created the bucket in. For more information about valid values, see the table Amazon S3 Website Endpoints in the Amazon Web Services General Reference.

Another Route 53 resource record set in your hosted zone

Specify the hosted zone ID of your hosted zone. (An alias resource record set can't reference a resource record set in a different hosted zone.)

" + "documentation":"

Alias resource records sets only: The value used depends on where you want to route traffic:

Amazon API Gateway custom regional APIs and edge-optimized APIs

Specify the hosted zone ID for your API. You can get the applicable value using the CLI command get-domain-names:

  • For regional APIs, specify the value of regionalHostedZoneId.

  • For edge-optimized APIs, specify the value of distributionHostedZoneId.

Amazon Virtual Private Cloud interface VPC endpoint

Specify the hosted zone ID for your interface endpoint. You can get the value of HostedZoneId using the CLI command describe-vpc-endpoints.

CloudFront distribution

Specify Z2FDTNDATAQYW2.

Alias resource record sets for CloudFront can't be created in a private zone.

Elastic Beanstalk environment

Specify the hosted zone ID for the region that you created the environment in. The environment must have a regionalized subdomain. For a list of regions and the corresponding hosted zone IDs, see Elastic Beanstalk endpoints and quotas in the Amazon Web Services General Reference.

ELB load balancer

Specify the value of the hosted zone ID for the load balancer. Use the following methods to get the hosted zone ID:

  • Elastic Load Balancing endpoints and quotas topic in the Amazon Web Services General Reference: Use the value that corresponds with the region that you created your load balancer in. Note that there are separate columns for Application and Classic Load Balancers and for Network Load Balancers.

  • Amazon Web Services Management Console: Go to the Amazon EC2 page, choose Load Balancers in the navigation pane, select the load balancer, and get the value of the Hosted zone field on the Description tab.

  • Elastic Load Balancing API: Use DescribeLoadBalancers to get the applicable value. For more information, see the applicable guide:

  • CLI: Use describe-load-balancers to get the applicable value. For more information, see the applicable guide:

Global Accelerator accelerator

Specify Z2BJ6XQ5FK7U4H.

An Amazon S3 bucket configured as a static website

Specify the hosted zone ID for the region that you created the bucket in. For more information about valid values, see the table Amazon S3 Website Endpoints in the Amazon Web Services General Reference.

Another Route 53 resource record set in your hosted zone

Specify the hosted zone ID of your hosted zone. (An alias resource record set can't reference a resource record set in a different hosted zone.)

" }, "DNSName":{ "shape":"DNSName", @@ -1335,6 +1340,11 @@ }, "documentation":"

A complex type that contains the response information for the AssociateVPCWithHostedZone request.

" }, + "Bias":{ + "type":"integer", + "max":99, + "min":-99 + }, "Change":{ "type":"structure", "required":[ @@ -1865,6 +1875,24 @@ "error":{"httpStatusCode":400}, "exception":true }, + "Coordinates":{ + "type":"structure", + "required":[ + "Latitude", + "Longitude" + ], + "members":{ + "Latitude":{ + "shape":"Latitude", + "documentation":"

Specifies a coordinate of the north–south position of a geographic point on the surface of the Earth (-90 - 90).

" + }, + "Longitude":{ + "shape":"Longitude", + "documentation":"

Specifies a coordinate of the east–west position of a geographic point on the surface of the Earth (-180 - 180).

" + } + }, + "documentation":"

A complex type that lists the coordinates for a geoproximity resource record.

" + }, "CreateCidrCollectionRequest":{ "type":"structure", "required":[ @@ -2324,7 +2352,7 @@ "documentation":"

The status message provided for the following DNSSEC signing status: INTERNAL_FAILURE. The status message includes information about what the problem might be and steps that you can take to correct the issue.

" } }, - "documentation":"

A string repesenting the status of DNSSEC signing.

" + "documentation":"

A string representing the status of DNSSEC signing.

" }, "DeactivateKeySigningKeyRequest":{ "type":"structure", @@ -2777,7 +2805,7 @@ }, "CountryCode":{ "shape":"GeoLocationCountryCode", - "documentation":"

For geolocation resource record sets, the two-letter code for a country.

Amazon Route 53 uses the two-letter country codes that are specified in ISO standard 3166-1 alpha-2.

Route 53 also supports the contry code UA forr Ukraine.

" + "documentation":"

For geolocation resource record sets, the two-letter code for a country.

Amazon Route 53 uses the two-letter country codes that are specified in ISO standard 3166-1 alpha-2.

Route 53 also supports the country code UA for Ukraine.

" }, "SubdivisionCode":{ "shape":"GeoLocationSubdivisionCode", @@ -2853,6 +2881,28 @@ "max":64, "min":1 }, + "GeoProximityLocation":{ + "type":"structure", + "members":{ + "AWSRegion":{ + "shape":"AWSRegion", + "documentation":"

The Amazon Web Services Region the resource you are directing DNS traffic to, is in.

" + }, + "LocalZoneGroup":{ + "shape":"LocalZoneGroup", + "documentation":"

Specifies an Amazon Web Services Local Zone Group.

A local Zone Group is usually the Local Zone code without the ending character. For example, if the Local Zone is us-east-1-bue-1a the Local Zone Group is us-east-1-bue-1.

You can identify the Local Zones Group for a specific Local Zone by using the describe-availability-zones CLI command:

This command returns: \"GroupName\": \"us-west-2-den-1\", specifying that the Local Zone us-west-2-den-1a belongs to the Local Zone Group us-west-2-den-1.

" + }, + "Coordinates":{ + "shape":"Coordinates", + "documentation":"

Contains the longitude and latitude for a geographic region.

" + }, + "Bias":{ + "shape":"Bias", + "documentation":"

The bias increases or decreases the size of the geographic region from which Route 53 routes traffic to a resource.

To use Bias to change the size of the geographic region, specify the applicable value for the bias:

" + } + }, + "documentation":"

(Resource record sets only): A complex type that lets you specify where your resources are located. Only one of LocalZoneGroup, Coordinates, or Amazon Web ServicesRegion is allowed per request at a time.

For more information about geoproximity routing, see Geoproximity routing in the Amazon Route 53 Developer Guide.

" + }, "GetAccountLimitRequest":{ "type":"structure", "required":["Type"], @@ -2946,7 +2996,7 @@ "members":{ "Status":{ "shape":"DNSSECStatus", - "documentation":"

A string repesenting the status of DNSSEC.

" + "documentation":"

A string representing the status of DNSSEC.

" }, "KeySigningKeys":{ "shape":"KeySigningKeys", @@ -2965,7 +3015,7 @@ }, "CountryCode":{ "shape":"GeoLocationCountryCode", - "documentation":"

Amazon Route 53 uses the two-letter country codes that are specified in ISO standard 3166-1 alpha-2.

Route 53 also supports the contry code UA forr Ukraine.

", + "documentation":"

Amazon Route 53 uses the two-letter country codes that are specified in ISO standard 3166-1 alpha-2.

Route 53 also supports the country code UA for Ukraine.

", "location":"querystring", "locationName":"countrycode" }, @@ -3389,7 +3439,7 @@ }, "Type":{ "shape":"HealthCheckType", - "documentation":"

The type of health check that you want to create, which indicates how Amazon Route 53 determines whether an endpoint is healthy.

You can't change the value of Type after you create a health check.

You can create the following types of health checks:

For more information, see How Route 53 Determines Whether an Endpoint Is Healthy in the Amazon Route 53 Developer Guide.

" + "documentation":"

The type of health check that you want to create, which indicates how Amazon Route 53 determines whether an endpoint is healthy.

You can't change the value of Type after you create a health check.

You can create the following types of health checks:

For more information, see How Route 53 Determines Whether an Endpoint Is Healthy in the Amazon Route 53 Developer Guide.

" }, "ResourcePath":{ "shape":"ResourcePath", @@ -4021,6 +4071,12 @@ "error":{"httpStatusCode":400}, "exception":true }, + "Latitude":{ + "type":"string", + "max":6, + "min":1, + "pattern":"[-+]?[0-9]{1,2}(\\.[0-9]{0,2})?" + }, "LimitValue":{ "type":"long", "min":1 @@ -5031,6 +5087,11 @@ }, "documentation":"

A complex type that contains the response information for the request.

" }, + "LocalZoneGroup":{ + "type":"string", + "max":64, + "min":1 + }, "LocationSummaries":{ "type":"list", "member":{"shape":"LocationSummary"} @@ -5045,6 +5106,12 @@ }, "documentation":"

A complex type that contains information about the CIDR location.

" }, + "Longitude":{ + "type":"string", + "max":7, + "min":1, + "pattern":"[-+]?[0-9]{1,3}(\\.[0-9]{0,2})?" + }, "MaxResults":{"type":"string"}, "MeasureLatency":{"type":"boolean"}, "Message":{ @@ -5403,7 +5470,7 @@ }, "GeoLocation":{ "shape":"GeoLocation", - "documentation":"

Geolocation resource record sets only: A complex type that lets you control how Amazon Route 53 responds to DNS queries based on the geographic origin of the query. For example, if you want all queries from Africa to be routed to a web server with an IP address of 192.0.2.111, create a resource record set with a Type of A and a ContinentCode of AF.

Although creating geolocation and geolocation alias resource record sets in a private hosted zone is allowed, it's not supported.

If you create separate resource record sets for overlapping geographic regions (for example, one resource record set for a continent and one for a country on the same continent), priority goes to the smallest geographic region. This allows you to route most queries for a continent to one resource and to route queries for a country on that continent to a different resource.

You can't create two geolocation resource record sets that specify the same geographic location.

The value * in the CountryCode element matches all geographic locations that aren't specified in other geolocation resource record sets that have the same values for the Name and Type elements.

Geolocation works by mapping IP addresses to locations. However, some IP addresses aren't mapped to geographic locations, so even if you create geolocation resource record sets that cover all seven continents, Route 53 will receive some DNS queries from locations that it can't identify. We recommend that you create a resource record set for which the value of CountryCode is *. Two groups of queries are routed to the resource that you specify in this record: queries that come from locations for which you haven't created geolocation resource record sets and queries from IP addresses that aren't mapped to a location. If you don't create a * resource record set, Route 53 returns a \"no answer\" response for queries from those locations.

You can't create non-geolocation resource record sets that have the same values for the Name and Type elements as geolocation resource record sets.

" + "documentation":"

Geolocation resource record sets only: A complex type that lets you control how Amazon Route 53 responds to DNS queries based on the geographic origin of the query. For example, if you want all queries from Africa to be routed to a web server with an IP address of 192.0.2.111, create a resource record set with a Type of A and a ContinentCode of AF.

If you create separate resource record sets for overlapping geographic regions (for example, one resource record set for a continent and one for a country on the same continent), priority goes to the smallest geographic region. This allows you to route most queries for a continent to one resource and to route queries for a country on that continent to a different resource.

You can't create two geolocation resource record sets that specify the same geographic location.

The value * in the CountryCode element matches all geographic locations that aren't specified in other geolocation resource record sets that have the same values for the Name and Type elements.

Geolocation works by mapping IP addresses to locations. However, some IP addresses aren't mapped to geographic locations, so even if you create geolocation resource record sets that cover all seven continents, Route 53 will receive some DNS queries from locations that it can't identify. We recommend that you create a resource record set for which the value of CountryCode is *. Two groups of queries are routed to the resource that you specify in this record: queries that come from locations for which you haven't created geolocation resource record sets and queries from IP addresses that aren't mapped to a location. If you don't create a * resource record set, Route 53 returns a \"no answer\" response for queries from those locations.

You can't create non-geolocation resource record sets that have the same values for the Name and Type elements as geolocation resource record sets.

" }, "Failover":{ "shape":"ResourceRecordSetFailover", @@ -5433,7 +5500,11 @@ "shape":"TrafficPolicyInstanceId", "documentation":"

When you create a traffic policy instance, Amazon Route 53 automatically creates a resource record set. TrafficPolicyInstanceId is the ID of the traffic policy instance that Route 53 created this resource record set for.

To delete the resource record set that is associated with a traffic policy instance, use DeleteTrafficPolicyInstance. Route 53 will delete the resource record set automatically. If you delete the resource record set by using ChangeResourceRecordSets, Route 53 doesn't automatically delete the traffic policy instance, and you'll continue to be charged for it even though it's no longer in use.

" }, - "CidrRoutingConfig":{"shape":"CidrRoutingConfig"} + "CidrRoutingConfig":{"shape":"CidrRoutingConfig"}, + "GeoProximityLocation":{ + "shape":"GeoProximityLocation", + "documentation":"

GeoproximityLocation resource record sets only: A complex type that lets you control how Route 53 responds to DNS queries based on the geographic origin of the query and your resources.

" + } }, "documentation":"

Information about the resource record set to create or delete.

" }, @@ -6112,7 +6183,7 @@ }, "FullyQualifiedDomainName":{ "shape":"FullyQualifiedDomainName", - "documentation":"

Amazon Route 53 behavior depends on whether you specify a value for IPAddress.

If a health check already has a value for IPAddress, you can change the value. However, you can't update an existing health check to add or remove the value of IPAddress.

If you specify a value for IPAddress:

Route 53 sends health check requests to the specified IPv4 or IPv6 address and passes the value of FullyQualifiedDomainName in the Host header for all health checks except TCP health checks. This is typically the fully qualified DNS name of the endpoint on which you want Route 53 to perform health checks.

When Route 53 checks the health of an endpoint, here is how it constructs the Host header:

If you don't specify a value for FullyQualifiedDomainName, Route 53 substitutes the value of IPAddress in the Host header in each of the above cases.

If you don't specify a value for IPAddress:

If you don't specify a value for IPAddress, Route 53 sends a DNS request to the domain that you specify in FullyQualifiedDomainName at the interval you specify in RequestInterval. Using an IPv4 address that is returned by DNS, Route 53 then checks the health of the endpoint.

If you don't specify a value for IPAddress, Route 53 uses only IPv4 to send health checks to the endpoint. If there's no resource record set with a type of A for the name that you specify for FullyQualifiedDomainName, the health check fails with a \"DNS resolution failed\" error.

If you want to check the health of weighted, latency, or failover resource record sets and you choose to specify the endpoint only by FullyQualifiedDomainName, we recommend that you create a separate health check for each endpoint. For example, create a health check for each HTTP server that is serving content for www.example.com. For the value of FullyQualifiedDomainName, specify the domain name of the server (such as us-east-2-www.example.com), not the name of the resource record sets (www.example.com).

In this configuration, if the value of FullyQualifiedDomainName matches the name of the resource record sets and you then associate the health check with those resource record sets, health check results will be unpredictable.

In addition, if the value of Type is HTTP, HTTPS, HTTP_STR_MATCH, or HTTPS_STR_MATCH, Route 53 passes the value of FullyQualifiedDomainName in the Host header, as it does when you specify a value for IPAddress. If the value of Type is TCP, Route 53 doesn't pass a Host header.

" + "documentation":"

Amazon Route 53 behavior depends on whether you specify a value for IPAddress.

If a health check already has a value for IPAddress, you can change the value. However, you can't update an existing health check to add or remove the value of IPAddress.

If you specify a value for IPAddress:

Route 53 sends health check requests to the specified IPv4 or IPv6 address and passes the value of FullyQualifiedDomainName in the Host header for all health checks except TCP health checks. This is typically the fully qualified DNS name of the endpoint on which you want Route 53 to perform health checks.

When Route 53 checks the health of an endpoint, here is how it constructs the Host header:

If you don't specify a value for FullyQualifiedDomainName, Route 53 substitutes the value of IPAddress in the Host header in each of the above cases.

If you don't specify a value for IPAddress:

If you don't specify a value for IPAddress, Route 53 sends a DNS request to the domain that you specify in FullyQualifiedDomainName at the interval you specify in RequestInterval. Using an IPv4 address that is returned by DNS, Route 53 then checks the health of the endpoint.

If you don't specify a value for IPAddress, you can’t update the health check to remove the FullyQualifiedDomainName; if you don’t specify a value for IPAddress on creation, a FullyQualifiedDomainName is required.

If you don't specify a value for IPAddress, Route 53 uses only IPv4 to send health checks to the endpoint. If there's no resource record set with a type of A for the name that you specify for FullyQualifiedDomainName, the health check fails with a \"DNS resolution failed\" error.

If you want to check the health of weighted, latency, or failover resource record sets and you choose to specify the endpoint only by FullyQualifiedDomainName, we recommend that you create a separate health check for each endpoint. For example, create a health check for each HTTP server that is serving content for www.example.com. For the value of FullyQualifiedDomainName, specify the domain name of the server (such as us-east-2-www.example.com), not the name of the resource record sets (www.example.com).

In this configuration, if the value of FullyQualifiedDomainName matches the name of the resource record sets and you then associate the health check with those resource record sets, health check results will be unpredictable.

In addition, if the value of Type is HTTP, HTTPS, HTTP_STR_MATCH, or HTTPS_STR_MATCH, Route 53 passes the value of FullyQualifiedDomainName in the Host header, as it does when you specify a value for IPAddress. If the value of Type is TCP, Route 53 doesn't pass a Host header.

" }, "SearchString":{ "shape":"SearchString", diff -Nru awscli-2.15.9/awscli/botocore/data/route53domains/2014-05-15/endpoint-rule-set-1.json awscli-2.15.22/awscli/botocore/data/route53domains/2014-05-15/endpoint-rule-set-1.json --- awscli-2.15.9/awscli/botocore/data/route53domains/2014-05-15/endpoint-rule-set-1.json 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/awscli/botocore/data/route53domains/2014-05-15/endpoint-rule-set-1.json 2024-02-21 17:34:54.000000000 +0000 @@ -40,7 +40,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -83,7 +82,8 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -96,7 +96,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -110,7 +109,6 @@ "assign": "PartitionResult" } ], - "type": "tree", "rules": [ { "conditions": [ @@ -133,7 +131,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -168,7 +165,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -179,14 +175,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS and DualStack are enabled, but this partition does not support one or both", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -200,14 +198,12 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ - true, { "fn": "getAttr", "argv": [ @@ -216,11 +212,11 @@ }, "supportsFIPS" ] - } + }, + true ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -231,14 +227,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS is enabled but this partition does not support FIPS", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -252,7 +250,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -272,7 +269,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -283,14 +279,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "DualStack is enabled but this 
partition does not support DualStack", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [], @@ -301,9 +299,11 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" } - ] + ], + "type": "tree" }, { "conditions": [], diff -Nru awscli-2.15.9/awscli/botocore/data/route53domains/2014-05-15/service-2.json awscli-2.15.22/awscli/botocore/data/route53domains/2014-05-15/service-2.json --- awscli-2.15.9/awscli/botocore/data/route53domains/2014-05-15/service-2.json 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/awscli/botocore/data/route53domains/2014-05-15/service-2.json 2024-02-21 17:34:54.000000000 +0000 @@ -432,7 +432,7 @@ {"shape":"DomainLimitExceeded"}, {"shape":"OperationLimitExceeded"} ], - "documentation":"

Transfers a domain from another registrar to Amazon Route 53.

For more information about transferring domains, see the following topics:

If the registrar for your domain is also the DNS service provider for the domain, we highly recommend that you transfer your DNS service to Route 53 or to another DNS service provider before you transfer your registration. Some registrars provide free DNS service when you purchase a domain registration. When you transfer the registration, the previous registrar will not renew your domain registration and could end your DNS service at any time.

If the registrar for your domain is also the DNS service provider for the domain and you don't transfer DNS service to another provider, your website, email, and the web applications associated with the domain might become unavailable.

If the transfer is successful, this method returns an operation ID that you can use to track the progress and completion of the action. If the transfer doesn't complete successfully, the domain registrant will be notified by email.

" + "documentation":"

Transfers a domain from another registrar to Amazon Route 53.

For more information about transferring domains, see the following topics:

During the transfer of any country code top-level domains (ccTLDs) to Route 53, except for .cc and .tv, updates to the owner contact are ignored and the owner contact data from the registry is used. You can update the owner contact after the transfer is complete. For more information, see UpdateDomainContact.

If the registrar for your domain is also the DNS service provider for the domain, we highly recommend that you transfer your DNS service to Route 53 or to another DNS service provider before you transfer your registration. Some registrars provide free DNS service when you purchase a domain registration. When you transfer the registration, the previous registrar will not renew your domain registration and could end your DNS service at any time.

If the registrar for your domain is also the DNS service provider for the domain and you don't transfer DNS service to another provider, your website, email, and the web applications associated with the domain might become unavailable.

If the transfer is successful, this method returns an operation ID that you can use to track the progress and completion of the action. If the transfer doesn't complete successfully, the domain registrant will be notified by email.

" }, "TransferDomainToAnotherAwsAccount":{ "name":"TransferDomainToAnotherAwsAccount", @@ -668,7 +668,7 @@ "members":{ "Availability":{ "shape":"DomainAvailability", - "documentation":"

Whether the domain name is available for registering.

You can register only domains designated as AVAILABLE.

Valid values:

AVAILABLE

The domain name is available.

AVAILABLE_RESERVED

The domain name is reserved under specific conditions.

AVAILABLE_PREORDER

The domain name is available and can be preordered.

DONT_KNOW

The TLD registry didn't reply with a definitive answer about whether the domain name is available. Route 53 can return this response for a variety of reasons, for example, the registry is performing maintenance. Try again later.

PENDING

The TLD registry didn't return a response in the expected amount of time. When the response is delayed, it usually takes just a few extra seconds. You can resubmit the request immediately.

RESERVED

The domain name has been reserved for another person or organization.

UNAVAILABLE

The domain name is not available.

UNAVAILABLE_PREMIUM

The domain name is not available.

UNAVAILABLE_RESTRICTED

The domain name is forbidden.

" + "documentation":"

Whether the domain name is available for registering.

You can register only domains designated as AVAILABLE.

Valid values:

AVAILABLE

The domain name is available.

AVAILABLE_RESERVED

The domain name is reserved under specific conditions.

AVAILABLE_PREORDER

The domain name is available and can be preordered.

DONT_KNOW

The TLD registry didn't reply with a definitive answer about whether the domain name is available. Route 53 can return this response for a variety of reasons, for example, the registry is performing maintenance. Try again later.

INVALID_NAME_FOR_TLD

The TLD isn't valid. For example, it can contain characters that aren't allowed.

PENDING

The TLD registry didn't return a response in the expected amount of time. When the response is delayed, it usually takes just a few extra seconds. You can resubmit the request immediately.

RESERVED

The domain name has been reserved for another person or organization.

UNAVAILABLE

The domain name is not available.

UNAVAILABLE_PREMIUM

The domain name is not available.

UNAVAILABLE_RESTRICTED

The domain name is forbidden.

" } }, "documentation":"

The CheckDomainAvailability response includes the following elements.

" @@ -1259,7 +1259,9 @@ "UNAVAILABLE_PREMIUM", "UNAVAILABLE_RESTRICTED", "RESERVED", - "DONT_KNOW" + "DONT_KNOW", + "INVALID_NAME_FOR_TLD", + "PENDING" ] }, "DomainLimitExceeded":{ @@ -1592,19 +1594,19 @@ }, "AdminPrivacy":{ "shape":"Boolean", - "documentation":"

Specifies whether contact information is concealed from WHOIS queries. If the value is true, WHOIS (\"who is\") queries return contact information either for Amazon Registrar (for .com, .net, and .org domains) or for our registrar associate, Gandi (for all other TLDs). If the value is false, WHOIS queries return the information that you entered for the admin contact.

" + "documentation":"

Specifies whether contact information is concealed from WHOIS queries. If the value is true, WHOIS (\"who is\") queries return contact information either for Amazon Registrar or for our registrar associate, Gandi. If the value is false, WHOIS queries return the information that you entered for the admin contact.

" }, "RegistrantPrivacy":{ "shape":"Boolean", - "documentation":"

Specifies whether contact information is concealed from WHOIS queries. If the value is true, WHOIS (\"who is\") queries return contact information either for Amazon Registrar (for .com, .net, and .org domains) or for our registrar associate, Gandi (for all other TLDs). If the value is false, WHOIS queries return the information that you entered for the registrant contact (domain owner).

" + "documentation":"

Specifies whether contact information is concealed from WHOIS queries. If the value is true, WHOIS (\"who is\") queries return contact information either for Amazon Registrar or for our registrar associate, Gandi. If the value is false, WHOIS queries return the information that you entered for the registrant contact (domain owner).

" }, "TechPrivacy":{ "shape":"Boolean", - "documentation":"

Specifies whether contact information is concealed from WHOIS queries. If the value is true, WHOIS (\"who is\") queries return contact information either for Amazon Registrar (for .com, .net, and .org domains) or for our registrar associate, Gandi (for all other TLDs). If the value is false, WHOIS queries return the information that you entered for the technical contact.

" + "documentation":"

Specifies whether contact information is concealed from WHOIS queries. If the value is true, WHOIS (\"who is\") queries return contact information either for Amazon Registrar or for our registrar associate, Gandi. If the value is false, WHOIS queries return the information that you entered for the technical contact.

" }, "RegistrarName":{ "shape":"RegistrarName", - "documentation":"

Name of the registrar of the domain as identified in the registry. Domains with a .com, .net, or .org TLD are registered by Amazon Registrar. All other domains are registered by our registrar associate, Gandi. The value for domains that are registered by Gandi is \"GANDI SAS\".

" + "documentation":"

Name of the registrar of the domain as identified in the registry.

" }, "WhoIsServer":{ "shape":"RegistrarWhoIsServer", @@ -1653,6 +1655,14 @@ "DnssecKeys":{ "shape":"DnssecKeyList", "documentation":"

A complex type that contains information about the DNSSEC configuration.

" + }, + "BillingContact":{ + "shape":"ContactDetail", + "documentation":"

Provides details about the domain billing contact.

" + }, + "BillingPrivacy":{ + "shape":"Boolean", + "documentation":"

Specifies whether contact information is concealed from WHOIS queries. If the value is true, WHOIS (\"who is\") queries return contact information either for Amazon Registrar or for our registrar associate, Gandi. If the value is false, WHOIS queries return the information that you entered for the billing contact.

" } }, "documentation":"

The GetDomainDetail response includes the following elements.

" @@ -2035,13 +2045,15 @@ "RENEW_DOMAIN", "PUSH_DOMAIN", "INTERNAL_TRANSFER_OUT_DOMAIN", - "INTERNAL_TRANSFER_IN_DOMAIN" + "INTERNAL_TRANSFER_IN_DOMAIN", + "RELEASE_TO_GANDI", + "TRANSFER_ON_RENEW" ] }, "OperationTypeList":{ "type":"list", "member":{"shape":"OperationType"}, - "max":18 + "max":20 }, "Operator":{ "type":"string", @@ -2147,15 +2159,23 @@ }, "PrivacyProtectAdminContact":{ "shape":"Boolean", - "documentation":"

Whether you want to conceal contact information from WHOIS queries. If you specify true, WHOIS (\"who is\") queries return contact information either for Amazon Registrar (for .com, .net, and .org domains) or for our registrar associate, Gandi (for all other TLDs). If you specify false, WHOIS queries return the information that you entered for the admin contact.

You must specify the same privacy setting for the administrative, registrant, and technical contacts.

Default: true

" + "documentation":"

Whether you want to conceal contact information from WHOIS queries. If you specify true, WHOIS (\"who is\") queries return contact information either for Amazon Registrar or for our registrar associate, Gandi. If you specify false, WHOIS queries return the information that you entered for the admin contact.

You must specify the same privacy setting for the administrative, billing, registrant, and technical contacts.

Default: true

" }, "PrivacyProtectRegistrantContact":{ "shape":"Boolean", - "documentation":"

Whether you want to conceal contact information from WHOIS queries. If you specify true, WHOIS (\"who is\") queries return contact information either for Amazon Registrar (for .com, .net, and .org domains) or for our registrar associate, Gandi (for all other TLDs). If you specify false, WHOIS queries return the information that you entered for the registrant contact (the domain owner).

You must specify the same privacy setting for the administrative, registrant, and technical contacts.

Default: true

" + "documentation":"

Whether you want to conceal contact information from WHOIS queries. If you specify true, WHOIS (\"who is\") queries return contact information either for Amazon Registrar or for our registrar associate, Gandi. If you specify false, WHOIS queries return the information that you entered for the registrant contact (the domain owner).

You must specify the same privacy setting for the administrative, billing, registrant, and technical contacts.

Default: true

" }, "PrivacyProtectTechContact":{ "shape":"Boolean", - "documentation":"

Whether you want to conceal contact information from WHOIS queries. If you specify true, WHOIS (\"who is\") queries return contact information either for Amazon Registrar (for .com, .net, and .org domains) or for our registrar associate, Gandi (for all other TLDs). If you specify false, WHOIS queries return the information that you entered for the technical contact.

You must specify the same privacy setting for the administrative, registrant, and technical contacts.

Default: true

" + "documentation":"

Whether you want to conceal contact information from WHOIS queries. If you specify true, WHOIS (\"who is\") queries return contact information either for Amazon Registrar or for our registrar associate, Gandi. If you specify false, WHOIS queries return the information that you entered for the technical contact.

You must specify the same privacy setting for the administrative, billing, registrant, and technical contacts.

Default: true

" + }, + "BillingContact":{ + "shape":"ContactDetail", + "documentation":"

Provides detailed contact information. For information about the values that you specify for each element, see ContactDetail.

" + }, + "PrivacyProtectBillingContact":{ + "shape":"Boolean", + "documentation":"

Whether you want to conceal contact information from WHOIS queries. If you specify true, WHOIS (\"who is\") queries return contact information either for Amazon Registrar or for our registrar associate, Gandi. If you specify false, WHOIS queries return the information that you entered for the billing contact.

You must specify the same privacy setting for the administrative, billing, registrant, and technical contacts.

" } }, "documentation":"

The RegisterDomain request includes the following elements.

" @@ -2427,11 +2447,19 @@ }, "PrivacyProtectRegistrantContact":{ "shape":"Boolean", - "documentation":"

Whether you want to conceal contact information from WHOIS queries. If you specify true, WHOIS (\"who is\") queries return contact information either for Amazon Registrar (for .com, .net, and .org domains) or for our registrar associate, Gandi (for all other TLDs). If you specify false, WHOIS queries return the information that you entered for the registrant contact (domain owner).

You must specify the same privacy setting for the administrative, registrant, and technical contacts.

Default: true

" + "documentation":"

Whether you want to conceal contact information from WHOIS queries. If you specify true, WHOIS (\"who is\") queries return contact information either for Amazon Registrar or for our registrar associate, Gandi. If you specify false, WHOIS queries return the information that you entered for the registrant contact (domain owner).

You must specify the same privacy setting for the administrative, billing, registrant, and technical contacts.

Default: true

" }, "PrivacyProtectTechContact":{ "shape":"Boolean", - "documentation":"

Whether you want to conceal contact information from WHOIS queries. If you specify true, WHOIS (\"who is\") queries return contact information either for Amazon Registrar (for .com, .net, and .org domains) or for our registrar associate, Gandi (for all other TLDs). If you specify false, WHOIS queries return the information that you entered for the technical contact.

You must specify the same privacy setting for the administrative, registrant, and technical contacts.

Default: true

" + "documentation":"

Whether you want to conceal contact information from WHOIS queries. If you specify true, WHOIS (\"who is\") queries return contact information either for Amazon Registrar or for our registrar associate, Gandi. If you specify false, WHOIS queries return the information that you entered for the technical contact.

You must specify the same privacy setting for the administrative, billing, registrant, and technical contacts.

Default: true

" + }, + "BillingContact":{ + "shape":"ContactDetail", + "documentation":"

Provides detailed contact information.

" + }, + "PrivacyProtectBillingContact":{ + "shape":"Boolean", + "documentation":"

Whether you want to conceal contact information from WHOIS queries. If you specify true, WHOIS (\"who is\") queries return contact information either for Amazon Registrar or for our registrar associate, Gandi. If you specify false, WHOIS queries return the information that you entered for the billing contact.

You must specify the same privacy setting for the administrative, billing, registrant, and technical contacts.

" } }, "documentation":"

The TransferDomain request includes the following elements.

" @@ -2480,7 +2508,7 @@ }, "Transferable":{ "type":"string", - "documentation":"

Whether the domain name can be transferred to Route 53.

You can transfer only domains that have a value of TRANSFERABLE or Transferable.

Valid values:

TRANSFERABLE

The domain name can be transferred to Route 53.

UNTRANSFERRABLE

The domain name can't be transferred to Route 53.

DONT_KNOW

Reserved for future use.

DOMAIN_IN_OWN_ACCOUNT

The domain already exists in the current Amazon Web Services account.

DOMAIN_IN_ANOTHER_ACCOUNT

the domain exists in another Amazon Web Services account.

PREMIUM_DOMAIN

Premium domain transfer is not supported.

", + "documentation":"

Whether the domain name can be transferred to Route 53.

You can transfer only domains that have a value of TRANSFERABLE or Transferable.

Valid values:

TRANSFERABLE

The domain name can be transferred to Route 53.

UNTRANSFERRABLE

The domain name can't be transferred to Route 53.

DONT_KNOW

Reserved for future use.

DOMAIN_IN_OWN_ACCOUNT

The domain already exists in the current Amazon Web Services account.

DOMAIN_IN_ANOTHER_ACCOUNT

The domain exists in another Amazon Web Services account.

PREMIUM_DOMAIN

Premium domain transfer is not supported.

", "enum":[ "TRANSFERABLE", "UNTRANSFERABLE", @@ -2511,15 +2539,19 @@ }, "AdminPrivacy":{ "shape":"Boolean", - "documentation":"

Whether you want to conceal contact information from WHOIS queries. If you specify true, WHOIS (\"who is\") queries return contact information either for Amazon Registrar (for .com, .net, and .org domains) or for our registrar associate, Gandi (for all other TLDs). If you specify false, WHOIS queries return the information that you entered for the admin contact.

You must specify the same privacy setting for the administrative, registrant, and technical contacts.

" + "documentation":"

Whether you want to conceal contact information from WHOIS queries. If you specify true, WHOIS (\"who is\") queries return contact information either for Amazon Registrar or for our registrar associate, Gandi. If you specify false, WHOIS queries return the information that you entered for the admin contact.

You must specify the same privacy setting for the administrative, billing, registrant, and technical contacts.

" }, "RegistrantPrivacy":{ "shape":"Boolean", - "documentation":"

Whether you want to conceal contact information from WHOIS queries. If you specify true, WHOIS (\"who is\") queries return contact information either for Amazon Registrar (for .com, .net, and .org domains) or for our registrar associate, Gandi (for all other TLDs). If you specify false, WHOIS queries return the information that you entered for the registrant contact (domain owner).

You must specify the same privacy setting for the administrative, registrant, and technical contacts.

" + "documentation":"

Whether you want to conceal contact information from WHOIS queries. If you specify true, WHOIS (\"who is\") queries return contact information either for Amazon Registrar or for our registrar associate, Gandi. If you specify false, WHOIS queries return the information that you entered for the registrant contact (domain owner).

You must specify the same privacy setting for the administrative, billing, registrant, and technical contacts.

" }, "TechPrivacy":{ "shape":"Boolean", - "documentation":"

Whether you want to conceal contact information from WHOIS queries. If you specify true, WHOIS (\"who is\") queries return contact information either for Amazon Registrar (for .com, .net, and .org domains) or for our registrar associate, Gandi (for all other TLDs). If you specify false, WHOIS queries return the information that you entered for the technical contact.

You must specify the same privacy setting for the administrative, registrant, and technical contacts.

" + "documentation":"

Whether you want to conceal contact information from WHOIS queries. If you specify true, WHOIS (\"who is\") queries return contact information either for Amazon Registrar or for our registrar associate, Gandi. If you specify false, WHOIS queries return the information that you entered for the technical contact.

You must specify the same privacy setting for the administrative, billing, registrant, and technical contacts.

" + }, + "BillingPrivacy":{ + "shape":"Boolean", + "documentation":"

Whether you want to conceal contact information from WHOIS queries. If you specify true, WHOIS (\"who is\") queries return contact information either for Amazon Registrar or for our registrar associate, Gandi. If you specify false, WHOIS queries return the information that you entered for the billing contact.

You must specify the same privacy setting for the administrative, billing, registrant, and technical contacts.

" } }, "documentation":"

The UpdateDomainContactPrivacy request includes the following elements.

" @@ -2557,6 +2589,10 @@ "Consent":{ "shape":"Consent", "documentation":"

Customer's consent for the owner change request. Required if the domain is not free (consent price is more than $0.00).

" + }, + "BillingContact":{ + "shape":"ContactDetail", + "documentation":"

Provides detailed contact information.

" } }, "documentation":"

The UpdateDomainContact request includes the following elements.

" diff -Nru awscli-2.15.9/awscli/botocore/data/s3control/2018-08-20/endpoint-rule-set-1.json awscli-2.15.22/awscli/botocore/data/s3control/2018-08-20/endpoint-rule-set-1.json --- awscli-2.15.9/awscli/botocore/data/s3control/2018-08-20/endpoint-rule-set-1.json 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/awscli/botocore/data/s3control/2018-08-20/endpoint-rule-set-1.json 2024-02-21 17:34:54.000000000 +0000 @@ -311,6 +311,29 @@ { "conditions": [ { + "fn": "isSet", + "argv": [ + { + "ref": "Endpoint" + } + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "error": "Invalid Configuration: DualStack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [ + { "fn": "isValidHostLabel", "argv": [ { @@ -324,21 +347,6 @@ { "conditions": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid configuration: Outposts do not support dual-stack", - "type": "error" - }, - { - "conditions": [ - { "fn": "isSet", "argv": [ { @@ -382,6 +390,43 @@ }, true ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "endpoint": { + "url": "https://s3-outposts-fips.{Region}.{partitionResult#dualStackDnsSuffix}", + "properties": { + "authSchemes": [ + { + "disableDoubleEncoding": true, + "name": "sigv4", + "signingName": "s3-outposts", + "signingRegion": "{Region}" + } + ] + }, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] } ], "endpoint": { @@ -401,6 +446,34 @@ "type": "endpoint" }, { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "endpoint": { + "url": "https://s3-outposts.{Region}.{partitionResult#dualStackDnsSuffix}", + "properties": { + "authSchemes": [ + { + "disableDoubleEncoding": true, + "name": "sigv4", + "signingName": 
"s3-outposts", + "signingRegion": "{Region}" + } + ] + }, + "headers": {} + }, + "type": "endpoint" + }, + { "conditions": [], "endpoint": { "url": "https://s3-outposts.{Region}.{partitionResult#dnsSuffix}", @@ -503,21 +576,6 @@ { "conditions": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid configuration: Outpost Access Points do not support dual-stack", - "type": "error" - }, - { - "conditions": [ - { "fn": "getAttr", "argv": [ { @@ -548,6 +606,29 @@ "fn": "isSet", "argv": [ { + "ref": "Endpoint" + } + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "error": "Invalid Configuration: DualStack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { "ref": "UseArnRegion" } ] @@ -785,6 +866,50 @@ }, true ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "endpoint": { + "url": "https://s3-outposts-fips.{accessPointArn#region}.{arnPartition#dualStackDnsSuffix}", + "properties": { + "authSchemes": [ + { + "disableDoubleEncoding": true, + "name": "sigv4", + "signingName": "s3-outposts", + "signingRegion": "{accessPointArn#region}" + } + ] + }, + "headers": { + "x-amz-account-id": [ + "{accessPointArn#accountId}" + ], + "x-amz-outpost-id": [ + "{outpostId}" + ] + } + }, + "type": "endpoint" + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] } ], "endpoint": { @@ -813,6 +938,41 @@ { "conditions": [ { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "endpoint": { + "url": "https://s3-outposts.{accessPointArn#region}.{arnPartition#dualStackDnsSuffix}", + "properties": { + "authSchemes": [ + { + "disableDoubleEncoding": true, + "name": "sigv4", + "signingName": "s3-outposts", + "signingRegion": "{accessPointArn#region}" + } + ] + }, + 
"headers": { + "x-amz-account-id": [ + "{accessPointArn#accountId}" + ], + "x-amz-outpost-id": [ + "{outpostId}" + ] + } + }, + "type": "endpoint" + }, + { + "conditions": [ + { "fn": "isSet", "argv": [ { @@ -1041,21 +1201,6 @@ { "conditions": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid configuration: Outpost buckets do not support dual-stack", - "type": "error" - }, - { - "conditions": [ - { "fn": "getAttr", "argv": [ { @@ -1086,6 +1231,29 @@ "fn": "isSet", "argv": [ { + "ref": "Endpoint" + } + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "error": "Invalid Configuration: DualStack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { "ref": "UseArnRegion" } ] @@ -1323,6 +1491,50 @@ }, true ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "endpoint": { + "url": "https://s3-outposts-fips.{bucketArn#region}.{arnPartition#dualStackDnsSuffix}", + "properties": { + "authSchemes": [ + { + "disableDoubleEncoding": true, + "name": "sigv4", + "signingName": "s3-outposts", + "signingRegion": "{bucketArn#region}" + } + ] + }, + "headers": { + "x-amz-account-id": [ + "{bucketArn#accountId}" + ], + "x-amz-outpost-id": [ + "{outpostId}" + ] + } + }, + "type": "endpoint" + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] } ], "endpoint": { @@ -1330,6 +1542,41 @@ "properties": { "authSchemes": [ { + "disableDoubleEncoding": true, + "name": "sigv4", + "signingName": "s3-outposts", + "signingRegion": "{bucketArn#region}" + } + ] + }, + "headers": { + "x-amz-account-id": [ + "{bucketArn#accountId}" + ], + "x-amz-outpost-id": [ + "{outpostId}" + ] + } + }, + "type": "endpoint" + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + 
] + } + ], + "endpoint": { + "url": "https://s3-outposts.{bucketArn#region}.{arnPartition#dualStackDnsSuffix}", + "properties": { + "authSchemes": [ + { "disableDoubleEncoding": true, "name": "sigv4", "signingName": "s3-outposts", diff -Nru awscli-2.15.9/awscli/botocore/data/sagemaker/2017-07-24/endpoint-rule-set-1.json awscli-2.15.22/awscli/botocore/data/sagemaker/2017-07-24/endpoint-rule-set-1.json --- awscli-2.15.9/awscli/botocore/data/sagemaker/2017-07-24/endpoint-rule-set-1.json 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/awscli/botocore/data/sagemaker/2017-07-24/endpoint-rule-set-1.json 2024-02-21 17:34:54.000000000 +0000 @@ -262,7 +262,7 @@ } ], "endpoint": { - "url": "https://api-fips.sagemaker.{Region}.amazonaws.com", + "url": "https://api.sagemaker.{Region}.amazonaws.com", "properties": {}, "headers": {} }, diff -Nru awscli-2.15.9/awscli/botocore/data/sagemaker/2017-07-24/service-2.json awscli-2.15.22/awscli/botocore/data/sagemaker/2017-07-24/service-2.json --- awscli-2.15.9/awscli/botocore/data/sagemaker/2017-07-24/service-2.json 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/awscli/botocore/data/sagemaker/2017-07-24/service-2.json 2024-02-21 17:34:54.000000000 +0000 @@ -110,7 +110,7 @@ "errors":[ {"shape":"ResourceInUse"} ], - "documentation":"

Creates a configuration for running a SageMaker image as a KernelGateway app. The configuration specifies the Amazon Elastic File System (EFS) storage volume on the image, and a list of the kernels in the image.

" + "documentation":"

Creates a configuration for running a SageMaker image as a KernelGateway app. The configuration specifies the Amazon Elastic File System storage volume on the image, and a list of the kernels in the image.

" }, "CreateArtifact":{ "name":"CreateArtifact", @@ -243,7 +243,7 @@ {"shape":"ResourceLimitExceeded"}, {"shape":"ResourceInUse"} ], - "documentation":"

Creates a Domain. A domain consists of an associated Amazon Elastic File System (EFS) volume, a list of authorized users, and a variety of security, application, policy, and Amazon Virtual Private Cloud (VPC) configurations. Users within a domain can share notebook files and other artifacts with each other.

EFS storage

When a domain is created, an EFS volume is created for use by all of the users within the domain. Each user receives a private home directory within the EFS volume for notebooks, Git repositories, and data files.

SageMaker uses the Amazon Web Services Key Management Service (Amazon Web Services KMS) to encrypt the EFS volume attached to the domain with an Amazon Web Services managed key by default. For more control, you can specify a customer managed key. For more information, see Protect Data at Rest Using Encryption.

VPC configuration

All traffic between the domain and the EFS volume is through the specified VPC and subnets. For other traffic, you can specify the AppNetworkAccessType parameter. AppNetworkAccessType corresponds to the network access type that you choose when you onboard to the domain. The following options are available:

NFS traffic over TCP on port 2049 needs to be allowed in both inbound and outbound rules in order to launch a Amazon SageMaker Studio app successfully.

For more information, see Connect Amazon SageMaker Studio Notebooks to Resources in a VPC.

" + "documentation":"

Creates a Domain. A domain consists of an associated Amazon Elastic File System volume, a list of authorized users, and a variety of security, application, policy, and Amazon Virtual Private Cloud (VPC) configurations. Users within a domain can share notebook files and other artifacts with each other.

EFS storage

When a domain is created, an EFS volume is created for use by all of the users within the domain. Each user receives a private home directory within the EFS volume for notebooks, Git repositories, and data files.

SageMaker uses the Amazon Web Services Key Management Service (Amazon Web Services KMS) to encrypt the EFS volume attached to the domain with an Amazon Web Services managed key by default. For more control, you can specify a customer managed key. For more information, see Protect Data at Rest Using Encryption.

VPC configuration

All traffic between the domain and the Amazon EFS volume is through the specified VPC and subnets. For other traffic, you can specify the AppNetworkAccessType parameter. AppNetworkAccessType corresponds to the network access type that you choose when you onboard to the domain. The following options are available:

NFS traffic over TCP on port 2049 needs to be allowed in both inbound and outbound rules in order to launch a Amazon SageMaker Studio app successfully.

For more information, see Connect Amazon SageMaker Studio Notebooks to Resources in a VPC.

" }, "CreateEdgeDeploymentPlan":{ "name":"CreateEdgeDeploymentPlan", @@ -403,7 +403,7 @@ {"shape":"ResourceInUse"}, {"shape":"ResourceLimitExceeded"} ], - "documentation":"

Creates a custom SageMaker image. A SageMaker image is a set of image versions. Each image version represents a container image stored in Amazon Elastic Container Registry (ECR). For more information, see Bring your own SageMaker image.

" + "documentation":"

Creates a custom SageMaker image. A SageMaker image is a set of image versions. Each image version represents a container image stored in Amazon ECR. For more information, see Bring your own SageMaker image.

" }, "CreateImageVersion":{ "name":"CreateImageVersion", @@ -418,7 +418,7 @@ {"shape":"ResourceLimitExceeded"}, {"shape":"ResourceNotFound"} ], - "documentation":"

Creates a version of the SageMaker image specified by ImageName. The version represents the Amazon Elastic Container Registry (ECR) container image specified by BaseImage.

" + "documentation":"

Creates a version of the SageMaker image specified by ImageName. The version represents the Amazon ECR container image specified by BaseImage.

" }, "CreateInferenceComponent":{ "name":"CreateInferenceComponent", @@ -624,7 +624,7 @@ "errors":[ {"shape":"ResourceLimitExceeded"} ], - "documentation":"

Creates a lifecycle configuration that you can associate with a notebook instance. A lifecycle configuration is a collection of shell scripts that run when you create or start a notebook instance.

Each lifecycle configuration script has a limit of 16384 characters.

The value of the $PATH environment variable that is available to both scripts is /sbin:bin:/usr/sbin:/usr/bin.

View CloudWatch Logs for notebook instance lifecycle configurations in log group /aws/sagemaker/NotebookInstances in log stream [notebook-instance-name]/[LifecycleConfigHook].

Lifecycle configuration scripts cannot run for longer than 5 minutes. If a script runs for longer than 5 minutes, it fails and the notebook instance is not created or started.

For information about notebook instance lifestyle configurations, see Step 2.1: (Optional) Customize a Notebook Instance.

" + "documentation":"

Creates a lifecycle configuration that you can associate with a notebook instance. A lifecycle configuration is a collection of shell scripts that run when you create or start a notebook instance.

Each lifecycle configuration script has a limit of 16384 characters.

The value of the $PATH environment variable that is available to both scripts is /sbin:bin:/usr/sbin:/usr/bin.

View Amazon CloudWatch Logs for notebook instance lifecycle configurations in log group /aws/sagemaker/NotebookInstances in log stream [notebook-instance-name]/[LifecycleConfigHook].

Lifecycle configuration scripts cannot run for longer than 5 minutes. If a script runs for longer than 5 minutes, it fails and the notebook instance is not created or started.

For information about notebook instance lifestyle configurations, see Step 2.1: (Optional) Customize a Notebook Instance.

" }, "CreatePipeline":{ "name":"CreatePipeline", @@ -652,7 +652,7 @@ "errors":[ {"shape":"ResourceNotFound"} ], - "documentation":"

Creates a URL for a specified UserProfile in a Domain. When accessed in a web browser, the user will be automatically signed in to the domain, and granted access to all of the Apps and files associated with the Domain's Amazon Elastic File System (EFS) volume. This operation can only be called when the authentication mode equals IAM.

The IAM role or user passed to this API defines the permissions to access the app. Once the presigned URL is created, no additional permission is required to access this URL. IAM authorization policies for this API are also enforced for every HTTP request and WebSocket frame that attempts to connect to the app.

You can restrict access to this API and to the URL that it returns to a list of IP addresses, Amazon VPCs or Amazon VPC Endpoints that you specify. For more information, see Connect to Amazon SageMaker Studio Through an Interface VPC Endpoint .

The URL that you get from a call to CreatePresignedDomainUrl has a default timeout of 5 minutes. You can configure this value using ExpiresInSeconds. If you try to use the URL after the timeout limit expires, you are directed to the Amazon Web Services console sign-in page.

" + "documentation":"

Creates a URL for a specified UserProfile in a Domain. When accessed in a web browser, the user will be automatically signed in to the domain, and granted access to all of the Apps and files associated with the Domain's Amazon Elastic File System volume. This operation can only be called when the authentication mode equals IAM.

The IAM role or user passed to this API defines the permissions to access the app. Once the presigned URL is created, no additional permission is required to access this URL. IAM authorization policies for this API are also enforced for every HTTP request and WebSocket frame that attempts to connect to the app.

You can restrict access to this API and to the URL that it returns to a list of IP addresses, Amazon VPCs or Amazon VPC Endpoints that you specify. For more information, see Connect to Amazon SageMaker Studio Through an Interface VPC Endpoint .

The URL that you get from a call to CreatePresignedDomainUrl has a default timeout of 5 minutes. You can configure this value using ExpiresInSeconds. If you try to use the URL after the timeout limit expires, you are directed to the Amazon Web Services console sign-in page.

" }, "CreatePresignedNotebookInstanceUrl":{ "name":"CreatePresignedNotebookInstanceUrl", @@ -704,7 +704,7 @@ {"shape":"ResourceLimitExceeded"}, {"shape":"ResourceInUse"} ], - "documentation":"

Creates a space used for real time collaboration in a Domain.

" + "documentation":"

Creates a space used for real time collaboration in a domain.

" }, "CreateStudioLifecycleConfig":{ "name":"CreateStudioLifecycleConfig", @@ -788,7 +788,7 @@ {"shape":"ResourceLimitExceeded"}, {"shape":"ResourceInUse"} ], - "documentation":"

Creates a user profile. A user profile represents a single user within a domain, and is the main way to reference a \"person\" for the purposes of sharing, reporting, and other user-oriented features. This entity is created when a user onboards to a domain. If an administrator invites a person by email or imports them from IAM Identity Center, a user profile is automatically created. A user profile is the primary holder of settings for an individual user and has a reference to the user's private Amazon Elastic File System (EFS) home directory.

" + "documentation":"

Creates a user profile. A user profile represents a single user within a domain, and is the main way to reference a \"person\" for the purposes of sharing, reporting, and other user-oriented features. This entity is created when a user onboards to a domain. If an administrator invites a person by email or imports them from IAM Identity Center, a user profile is automatically created. A user profile is the primary holder of settings for an individual user and has a reference to the user's private Amazon Elastic File System home directory.

" }, "CreateWorkforce":{ "name":"CreateWorkforce", @@ -1095,6 +1095,15 @@ ], "documentation":"

Use this operation to delete a human task user interface (worker task template).

To see a list of human task user interfaces (work task templates) in your account, use ListHumanTaskUis. When you delete a worker task template, it no longer appears when you call ListHumanTaskUis.

" }, + "DeleteHyperParameterTuningJob":{ + "name":"DeleteHyperParameterTuningJob", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteHyperParameterTuningJobRequest"}, + "documentation":"

Deletes a hyperparameter tuning job. The DeleteHyperParameterTuningJob API deletes only the tuning job entry that was created in SageMaker when you called the CreateHyperParameterTuningJob API. It does not delete training jobs, artifacts, or the IAM role that you specified when creating the model.

" + }, "DeleteImage":{ "name":"DeleteImage", "http":{ @@ -3467,7 +3476,21 @@ {"shape":"ResourceNotFound"}, {"shape":"ConflictException"} ], - "documentation":"

Update a SageMaker HyperPod cluster.

" + "documentation":"

Updates a SageMaker HyperPod cluster.

" + }, + "UpdateClusterSoftware":{ + "name":"UpdateClusterSoftware", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateClusterSoftwareRequest"}, + "output":{"shape":"UpdateClusterSoftwareResponse"}, + "errors":[ + {"shape":"ResourceNotFound"}, + {"shape":"ConflictException"} + ], + "documentation":"

Updates the platform software of a SageMaker HyperPod cluster for security patching. To learn how to use this API, see Update the SageMaker HyperPod platform software of a cluster.

" }, "UpdateCodeRepository":{ "name":"UpdateCodeRepository", @@ -3543,7 +3566,7 @@ "errors":[ {"shape":"ResourceLimitExceeded"} ], - "documentation":"

Deploys the new EndpointConfig specified in the request, switches to using newly created endpoint, and then deletes resources provisioned for the endpoint using the previous EndpointConfig (there is no availability loss).

When SageMaker receives the request, it sets the endpoint status to Updating. After updating the endpoint, it sets the status to InService. To check the status of an endpoint, use the DescribeEndpoint API.

You must not delete an EndpointConfig in use by an endpoint that is live or while the UpdateEndpoint or CreateEndpoint operations are being performed on the endpoint. To update an endpoint, you must create a new EndpointConfig.

If you delete the EndpointConfig of an endpoint that is active or being created or updated you may lose visibility into the instance type the endpoint is using. The endpoint must be deleted in order to stop incurring charges.

" + "documentation":"

Deploys the EndpointConfig specified in the request to a new fleet of instances. SageMaker shifts endpoint traffic to the new instances with the updated endpoint configuration and then deletes the old instances using the previous EndpointConfig (there is no availability loss). For more information about how to control the update and traffic shifting process, see Update models in production.

When SageMaker receives the request, it sets the endpoint status to Updating. After updating the endpoint, it sets the status to InService. To check the status of an endpoint, use the DescribeEndpoint API.

You must not delete an EndpointConfig in use by an endpoint that is live or while the UpdateEndpoint or CreateEndpoint operations are being performed on the endpoint. To update an endpoint, you must create a new EndpointConfig.

If you delete the EndpointConfig of an endpoint that is active or being created or updated you may lose visibility into the instance type the endpoint is using. The endpoint must be deleted in order to stop incurring charges.

" }, "UpdateEndpointWeightsAndCapacities":{ "name":"UpdateEndpointWeightsAndCapacities", @@ -4120,7 +4143,10 @@ }, "AdditionalS3DataSourceDataType":{ "type":"string", - "enum":["S3Object"] + "enum":[ + "S3Object", + "S3Prefix" + ] }, "AgentVersion":{ "type":"structure", @@ -4430,7 +4456,7 @@ "members":{ "AppImageConfigArn":{ "shape":"AppImageConfigArn", - "documentation":"

The Amazon Resource Name (ARN) of the AppImageConfig.

" + "documentation":"

The ARN of the AppImageConfig.

" }, "AppImageConfigName":{ "shape":"AppImageConfigName", @@ -4604,17 +4630,11 @@ "KernelGateway", "DetailedProfiler", "TensorBoard", - "VSCode", - "Savitur", "CodeEditor", "JupyterLab", "RStudioServerPro", - "RSession", "RSessionGateway", - "Canvas", - "DatasetManager", - "SageMakerLite", - "Local" + "Canvas" ] }, "ApprovalDescription":{ @@ -5535,7 +5555,7 @@ "members":{ "KmsKeyId":{ "shape":"KmsKeyId", - "documentation":"

The Key Management Service (KMS) encryption key ID.

" + "documentation":"

The Key Management Service encryption key ID.

" }, "S3OutputPath":{ "shape":"S3Uri", @@ -6160,6 +6180,10 @@ "KendraSettings":{ "shape":"KendraSettings", "documentation":"

The settings for document querying.

" + }, + "GenerativeAiSettings":{ + "shape":"GenerativeAiSettings", + "documentation":"

The generative AI settings for the SageMaker Canvas application.

" } }, "documentation":"

The SageMaker Canvas application settings.

" @@ -7864,7 +7888,7 @@ "members":{ "AppImageConfigArn":{ "shape":"AppImageConfigArn", - "documentation":"

The Amazon Resource Name (ARN) of the AppImageConfig.

" + "documentation":"

The ARN of the AppImageConfig.

" } } }, @@ -7977,7 +8001,7 @@ }, "ProblemType":{ "shape":"ProblemType", - "documentation":"

Defines the type of supervised learning problem available for the candidates. For more information, see Amazon SageMaker Autopilot problem types.

" + "documentation":"

Defines the type of supervised learning problem available for the candidates. For more information, see SageMaker Autopilot problem types.

" }, "AutoMLJobObjective":{ "shape":"AutoMLJobObjective", @@ -8892,7 +8916,7 @@ "members":{ "BaseImage":{ "shape":"ImageBaseImage", - "documentation":"

The registry path of the container image to use as the starting point for this version. The path is an Amazon Elastic Container Registry (ECR) URI in the following format:

<acct-id>.dkr.ecr.<region>.amazonaws.com/<repo-name[:tag] or [@digest]>

" + "documentation":"

The registry path of the container image to use as the starting point for this version. The path is an Amazon ECR URI in the following format:

<acct-id>.dkr.ecr.<region>.amazonaws.com/<repo-name[:tag] or [@digest]>

" }, "ClientToken":{ "shape":"ClientToken", @@ -9953,7 +9977,7 @@ "members":{ "DomainId":{ "shape":"DomainId", - "documentation":"

The ID of the associated Domain.

" + "documentation":"

The ID of the associated domain.

" }, "SpaceName":{ "shape":"SpaceName", @@ -10929,12 +10953,12 @@ }, "SecurityGroups":{ "shape":"SecurityGroupIds", - "documentation":"

The security group IDs for the Amazon Virtual Private Cloud that the space uses for communication.

" + "documentation":"

The security group IDs for the Amazon VPC that the space uses for communication.

" }, "JupyterServerAppSettings":{"shape":"JupyterServerAppSettings"}, "KernelGatewayAppSettings":{"shape":"KernelGatewayAppSettings"} }, - "documentation":"

A collection of settings that apply to spaces created in the Domain.

" + "documentation":"

A collection of settings that apply to spaces created in the domain.

" }, "DefaultSpaceStorageSettings":{ "type":"structure", @@ -11308,6 +11332,16 @@ "members":{ } }, + "DeleteHyperParameterTuningJobRequest":{ + "type":"structure", + "required":["HyperParameterTuningJobName"], + "members":{ + "HyperParameterTuningJobName":{ + "shape":"HyperParameterTuningJobName", + "documentation":"

The name of the hyperparameter tuning job that you want to delete.

" + } + } + }, "DeleteImageRequest":{ "type":"structure", "required":["ImageName"], @@ -11532,7 +11566,7 @@ "members":{ "DomainId":{ "shape":"DomainId", - "documentation":"

The ID of the associated Domain.

" + "documentation":"

The ID of the associated domain.

" }, "SpaceName":{ "shape":"SpaceName", @@ -11956,7 +11990,7 @@ "members":{ "AppImageConfigArn":{ "shape":"AppImageConfigArn", - "documentation":"

The Amazon Resource Name (ARN) of the AppImageConfig.

" + "documentation":"

The ARN of the AppImageConfig.

" }, "AppImageConfigName":{ "shape":"AppImageConfigName", @@ -12155,7 +12189,7 @@ }, "RoleArn":{ "shape":"RoleArn", - "documentation":"

The Amazon Resource Name (ARN) of the Identity and Access Management (IAM) role that has read permission to the input data location and write permission to the output data location in Amazon S3.

" + "documentation":"

The ARN of the IAM role that has read permission to the input data location and write permission to the output data location in Amazon S3.

" }, "AutoMLJobObjective":{ "shape":"AutoMLJobObjective", @@ -12265,7 +12299,7 @@ }, "RoleArn":{ "shape":"RoleArn", - "documentation":"

The ARN of the Identity and Access Management role that has read permission to the input data location and write permission to the output data location in Amazon S3.

" + "documentation":"

The ARN of the IAM role that has read permission to the input data location and write permission to the output data location in Amazon S3.

" }, "AutoMLJobObjective":{ "shape":"AutoMLJobObjective", @@ -12816,7 +12850,7 @@ }, "HomeEfsFileSystemId":{ "shape":"ResourceId", - "documentation":"

The ID of the Amazon Elastic File System (EFS) managed by this Domain.

" + "documentation":"

The ID of the Amazon Elastic File System managed by this Domain.

" }, "SingleSignOnManagedApplicationInstanceId":{ "shape":"String256", @@ -13710,7 +13744,7 @@ }, "HyperParameterTuningJobStatus":{ "shape":"HyperParameterTuningJobStatus", - "documentation":"

The status of the tuning job: InProgress, Completed, Failed, Stopping, or Stopped.

" + "documentation":"

The status of the tuning job.

" }, "CreationTime":{ "shape":"Timestamp", @@ -14590,7 +14624,7 @@ "members":{ "ModelPackageGroupName":{ "shape":"ArnOrName", - "documentation":"

The name of gthe model group to describe.

" + "documentation":"

The name of the model group to describe.

" } } }, @@ -15328,7 +15362,7 @@ "members":{ "DomainId":{ "shape":"DomainId", - "documentation":"

The ID of the associated Domain.

" + "documentation":"

The ID of the associated domain.

" }, "SpaceName":{ "shape":"SpaceName", @@ -15341,7 +15375,7 @@ "members":{ "DomainId":{ "shape":"DomainId", - "documentation":"

The ID of the associated Domain.

" + "documentation":"

The ID of the associated domain.

" }, "SpaceArn":{ "shape":"SpaceArn", @@ -15353,7 +15387,7 @@ }, "HomeEfsFileSystemUid":{ "shape":"EfsUid", - "documentation":"

The ID of the space's profile in the Amazon Elastic File System volume.

" + "documentation":"

The ID of the space's profile in the Amazon EFS volume.

" }, "Status":{ "shape":"SpaceStatus", @@ -15907,7 +15941,7 @@ }, "HomeEfsFileSystemUid":{ "shape":"EfsUid", - "documentation":"

The ID of the user's profile in the Amazon Elastic File System (EFS) volume.

" + "documentation":"

The ID of the user's profile in the Amazon Elastic File System volume.

" }, "Status":{ "shape":"UserProfileStatus", @@ -18035,7 +18069,7 @@ "box":true } }, - "documentation":"

The Amazon Elastic File System (EFS) storage configuration for a SageMaker image.

" + "documentation":"

The Amazon Elastic File System storage configuration for a SageMaker image.

" }, "FileSystemDataSource":{ "type":"structure", @@ -18361,6 +18395,16 @@ "pattern":"[0-9]\\.[A-Za-z0-9.]+" }, "GenerateCandidateDefinitionsOnly":{"type":"boolean"}, + "GenerativeAiSettings":{ + "type":"structure", + "members":{ + "AmazonBedrockRoleArn":{ + "shape":"RoleArn", + "documentation":"

The ARN of an Amazon Web Services IAM role that allows fine-tuning of large language models (LLMs) in Amazon Bedrock. The IAM role should have Amazon S3 read and write permissions, as well as a trust relationship that establishes bedrock.amazonaws.com as a service principal.

" + } + }, + "documentation":"

The generative AI settings for the SageMaker Canvas application.

Configure these settings for Canvas users starting chats with generative AI foundation models. For more information, see Use generative AI with foundation models.

" + }, "GetDeviceFleetReportRequest":{ "type":"structure", "required":["DeviceFleetName"], @@ -19707,7 +19751,7 @@ }, "MaxResource":{ "shape":"HyperbandStrategyMaxResource", - "documentation":"

The maximum number of resources (such as epochs) that can be used by a training job launched by a hyperparameter tuning job. Once a job reaches the MaxResource value, it is stopped. If a value for MaxResource is not provided, and Hyperband is selected as the hyperparameter tuning strategy, HyperbandTrainingJ attempts to infer MaxResource from the following keys (if present) in StaticsHyperParameters:

If HyperbandStrategyConfig is unable to infer a value for MaxResource, it generates a validation error. The maximum value is 20,000 epochs. All metrics that correspond to an objective metric are used to derive early stopping decisions. For distributive training jobs, ensure that duplicate metrics are not printed in the logs across the individual nodes in a training job. If multiple nodes are publishing duplicate or incorrect metrics, training jobs may make an incorrect stopping decision and stop the job prematurely.

" + "documentation":"

The maximum number of resources (such as epochs) that can be used by a training job launched by a hyperparameter tuning job. Once a job reaches the MaxResource value, it is stopped. If a value for MaxResource is not provided, and Hyperband is selected as the hyperparameter tuning strategy, HyperbandTraining attempts to infer MaxResource from the following keys (if present) in StaticsHyperParameters:

If HyperbandStrategyConfig is unable to infer a value for MaxResource, it generates a validation error. The maximum value is 20,000 epochs. All metrics that correspond to an objective metric are used to derive early stopping decisions. For distributed training jobs, ensure that duplicate metrics are not printed in the logs across the individual nodes in a training job. If multiple nodes are publishing duplicate or incorrect metrics, training jobs may make an incorrect stopping decision and stop the job prematurely.

" } }, "documentation":"

The configuration for Hyperband, a multi-fidelity based hyperparameter tuning strategy. Hyperband uses the final and intermediate results of a training job to dynamically allocate resources to utilized hyperparameter configurations while automatically stopping under-performing configurations. This parameter should be provided only if Hyperband is selected as the StrategyConfig under the HyperParameterTuningJobConfig API.

" @@ -21049,7 +21093,7 @@ "members":{ "DefaultResourceSpec":{ "shape":"ResourceSpec", - "documentation":"

The default instance type and the Amazon Resource Name (ARN) of the default SageMaker image used by the KernelGateway app.

The Amazon SageMaker Studio UI does not use the default instance type value set here. The default instance type set here is used when Apps are created using the Amazon Web Services Command Line Interface or Amazon Web Services CloudFormation and the instance type parameter value is not passed.

" + "documentation":"

The default instance type and the Amazon Resource Name (ARN) of the default SageMaker image used by the KernelGateway app.

The Amazon SageMaker Studio UI does not use the default instance type value set here. The default instance type set here is used when Apps are created using the CLI or CloudFormation and the instance type parameter value is not passed.

" }, "CustomImages":{ "shape":"CustomImages", @@ -21072,7 +21116,7 @@ }, "FileSystemConfig":{ "shape":"FileSystemConfig", - "documentation":"

The Amazon Elastic File System (EFS) storage configuration for a SageMaker image.

" + "documentation":"

The Amazon Elastic File System storage configuration for a SageMaker image.

" } }, "documentation":"

The configuration for the file system and kernels in a SageMaker image running as a KernelGateway app.

" @@ -24842,7 +24886,7 @@ }, "DomainIdEquals":{ "shape":"DomainId", - "documentation":"

A parameter to search for the Domain ID.

" + "documentation":"

A parameter to search for the domain ID.

" }, "SpaceNameContains":{ "shape":"SpaceName", @@ -25798,7 +25842,7 @@ "documentation":"

Specifies agreement to the model end-user license agreement (EULA). The AcceptEula value must be explicitly defined as True in order to accept the EULA that this model requires. You are responsible for reviewing and complying with any applicable license terms and making sure they are acceptable for your use case before downloading or using a model.

" } }, - "documentation":"

The access configuration file for the ML model. You can explicitly accept the model end-user license agreement (EULA) within the ModelAccessConfig. For more information, see End-user license agreements.

" + "documentation":"

The access configuration file to control access to the ML model. You can explicitly accept the model end-user license agreement (EULA) within the ModelAccessConfig.

" }, "ModelApprovalStatus":{ "type":"string", @@ -25823,7 +25867,7 @@ "documentation":"

The path of the S3 object that contains the model artifacts. For example, s3://bucket-name/keynameprefix/model.tar.gz.

" } }, - "documentation":"

Provides information about the location that is configured for storing model artifacts.

Model artifacts are the output that results from training a model, and typically consist of trained parameters, a model definition that describes how to compute inferences, and other metadata.

" + "documentation":"

Provides information about the location that is configured for storing model artifacts.

Model artifacts are outputs that result from training a model. They typically consist of trained parameters, a model definition that describes how to compute inferences, and other metadata. A SageMaker container stores your trained model artifacts in the /opt/ml/model directory. After training has completed, by default, these artifacts are uploaded to your Amazon S3 bucket as compressed files.

" }, "ModelBiasAppSpecification":{ "type":"structure", @@ -28265,7 +28309,7 @@ "documentation":"

A base64-encoded string that contains a shell script for a notebook instance lifecycle configuration.

" } }, - "documentation":"

Contains the notebook instance lifecycle configuration script.

Each lifecycle configuration script has a limit of 16384 characters.

The value of the $PATH environment variable that is available to both scripts is /sbin:bin:/usr/sbin:/usr/bin.

View CloudWatch Logs for notebook instance lifecycle configurations in log group /aws/sagemaker/NotebookInstances in log stream [notebook-instance-name]/[LifecycleConfigHook].

Lifecycle configuration scripts cannot run for longer than 5 minutes. If a script runs for longer than 5 minutes, it fails and the notebook instance is not created or started.

For information about notebook instance lifestyle configurations, see Step 2.1: (Optional) Customize a Notebook Instance.

" + "documentation":"

Contains the notebook instance lifecycle configuration script.

Each lifecycle configuration script has a limit of 16384 characters.

The value of the $PATH environment variable that is available to both scripts is /sbin:bin:/usr/sbin:/usr/bin.

View Amazon CloudWatch Logs for notebook instance lifecycle configurations in log group /aws/sagemaker/NotebookInstances in log stream [notebook-instance-name]/[LifecycleConfigHook].

Lifecycle configuration scripts cannot run for longer than 5 minutes. If a script runs for longer than 5 minutes, it fails and the notebook instance is not created or started.

For information about notebook instance lifestyle configurations, see Step 2.1: (Optional) Customize a Notebook Instance.

" }, "NotebookInstanceName":{ "type":"string", @@ -31884,10 +31928,10 @@ "members":{ "HomeEfsFileSystem":{ "shape":"RetentionType", - "documentation":"

The default is Retain, which specifies to keep the data stored on the EFS volume.

Specify Delete to delete the data stored on the EFS volume.

" + "documentation":"

The default is Retain, which specifies to keep the data stored on the Amazon EFS volume.

Specify Delete to delete the data stored on the Amazon EFS volume.

" } }, - "documentation":"

The retention policy for data stored on an Amazon Elastic File System (EFS) volume.

" + "documentation":"

The retention policy for data stored on an Amazon Elastic File System volume.

" }, "RetentionType":{ "type":"string", @@ -32028,7 +32072,7 @@ }, "S3Uri":{ "shape":"S3Uri", - "documentation":"

Depending on the value specified for the S3DataType, identifies either a key name prefix or a manifest. For example:

Your input bucket must be located in same Amazon Web Services region as your training job.

" + "documentation":"

Depending on the value specified for the S3DataType, identifies either a key name prefix or a manifest. For example:

Your input bucket must be located in the same Amazon Web Services Region as your training job.

" }, "S3DataDistributionType":{ "shape":"S3DataDistribution", @@ -32866,7 +32910,7 @@ "members":{ "DomainId":{ "shape":"DomainId", - "documentation":"

The ID of the associated Domain.

" + "documentation":"

The ID of the associated domain.

" }, "SpaceName":{ "shape":"SpaceName", @@ -33475,8 +33519,6 @@ "enum":[ "JupyterServer", "KernelGateway", - "VSCode", - "Savitur", "CodeEditor", "JupyterLab" ] @@ -33632,7 +33674,7 @@ }, "ProblemType":{ "shape":"ProblemType", - "documentation":"

The type of supervised learning problem available for the model candidates of the AutoML job V2. For more information, see Amazon SageMaker Autopilot problem types.

You must either specify the type of supervised learning problem in ProblemType and provide the AutoMLJobObjective metric, or none at all.

" + "documentation":"

The type of supervised learning problem available for the model candidates of the AutoML job V2. For more information, see SageMaker Autopilot problem types.

You must either specify the type of supervised learning problem in ProblemType and provide the AutoMLJobObjective metric, or none at all.

" }, "TargetAttributeName":{ "shape":"TargetAttributeName", @@ -33650,7 +33692,7 @@ "members":{ "ProblemType":{ "shape":"ProblemType", - "documentation":"

The type of supervised learning problem available for the model candidates of the AutoML job V2 (Binary Classification, Multiclass Classification, Regression). For more information, see Amazon SageMaker Autopilot problem types.

" + "documentation":"

The type of supervised learning problem available for the model candidates of the AutoML job V2 (Binary Classification, Multiclass Classification, Regression). For more information, see SageMaker Autopilot problem types.

" } }, "documentation":"

The resolved attributes specific to the tabular problem type.

" @@ -34013,7 +34055,7 @@ "documentation":"

For provisioned feature groups, this indicates the write throughput you are billed for and can consume without throttling.

This field is not applicable for on-demand feature groups.

" } }, - "documentation":"

Active throughput configuration of the feature group. Used to set feature group throughput configuration. There are two modes: ON_DEMAND and PROVISIONED. With on-demand mode, you are charged for data reads and writes that your application performs on your feature group. You do not need to specify read and write throughput because Feature Store accommodates your workloads as they ramp up and down. You can switch a feature group to on-demand only once in a 24 hour period. With provisioned throughput mode, you specify the read and write capacity per second that you expect your application to require, and you are billed based on those limits. Exceeding provisioned throughput will result in your requests being throttled.

Note: PROVISIONED throughput mode is supported only for feature groups that are offline-only, or use the Standard tier online store.

" + "documentation":"

Active throughput configuration of the feature group. There are two modes: ON_DEMAND and PROVISIONED. With on-demand mode, you are charged for data reads and writes that your application performs on your feature group. You do not need to specify read and write throughput because Feature Store accommodates your workloads as they ramp up and down. You can switch a feature group to on-demand only once in a 24 hour period. With provisioned throughput mode, you specify the read and write capacity per second that you expect your application to require, and you are billed based on those limits. Exceeding provisioned throughput will result in your requests being throttled.

Note: PROVISIONED throughput mode is supported only for feature groups that are offline-only, or use the Standard tier online store.

" }, "ThroughputConfigUpdate":{ "type":"structure", @@ -35062,7 +35104,7 @@ }, "S3Uri":{ "shape":"S3Uri", - "documentation":"

Depending on the value specified for the S3DataType, identifies either a key name prefix or a manifest. For example:

" + "documentation":"

Depending on the value specified for the S3DataType, identifies either a key name prefix or a manifest. For example:

" } }, "documentation":"

Describes the S3 data source.

" @@ -35692,7 +35734,7 @@ "members":{ "AppImageConfigArn":{ "shape":"AppImageConfigArn", - "documentation":"

The Amazon Resource Name (ARN) for the AppImageConfig.

" + "documentation":"

The ARN for the AppImageConfig.

" } } }, @@ -35754,6 +35796,26 @@ } } }, + "UpdateClusterSoftwareRequest":{ + "type":"structure", + "required":["ClusterName"], + "members":{ + "ClusterName":{ + "shape":"ClusterNameOrArn", + "documentation":"

Specify the name or the Amazon Resource Name (ARN) of the SageMaker HyperPod cluster you want to update for security patching.

" + } + } + }, + "UpdateClusterSoftwareResponse":{ + "type":"structure", + "required":["ClusterArn"], + "members":{ + "ClusterArn":{ + "shape":"ClusterArn", + "documentation":"

The Amazon Resource Name (ARN) of the SageMaker HyperPod cluster being updated for security patching.

" + } + } + }, "UpdateCodeRepositoryInput":{ "type":"structure", "required":["CodeRepositoryName"], @@ -35877,7 +35939,7 @@ }, "DefaultSpaceSettings":{ "shape":"DefaultSpaceSettings", - "documentation":"

The default settings used to create a space within the Domain.

" + "documentation":"

The default settings used to create a space within the domain.

" }, "SubnetIds":{ "shape":"Subnets", @@ -36619,7 +36681,7 @@ "members":{ "DomainId":{ "shape":"DomainId", - "documentation":"

The ID of the associated Domain.

" + "documentation":"

The ID of the associated domain.

" }, "SpaceName":{ "shape":"SpaceName", @@ -37138,7 +37200,7 @@ "members":{ "Key":{ "shape":"VisibilityConditionsKey", - "documentation":"

The key for that specifies the tag that you're using to filter the search results. The key must start with Tags..

" + "documentation":"

The key that specifies the tag that you're using to filter the search results. It must be in the following format: Tags.<key>/EqualsIfExists.

" }, "Value":{ "shape":"VisibilityConditionsValue", @@ -37194,7 +37256,8 @@ }, "VpcOnlyTrustedAccounts":{ "type":"list", - "member":{"shape":"AccountId"} + "member":{"shape":"AccountId"}, + "max":10 }, "VpcSecurityGroupIds":{ "type":"list", diff -Nru awscli-2.15.9/awscli/botocore/data/sagemaker-featurestore-runtime/2020-07-01/endpoint-rule-set-1.json awscli-2.15.22/awscli/botocore/data/sagemaker-featurestore-runtime/2020-07-01/endpoint-rule-set-1.json --- awscli-2.15.9/awscli/botocore/data/sagemaker-featurestore-runtime/2020-07-01/endpoint-rule-set-1.json 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/awscli/botocore/data/sagemaker-featurestore-runtime/2020-07-01/endpoint-rule-set-1.json 2024-02-21 17:34:54.000000000 +0000 @@ -40,7 +40,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -83,7 +82,8 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -96,7 +96,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -110,7 +109,6 @@ "assign": "PartitionResult" } ], - "type": "tree", "rules": [ { "conditions": [ @@ -133,7 +131,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -168,7 +165,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -179,14 +175,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS and DualStack are enabled, but this partition does not support one or both", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -200,14 +198,12 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ - true, { "fn": "getAttr", "argv": [ @@ -216,11 +212,11 @@ }, "supportsFIPS" ] - } + }, + true ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -231,14 +227,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS is enabled but this partition does not support FIPS", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -252,7 +250,6 @@ ] } ], - 
"type": "tree", "rules": [ { "conditions": [ @@ -272,7 +269,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -283,14 +279,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "DualStack is enabled but this partition does not support DualStack", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [], @@ -301,9 +299,11 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" } - ] + ], + "type": "tree" }, { "conditions": [], diff -Nru awscli-2.15.9/awscli/botocore/data/sagemaker-featurestore-runtime/2020-07-01/service-2.json awscli-2.15.22/awscli/botocore/data/sagemaker-featurestore-runtime/2020-07-01/service-2.json --- awscli-2.15.9/awscli/botocore/data/sagemaker-featurestore-runtime/2020-07-01/service-2.json 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/awscli/botocore/data/sagemaker-featurestore-runtime/2020-07-01/service-2.json 2024-02-21 17:34:54.000000000 +0000 @@ -41,7 +41,7 @@ {"shape":"ServiceUnavailable"}, {"shape":"AccessForbidden"} ], - "documentation":"

Deletes a Record from a FeatureGroup in the OnlineStore. Feature Store supports both SoftDelete and HardDelete. For SoftDelete (default), feature columns are set to null and the record is no longer retrievable by GetRecord or BatchGetRecord. For HardDelete, the complete Record is removed from the OnlineStore. In both cases, Feature Store appends the deleted record marker to the OfflineStore with feature values set to null, is_deleted value set to True, and EventTime set to the delete input EventTime.

Note that the EventTime specified in DeleteRecord should be set later than the EventTime of the existing record in the OnlineStore for that RecordIdentifer. If it is not, the deletion does not occur:

" + "documentation":"

Deletes a Record from a FeatureGroup in the OnlineStore. Feature Store supports both SoftDelete and HardDelete. For SoftDelete (default), feature columns are set to null and the record is no longer retrievable by GetRecord or BatchGetRecord. For HardDelete, the complete Record is removed from the OnlineStore. In both cases, Feature Store appends the deleted record marker to the OfflineStore. The deleted record marker is a record with the same RecordIdentifier as the original, but with is_deleted value set to True, EventTime set to the delete input EventTime, and other feature values set to null.

Note that the EventTime specified in DeleteRecord should be set later than the EventTime of the existing record in the OnlineStore for that RecordIdentifier. If it is not, the deletion does not occur:

When a record is deleted from the OnlineStore, the deleted record marker is appended to the OfflineStore. If you have the Iceberg table format enabled for your OfflineStore, you can remove all history of a record from the OfflineStore using Amazon Athena or Apache Spark. For information on how to hard delete a record from the OfflineStore with the Iceberg table format enabled, see Delete records from the offline store.

" }, "GetRecord":{ "name":"GetRecord", @@ -145,7 +145,7 @@ "BatchGetRecordIdentifiers":{ "type":"list", "member":{"shape":"BatchGetRecordIdentifier"}, - "max":10, + "max":100, "min":1 }, "BatchGetRecordRequest":{ diff -Nru awscli-2.15.9/awscli/botocore/data/secretsmanager/2017-10-17/service-2.json awscli-2.15.22/awscli/botocore/data/secretsmanager/2017-10-17/service-2.json --- awscli-2.15.9/awscli/botocore/data/secretsmanager/2017-10-17/service-2.json 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/awscli/botocore/data/secretsmanager/2017-10-17/service-2.json 2024-02-21 17:34:54.000000000 +0000 @@ -129,7 +129,7 @@ {"shape":"InvalidRequestException"}, {"shape":"InternalServiceError"} ], - "documentation":"

Generates a random password. We recommend that you specify the maximum length and include every character type that the system you are generating a password for can support.

Secrets Manager generates a CloudTrail log entry when you call this action. Do not include sensitive information in request parameters because it might be logged. For more information, see Logging Secrets Manager events with CloudTrail.

Required permissions: secretsmanager:GetRandomPassword. For more information, see IAM policy actions for Secrets Manager and Authentication and access control in Secrets Manager.

" + "documentation":"

Generates a random password. We recommend that you specify the maximum length and include every character type that the system you are generating a password for can support. By default, Secrets Manager uses uppercase and lowercase letters, numbers, and the following characters in passwords: !\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~

Secrets Manager generates a CloudTrail log entry when you call this action. Do not include sensitive information in request parameters because it might be logged. For more information, see Logging Secrets Manager events with CloudTrail.

Required permissions: secretsmanager:GetRandomPassword. For more information, see IAM policy actions for Secrets Manager and Authentication and access control in Secrets Manager.

" }, "GetResourcePolicy":{ "name":"GetResourcePolicy", @@ -194,7 +194,7 @@ {"shape":"InvalidNextTokenException"}, {"shape":"InternalServiceError"} ], - "documentation":"

Lists the secrets that are stored by Secrets Manager in the Amazon Web Services account, not including secrets that are marked for deletion. To see secrets marked for deletion, use the Secrets Manager console.

ListSecrets is eventually consistent, however it might not reflect changes from the last five minutes. To get the latest information for a specific secret, use DescribeSecret.

To list the versions of a secret, use ListSecretVersionIds.

To retrieve the values for the secrets, call BatchGetSecretValue or GetSecretValue.

For information about finding secrets in the console, see Find secrets in Secrets Manager.

Secrets Manager generates a CloudTrail log entry when you call this action. Do not include sensitive information in request parameters because it might be logged. For more information, see Logging Secrets Manager events with CloudTrail.

Required permissions: secretsmanager:ListSecrets. For more information, see IAM policy actions for Secrets Manager and Authentication and access control in Secrets Manager.

" + "documentation":"

Lists the secrets that are stored by Secrets Manager in the Amazon Web Services account, not including secrets that are marked for deletion. To see secrets marked for deletion, use the Secrets Manager console.

All Secrets Manager operations are eventually consistent. ListSecrets might not reflect changes from the last five minutes. You can get more recent information for a specific secret by calling DescribeSecret.

To list the versions of a secret, use ListSecretVersionIds.

To retrieve the values for the secrets, call BatchGetSecretValue or GetSecretValue.

For information about finding secrets in the console, see Find secrets in Secrets Manager.

Secrets Manager generates a CloudTrail log entry when you call this action. Do not include sensitive information in request parameters because it might be logged. For more information, see Logging Secrets Manager events with CloudTrail.

Required permissions: secretsmanager:ListSecrets. For more information, see IAM policy actions for Secrets Manager and Authentication and access control in Secrets Manager.

" }, "PutResourcePolicy":{ "name":"PutResourcePolicy", @@ -264,7 +264,7 @@ {"shape":"InvalidParameterException"}, {"shape":"InternalServiceError"} ], - "documentation":"

Replicates the secret to a new Regions. See Multi-Region secrets.

Secrets Manager generates a CloudTrail log entry when you call this action. Do not include sensitive information in request parameters because it might be logged. For more information, see Logging Secrets Manager events with CloudTrail.

Required permissions: secretsmanager:ReplicateSecretToRegions. For more information, see IAM policy actions for Secrets Manager and Authentication and access control in Secrets Manager.

" + "documentation":"

Replicates the secret to new Regions. See Multi-Region secrets.

Secrets Manager generates a CloudTrail log entry when you call this action. Do not include sensitive information in request parameters because it might be logged. For more information, see Logging Secrets Manager events with CloudTrail.

Required permissions: secretsmanager:ReplicateSecretToRegions. If the primary secret is encrypted with a KMS key other than aws/secretsmanager, you also need kms:Decrypt permission to the key. To encrypt the replicated secret with a KMS key other than aws/secretsmanager, you need kms:GenerateDataKey and kms:Encrypt to the key. For more information, see IAM policy actions for Secrets Manager and Authentication and access control in Secrets Manager.

" }, "RestoreSecret":{ "name":"RestoreSecret", @@ -686,7 +686,7 @@ }, "LastRotatedDate":{ "shape":"LastRotatedDateType", - "documentation":"

The last date and time that Secrets Manager rotated the secret. If the secret isn't configured for rotation, Secrets Manager returns null.

", + "documentation":"

The last date and time that Secrets Manager rotated the secret. If the secret isn't configured for rotation or rotation has been disabled, Secrets Manager returns null.

", "box":true }, "LastChangedDate":{ @@ -706,7 +706,7 @@ }, "NextRotationDate":{ "shape":"NextRotationDateType", - "documentation":"

The next rotation is scheduled to occur on or before this date. If the secret isn't configured for rotation, Secrets Manager returns null.

" + "documentation":"

The next rotation is scheduled to occur on or before this date. If the secret isn't configured for rotation or rotation has been disabled, Secrets Manager returns null. If rotation fails, Secrets Manager retries the entire rotation process multiple times. If rotation is unsuccessful, this date may be in the past.

" }, "Tags":{ "shape":"TagListType", @@ -921,7 +921,7 @@ }, "SecretBinary":{ "shape":"SecretBinaryType", - "documentation":"

The decrypted secret value, if the secret value was originally provided as binary data in the form of a byte array. The response parameter represents the binary data as a base64-encoded string.

If the secret was created by using the Secrets Manager console, or if the secret value was originally provided as a string, then this field is omitted. The secret value appears in SecretString instead.

" + "documentation":"

The decrypted secret value, if the secret value was originally provided as binary data in the form of a byte array. When you retrieve a SecretBinary using the HTTP API, the Python SDK, or the Amazon Web Services CLI, the value is Base64-encoded. Otherwise, it is not encoded.

If the secret was created by using the Secrets Manager console, or if the secret value was originally provided as a string, then this field is omitted. The secret value appears in SecretString instead.

" }, "SecretString":{ "shape":"SecretStringType", @@ -1539,7 +1539,7 @@ }, "NextRotationDate":{ "shape":"NextRotationDateType", - "documentation":"

The next rotation is scheduled to occur on or before this date. If the secret isn't configured for rotation, Secrets Manager returns null.

" + "documentation":"

The next rotation is scheduled to occur on or before this date. If the secret isn't configured for rotation or rotation has been disabled, Secrets Manager returns null.

" }, "Tags":{ "shape":"TagListType", diff -Nru awscli-2.15.9/awscli/botocore/data/securityhub/2018-10-26/service-2.json awscli-2.15.22/awscli/botocore/data/securityhub/2018-10-26/service-2.json --- awscli-2.15.9/awscli/botocore/data/securityhub/2018-10-26/service-2.json 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/awscli/botocore/data/securityhub/2018-10-26/service-2.json 2024-02-21 17:34:54.000000000 +0000 @@ -10611,7 +10611,7 @@ }, "CompatibleRuntimes":{ "shape":"NonEmptyStringList", - "documentation":"

The layer's compatible runtimes. Maximum number of five items.

Valid values: nodejs10.x | nodejs12.x | java8 | java11 | python2.7 | python3.6 | python3.7 | python3.8 | dotnetcore1.0 | dotnetcore2.1 | go1.x | ruby2.5 | provided

" + "documentation":"

The layer's compatible function runtimes.

The following list includes deprecated runtimes. For more information, see Runtime deprecation policy in the Lambda Developer Guide.

Array Members: Maximum number of 5 items.

Valid Values: nodejs | nodejs4.3 | nodejs6.10 | nodejs8.10 | nodejs10.x | nodejs12.x | nodejs14.x | nodejs16.x | java8 | java8.al2 | java11 | python2.7 | python3.6 | python3.7 | python3.8 | python3.9 | dotnetcore1.0 | dotnetcore2.0 | dotnetcore2.1 | dotnetcore3.1 | dotnet6 | nodejs4.3-edge | go1.x | ruby2.5 | ruby2.7 | provided | provided.al2 | nodejs18.x | python3.10 | java17 | ruby3.2 | python3.11 | nodejs20.x | provided.al2023 | python3.12 | java21

" }, "CreatedDate":{ "shape":"NonEmptyString", @@ -21915,5 +21915,5 @@ "documentation":"

Used to update information about the investigation into the finding.

" } }, - "documentation":"

Security Hub provides you with a comprehensive view of the security state of your Amazon Web Services environment and resources. It also provides you with the readiness status of your environment based on controls from supported security standards. Security Hub collects security data from Amazon Web Services accounts, services, and integrated third-party products and helps you analyze security trends in your environment to identify the highest priority security issues. For more information about Security Hub, see the Security Hub User Guide .

When you use operations in the Security Hub API, the requests are executed only in the Amazon Web Services Region that is currently active or in the specific Amazon Web Services Region that you specify in your request. Any configuration or settings change that results from the operation is applied only to that Region. To make the same change in other Regions, run the same command for each Region in which you want to apply the change.

For example, if your Region is set to us-west-2, when you use CreateMembers to add a member account to Security Hub, the association of the member account with the administrator account is created only in the us-west-2 Region. Security Hub must be enabled for the member account in the same Region that the invitation was sent from.

The following throttling limits apply to using Security Hub API operations.

" + "documentation":"

Security Hub provides you with a comprehensive view of your security state in Amazon Web Services and helps you assess your Amazon Web Services environment against security industry standards and best practices.

Security Hub collects security data across Amazon Web Services accounts, Amazon Web Services, and supported third-party products and helps you analyze your security trends and identify the highest priority security issues.

To help you manage the security state of your organization, Security Hub supports multiple security standards. These include the Amazon Web Services Foundational Security Best Practices (FSBP) standard developed by Amazon Web Services, and external compliance frameworks such as the Center for Internet Security (CIS), the Payment Card Industry Data Security Standard (PCI DSS), and the National Institute of Standards and Technology (NIST). Each standard includes several security controls, each of which represents a security best practice. Security Hub runs checks against security controls and generates control findings to help you assess your compliance against security best practices.

In addition to generating control findings, Security Hub also receives findings from other Amazon Web Services, such as Amazon GuardDuty and Amazon Inspector, and supported third-party products. This gives you a single pane of glass into a variety of security-related issues. You can also send Security Hub findings to other Amazon Web Services and supported third-party products.

Security Hub offers automation features that help you triage and remediate security issues. For example, you can use automation rules to automatically update critical findings when a security check fails. You can also leverage the integration with Amazon EventBridge to trigger automatic responses to specific findings.

This guide, the Security Hub API Reference, provides information about the Security Hub API. This includes supported resources, HTTP methods, parameters, and schemas. If you're new to Security Hub, you might find it helpful to also review the Security Hub User Guide . The user guide explains key concepts and provides procedures that demonstrate how to use Security Hub features. It also provides information about topics such as integrating Security Hub with other Amazon Web Services.

In addition to interacting with Security Hub by making calls to the Security Hub API, you can use a current version of an Amazon Web Services command line tool or SDK. Amazon Web Services provides tools and SDKs that consist of libraries and sample code for various languages and platforms, such as PowerShell, Java, Go, Python, C++, and .NET. These tools and SDKs provide convenient, programmatic access to Security Hub and other Amazon Web Services . They also handle tasks such as signing requests, managing errors, and retrying requests automatically. For information about installing and using the Amazon Web Services tools and SDKs, see Tools to Build on Amazon Web Services.

With the exception of operations that are related to central configuration, Security Hub API requests are executed only in the Amazon Web Services Region that is currently active or in the specific Amazon Web Services Region that you specify in your request. Any configuration or settings change that results from the operation is applied only to that Region. To make the same change in other Regions, call the same API operation in each Region in which you want to apply the change. When you use central configuration, API requests for enabling Security Hub, standards, and controls are executed in the home Region and all linked Regions. For a list of central configuration operations, see the Central configuration terms and concepts section of the Security Hub User Guide.

The following throttling limits apply to Security Hub API operations.

" } diff -Nru awscli-2.15.9/awscli/botocore/data/securitylake/2018-05-10/endpoint-rule-set-1.json awscli-2.15.22/awscli/botocore/data/securitylake/2018-05-10/endpoint-rule-set-1.json --- awscli-2.15.9/awscli/botocore/data/securitylake/2018-05-10/endpoint-rule-set-1.json 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/awscli/botocore/data/securitylake/2018-05-10/endpoint-rule-set-1.json 2024-02-21 17:34:54.000000000 +0000 @@ -40,7 +40,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -59,7 +58,6 @@ }, { "conditions": [], - "type": "tree", "rules": [ { "conditions": [ @@ -87,13 +85,14 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" } - ] + ], + "type": "tree" }, { "conditions": [], - "type": "tree", "rules": [ { "conditions": [ @@ -106,7 +105,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -120,7 +118,6 @@ "assign": "PartitionResult" } ], - "type": "tree", "rules": [ { "conditions": [ @@ -143,7 +140,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -178,11 +174,9 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], - "type": "tree", "rules": [ { "conditions": [], @@ -193,16 +187,19 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS and DualStack are enabled, but this partition does not support one or both", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -216,14 +213,12 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ - true, { "fn": "getAttr", "argv": [ @@ -232,15 +227,14 @@ }, "supportsFIPS" ] - } + }, + true ] } ], - "type": "tree", "rules": [ { "conditions": [], - "type": "tree", "rules": [ { "conditions": [], @@ -251,16 +245,19 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS is enabled but this partition does not support FIPS", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -274,7 +271,6 
@@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -294,11 +290,9 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], - "type": "tree", "rules": [ { "conditions": [], @@ -309,20 +303,22 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "DualStack is enabled but this partition does not support DualStack", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [], - "type": "tree", "rules": [ { "conditions": [], @@ -333,18 +329,22 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" } - ] + ], + "type": "tree" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "Invalid Configuration: Missing Region", "type": "error" } - ] + ], + "type": "tree" } ] } \ No newline at end of file diff -Nru awscli-2.15.9/awscli/botocore/data/securitylake/2018-05-10/service-2.json awscli-2.15.22/awscli/botocore/data/securitylake/2018-05-10/service-2.json --- awscli-2.15.9/awscli/botocore/data/securitylake/2018-05-10/service-2.json 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/awscli/botocore/data/securitylake/2018-05-10/service-2.json 2024-02-21 17:34:54.000000000 +0000 @@ -1217,14 +1217,14 @@ "members":{ "regions":{ "shape":"RegionList", - "documentation":"

Replication enables automatic, asynchronous copying of objects across Amazon S3 buckets. Amazon S3 buckets that are configured for object replication can be owned by the same Amazon Web Services account or by different accounts. You can replicate objects to a single destination bucket or to multiple destination buckets. The destination buckets can be in different Amazon Web Services Regions or within the same Region as the source bucket.

Set up one or more rollup Regions by providing the Region or Regions that should contribute to the central rollup Region.

" + "documentation":"

Specifies one or more centralized rollup Regions. The Amazon Web Services Region specified in the region parameter of the CreateDataLake or UpdateDataLake operations contributes data to the rollup Region or Regions specified in this parameter.

Replication enables automatic, asynchronous copying of objects across Amazon S3 buckets. S3 buckets that are configured for object replication can be owned by the same Amazon Web Services account or by different accounts. You can replicate objects to a single destination bucket or to multiple destination buckets. The destination buckets can be in different Regions or within the same Region as the source bucket.

" }, "roleArn":{ "shape":"RoleArn", "documentation":"

Replication settings for the Amazon S3 buckets. This parameter uses the Identity and Access Management (IAM) role you created that is managed by Security Lake, to ensure the replication setting is correct.

" } }, - "documentation":"

Provides replication details of Amazon Security Lake object.

" + "documentation":"

Provides replication details for objects stored in the Amazon Security Lake data lake.

" }, "DataLakeResource":{ "type":"structure", @@ -1654,7 +1654,7 @@ }, "regions":{ "shape":"RegionList", - "documentation":"

List the Amazon Web Services Regions from which exceptions are retrieved.

" + "documentation":"

The Amazon Web Services Regions from which exceptions are retrieved.

" } } }, @@ -1676,7 +1676,7 @@ "members":{ "regions":{ "shape":"RegionList", - "documentation":"

The list of regions where Security Lake is enabled.

", + "documentation":"

The list of Regions where Security Lake is enabled.

", "location":"querystring", "locationName":"regions" } @@ -1708,7 +1708,7 @@ }, "regions":{ "shape":"RegionList", - "documentation":"

The list of regions for which log sources are displayed.

" + "documentation":"

The list of Regions for which log sources are displayed.

" }, "sources":{ "shape":"LogSourceResourceList", diff -Nru awscli-2.15.9/awscli/botocore/data/snowball/2016-06-30/endpoint-rule-set-1.json awscli-2.15.22/awscli/botocore/data/snowball/2016-06-30/endpoint-rule-set-1.json --- awscli-2.15.9/awscli/botocore/data/snowball/2016-06-30/endpoint-rule-set-1.json 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/awscli/botocore/data/snowball/2016-06-30/endpoint-rule-set-1.json 2024-02-21 17:34:54.000000000 +0000 @@ -40,7 +40,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -83,7 +82,8 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -96,7 +96,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -110,7 +109,6 @@ "assign": "PartitionResult" } ], - "type": "tree", "rules": [ { "conditions": [ @@ -133,7 +131,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -168,7 +165,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -179,14 +175,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS and DualStack are enabled, but this partition does not support one or both", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -200,14 +198,12 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ - true, { "fn": "getAttr", "argv": [ @@ -216,11 +212,11 @@ }, "supportsFIPS" ] - } + }, + true ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -231,14 +227,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS is enabled but this partition does not support FIPS", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -252,7 +250,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -272,7 +269,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -283,14 +279,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "DualStack is enabled but this partition does not support 
DualStack", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [], @@ -301,9 +299,11 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" } - ] + ], + "type": "tree" }, { "conditions": [], diff -Nru awscli-2.15.9/awscli/botocore/data/snowball/2016-06-30/service-2.json awscli-2.15.22/awscli/botocore/data/snowball/2016-06-30/service-2.json --- awscli-2.15.9/awscli/botocore/data/snowball/2016-06-30/service-2.json 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/awscli/botocore/data/snowball/2016-06-30/service-2.json 2024-02-21 17:34:54.000000000 +0000 @@ -55,7 +55,7 @@ {"shape":"InvalidAddressException"}, {"shape":"UnsupportedAddressException"} ], - "documentation":"

Creates an address for a Snow device to be shipped to. In most regions, addresses are validated at the time of creation. The address you provide must be located within the serviceable area of your region. If the address is invalid or unsupported, then an exception is thrown.

" + "documentation":"

Creates an address for a Snow device to be shipped to. In most regions, addresses are validated at the time of creation. The address you provide must be located within the serviceable area of your region. If the address is invalid or unsupported, then an exception is thrown. If providing an address as a JSON file through the cli-input-json option, include the full file path. For example, --cli-input-json file://create-address.json.

" }, "CreateCluster":{ "name":"CreateCluster", diff -Nru awscli-2.15.9/awscli/botocore/data/sns/2010-03-31/service-2.json awscli-2.15.22/awscli/botocore/data/sns/2010-03-31/service-2.json --- awscli-2.15.9/awscli/botocore/data/sns/2010-03-31/service-2.json 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/awscli/botocore/data/sns/2010-03-31/service-2.json 2024-02-21 17:34:54.000000000 +0000 @@ -84,7 +84,7 @@ {"shape":"InternalErrorException"}, {"shape":"AuthorizationErrorException"} ], - "documentation":"

Creates a platform application object for one of the supported push notification services, such as APNS and GCM (Firebase Cloud Messaging), to which devices and mobile apps may register. You must specify PlatformPrincipal and PlatformCredential attributes when using the CreatePlatformApplication action.

PlatformPrincipal and PlatformCredential are received from the notification service.

You can use the returned PlatformApplicationArn as an attribute for the CreatePlatformEndpoint action.

" + "documentation":"

Creates a platform application object for one of the supported push notification services, such as APNS and GCM (Firebase Cloud Messaging), to which devices and mobile apps may register. You must specify PlatformPrincipal and PlatformCredential attributes when using the CreatePlatformApplication action.

PlatformPrincipal and PlatformCredential are received from the notification service.

You can use the returned PlatformApplicationArn as an attribute for the CreatePlatformEndpoint action.

" }, "CreatePlatformEndpoint":{ "name":"CreatePlatformEndpoint", @@ -1027,7 +1027,7 @@ }, "Attributes":{ "shape":"MapStringToString", - "documentation":"

For a list of attributes, see SetPlatformApplicationAttributes.

" + "documentation":"

For a list of attributes, see SetPlatformApplicationAttributes .

" } }, "documentation":"

Input for CreatePlatformApplication action.

" @@ -1037,7 +1037,7 @@ "members":{ "PlatformApplicationArn":{ "shape":"String", - "documentation":"

PlatformApplicationArn is returned.

" + "documentation":"

PlatformApplicationArn is returned.

" } }, "documentation":"

Response from CreatePlatformApplication action.

" @@ -1051,7 +1051,7 @@ "members":{ "PlatformApplicationArn":{ "shape":"String", - "documentation":"

PlatformApplicationArn returned from CreatePlatformApplication is used to create a an endpoint.

" + "documentation":"

PlatformApplicationArn returned from CreatePlatformApplication is used to create a an endpoint.

" }, "Token":{ "shape":"String", @@ -1063,7 +1063,7 @@ }, "Attributes":{ "shape":"MapStringToString", - "documentation":"

For a list of attributes, see SetEndpointAttributes.

" + "documentation":"

For a list of attributes, see SetEndpointAttributes .

" } }, "documentation":"

Input for CreatePlatformEndpoint action.

" @@ -1130,10 +1130,10 @@ "members":{ "EndpointArn":{ "shape":"String", - "documentation":"

EndpointArn of endpoint to delete.

" + "documentation":"

EndpointArn of endpoint to delete.

" } }, - "documentation":"

Input for DeleteEndpoint action.

" + "documentation":"

Input for DeleteEndpoint action.

" }, "DeletePlatformApplicationInput":{ "type":"structure", @@ -1141,10 +1141,10 @@ "members":{ "PlatformApplicationArn":{ "shape":"String", - "documentation":"

PlatformApplicationArn of platform application object to delete.

" + "documentation":"

PlatformApplicationArn of platform application object to delete.

" } }, - "documentation":"

Input for DeletePlatformApplication action.

" + "documentation":"

Input for DeletePlatformApplication action.

" }, "DeleteSMSSandboxPhoneNumberInput":{ "type":"structure", @@ -1252,10 +1252,10 @@ "members":{ "EndpointArn":{ "shape":"String", - "documentation":"

EndpointArn for GetEndpointAttributes input.

" + "documentation":"

EndpointArn for GetEndpointAttributes input.

" } }, - "documentation":"

Input for GetEndpointAttributes action.

" + "documentation":"

Input for GetEndpointAttributes action.

" }, "GetEndpointAttributesResponse":{ "type":"structure", @@ -1265,7 +1265,7 @@ "documentation":"

Attributes include the following:

" } }, - "documentation":"

Response from GetEndpointAttributes of the EndpointArn.

" + "documentation":"

Response from GetEndpointAttributes of the EndpointArn.

" }, "GetPlatformApplicationAttributesInput":{ "type":"structure", @@ -1273,20 +1273,20 @@ "members":{ "PlatformApplicationArn":{ "shape":"String", - "documentation":"

PlatformApplicationArn for GetPlatformApplicationAttributesInput.

" + "documentation":"

PlatformApplicationArn for GetPlatformApplicationAttributesInput.

" } }, - "documentation":"

Input for GetPlatformApplicationAttributes action.

" + "documentation":"

Input for GetPlatformApplicationAttributes action.

" }, "GetPlatformApplicationAttributesResponse":{ "type":"structure", "members":{ "Attributes":{ "shape":"MapStringToString", - "documentation":"

Attributes include the following:

" + "documentation":"

Attributes include the following:

" } }, - "documentation":"

Response for GetPlatformApplicationAttributes action.

" + "documentation":"

Response for GetPlatformApplicationAttributes action.

" }, "GetSMSAttributesInput":{ "type":"structure", @@ -1555,28 +1555,28 @@ "members":{ "PlatformApplicationArn":{ "shape":"String", - "documentation":"

PlatformApplicationArn for ListEndpointsByPlatformApplicationInput action.

" + "documentation":"

PlatformApplicationArn for ListEndpointsByPlatformApplicationInput action.

" }, "NextToken":{ "shape":"String", - "documentation":"

NextToken string is used when calling ListEndpointsByPlatformApplication action to retrieve additional records that are available after the first page results.

" + "documentation":"

NextToken string is used when calling ListEndpointsByPlatformApplication action to retrieve additional records that are available after the first page results.

" } }, - "documentation":"

Input for ListEndpointsByPlatformApplication action.

" + "documentation":"

Input for ListEndpointsByPlatformApplication action.

" }, "ListEndpointsByPlatformApplicationResponse":{ "type":"structure", "members":{ "Endpoints":{ "shape":"ListOfEndpoints", - "documentation":"

Endpoints returned for ListEndpointsByPlatformApplication action.

" + "documentation":"

Endpoints returned for ListEndpointsByPlatformApplication action.

" }, "NextToken":{ "shape":"String", - "documentation":"

NextToken string is returned when calling ListEndpointsByPlatformApplication action if additional records are available after the first page results.

" + "documentation":"

NextToken string is returned when calling ListEndpointsByPlatformApplication action if additional records are available after the first page results.

" } }, - "documentation":"

Response for ListEndpointsByPlatformApplication action.

" + "documentation":"

Response for ListEndpointsByPlatformApplication action.

" }, "ListOfEndpoints":{ "type":"list", @@ -1641,24 +1641,24 @@ "members":{ "NextToken":{ "shape":"String", - "documentation":"

NextToken string is used when calling ListPlatformApplications action to retrieve additional records that are available after the first page results.

" + "documentation":"

NextToken string is used when calling ListPlatformApplications action to retrieve additional records that are available after the first page results.

" } }, - "documentation":"

Input for ListPlatformApplications action.

" + "documentation":"

Input for ListPlatformApplications action.

" }, "ListPlatformApplicationsResponse":{ "type":"structure", "members":{ "PlatformApplications":{ "shape":"ListOfPlatformApplications", - "documentation":"

Platform applications returned when calling ListPlatformApplications action.

" + "documentation":"

Platform applications returned when calling ListPlatformApplications action.

" }, "NextToken":{ "shape":"String", - "documentation":"

NextToken string is returned when calling ListPlatformApplications action if additional records are available after the first page results.

" + "documentation":"

NextToken string is returned when calling ListPlatformApplications action if additional records are available after the first page results.

" } }, - "documentation":"

Response for ListPlatformApplications action.

" + "documentation":"

Response for ListPlatformApplications action.

" }, "ListSMSSandboxPhoneNumbersInput":{ "type":"structure", @@ -1895,7 +1895,10 @@ }, "exception":true }, - "PhoneNumber":{"type":"string"}, + "PhoneNumber":{ + "type":"string", + "sensitive":true + }, "PhoneNumberInformation":{ "type":"structure", "members":{ @@ -1904,7 +1907,7 @@ "documentation":"

The date and time when the phone number was created.

" }, "PhoneNumber":{ - "shape":"String", + "shape":"PhoneNumber", "documentation":"

The phone number.

" }, "Status":{ @@ -1938,7 +1941,8 @@ "PhoneNumberString":{ "type":"string", "max":20, - "pattern":"^(\\+[0-9]{8,}|[0-9]{0,9})$" + "pattern":"^(\\+[0-9]{8,}|[0-9]{0,9})$", + "sensitive":true }, "PlatformApplication":{ "type":"structure", @@ -2077,7 +2081,7 @@ "documentation":"

If you don't specify a value for the TargetArn parameter, you must specify a value for the PhoneNumber or TopicArn parameters.

" }, "PhoneNumber":{ - "shape":"String", + "shape":"PhoneNumber", "documentation":"

The phone number to which you want to deliver an SMS message. Use E.164 format.

If you don't specify a value for the PhoneNumber parameter, you must specify a value for the TargetArn or TopicArn parameters.

" }, "Message":{ @@ -2226,14 +2230,14 @@ "members":{ "EndpointArn":{ "shape":"String", - "documentation":"

EndpointArn used for SetEndpointAttributes action.

" + "documentation":"

EndpointArn used for SetEndpointAttributes action.

" }, "Attributes":{ "shape":"MapStringToString", "documentation":"

A map of the endpoint attributes. Attributes in this map include the following:

" } }, - "documentation":"

Input for SetEndpointAttributes action.

" + "documentation":"

Input for SetEndpointAttributes action.

" }, "SetPlatformApplicationAttributesInput":{ "type":"structure", @@ -2244,14 +2248,14 @@ "members":{ "PlatformApplicationArn":{ "shape":"String", - "documentation":"

PlatformApplicationArn for SetPlatformApplicationAttributes action.

" + "documentation":"

PlatformApplicationArn for SetPlatformApplicationAttributes action.

" }, "Attributes":{ "shape":"MapStringToString", - "documentation":"

A map of the platform application attributes. Attributes in this map include the following:

The following attributes only apply to APNs token-based authentication:

" + "documentation":"

A map of the platform application attributes. Attributes in this map include the following:

The following attributes only apply to APNs token-based authentication:

" } }, - "documentation":"

Input for SetPlatformApplicationAttributes action.

" + "documentation":"

Input for SetPlatformApplicationAttributes action.

" }, "SetSMSAttributesInput":{ "type":"structure", diff -Nru awscli-2.15.9/awscli/botocore/data/ssm/2014-11-06/service-2.json awscli-2.15.22/awscli/botocore/data/ssm/2014-11-06/service-2.json --- awscli-2.15.9/awscli/botocore/data/ssm/2014-11-06/service-2.json 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/awscli/botocore/data/ssm/2014-11-06/service-2.json 2024-02-21 17:34:54.000000000 +0000 @@ -582,7 +582,7 @@ "errors":[ {"shape":"InternalServerError"} ], - "documentation":"

Lists all patches eligible to be included in a patch baseline.

" + "documentation":"

Lists all patches eligible to be included in a patch baseline.

Currently, DescribeAvailablePatches supports only the Amazon Linux 1, Amazon Linux 2, and Windows Server operating systems.

" }, "DescribeDocument":{ "name":"DescribeDocument", @@ -2504,6 +2504,11 @@ "documentation":"

Number of days to wait after the scheduled day to run an association.

", "box":true }, + "Duration":{ + "shape":"Duration", + "documentation":"

The number of hours that an association can run on specified targets. After the resulting cutoff time passes, associations that are currently running are cancelled, and no pending executions are started on remaining targets.

", + "box":true + }, "TargetMaps":{ "shape":"TargetMaps", "documentation":"

A key-value mapping of document parameters to target resources. Both Targets and TargetMaps can't be specified together.

", @@ -2633,6 +2638,11 @@ "documentation":"

Number of days to wait after the scheduled day to run an association.

", "box":true }, + "Duration":{ + "shape":"Duration", + "documentation":"

The number of hours that an association can run on specified targets. After the resulting cutoff time passes, associations that are currently running are cancelled, and no pending executions are started on remaining targets.

", + "box":true + }, "TargetMaps":{ "shape":"TargetMaps", "documentation":"

A key-value mapping of document parameters to target resources. Both Targets and TargetMaps can't be specified together.

", @@ -3066,6 +3076,11 @@ "documentation":"

Number of days to wait after the scheduled day to run an association.

", "box":true }, + "Duration":{ + "shape":"Duration", + "documentation":"

The number of hours that an association can run on specified targets. After the resulting cutoff time passes, associations that are currently running are cancelled, and no pending executions are started on remaining targets.

", + "box":true + }, "TargetMaps":{ "shape":"TargetMaps", "documentation":"

A key-value mapping of document parameters to target resources. Both Targets and TargetMaps can't be specified together.

", @@ -4560,6 +4575,11 @@ "documentation":"

Number of days to wait after the scheduled day to run an association.

", "box":true }, + "Duration":{ + "shape":"Duration", + "documentation":"

The number of hours the association can run before it is canceled. Duration applies to associations that are currently running, and any pending and in progress commands on all targets. If a target was taken offline for the association to run, it is made available again immediately, without a reboot.

The Duration parameter applies only when both these conditions are true:

", + "box":true + }, "TargetMaps":{ "shape":"TargetMaps", "documentation":"

A key-value mapping of document parameters to target resources. Both Targets and TargetMaps can't be specified together.

", @@ -4655,6 +4675,11 @@ "documentation":"

Number of days to wait after the scheduled day to run an association. For example, if you specified a cron schedule of cron(0 0 ? * THU#2 *), you could specify an offset of 3 to run the association each Sunday after the second Thursday of the month. For more information about cron schedules for associations, see Reference: Cron and rate expressions for Systems Manager in the Amazon Web Services Systems Manager User Guide.

To use offsets, you must specify the ApplyOnlyAtCronInterval parameter. This option tells the system not to run an association immediately after you create it.

", "box":true }, + "Duration":{ + "shape":"Duration", + "documentation":"

The number of hours the association can run before it is canceled. Duration applies to associations that are currently running, and any pending and in progress commands on all targets. If a target was taken offline for the association to run, it is made available again immediately, without a reboot.

The Duration parameter applies only when both these conditions are true:

", + "box":true + }, "TargetMaps":{ "shape":"TargetMaps", "documentation":"

A key-value mapping of document parameters to target resources. Both Targets and TargetMaps can't be specified together.

", @@ -5726,7 +5751,7 @@ }, "VersionName":{ "shape":"DocumentVersionName", - "documentation":"

An optional field specifying the version of the artifact associated with the document. For example, \"Release 12, Update 6\". This value is unique across all versions of a document, and can't be changed.

" + "documentation":"

An optional field specifying the version of the artifact associated with the document. For example, 12.6. This value is unique across all versions of a document, and can't be changed.

" } } }, @@ -6845,7 +6870,7 @@ }, "VersionName":{ "shape":"DocumentVersionName", - "documentation":"

An optional field specifying the version of the artifact associated with the document. For example, \"Release 12, Update 6\". This value is unique across all versions of a document, and can't be changed.

" + "documentation":"

An optional field specifying the version of the artifact associated with the document. For example, 12.6. This value is unique across all versions of a document, and can't be changed.

" }, "PlatformTypes":{ "shape":"PlatformTypeList", @@ -7026,7 +7051,7 @@ }, "VersionName":{ "shape":"DocumentVersionName", - "documentation":"

An optional field specifying the version of the artifact associated with the document. For example, \"Release 12, Update 6\". This value is unique across all versions of a document, and can't be changed.

" + "documentation":"

An optional field specifying the version of the artifact associated with the document. For example, 12.6. This value is unique across all versions of a document, and can't be changed.

" } }, "documentation":"

An SSM document required by the current document.

" @@ -7178,7 +7203,7 @@ }, "VersionName":{ "shape":"DocumentVersionName", - "documentation":"

The version of the artifact associated with the document. For example, \"Release 12, Update 6\". This value is unique across all versions of a document, and can't be changed.

" + "documentation":"

The version of the artifact associated with the document. For example, 12.6. This value is unique across all versions of a document, and can't be changed.

" }, "CreatedDate":{ "shape":"DateTime", @@ -7260,6 +7285,12 @@ "documentation":"

You can't specify a managed node ID in more than one association.

", "exception":true }, + "Duration":{ + "type":"integer", + "box":true, + "max":24, + "min":1 + }, "EffectiveInstanceAssociationMaxResults":{ "type":"integer", "max":5, @@ -7524,7 +7555,7 @@ }, "Status":{ "shape":"ConnectionStatus", - "documentation":"

The status of the connection to the managed node. For example, 'Connected' or 'Not Connected'.

" + "documentation":"

The status of the connection to the managed node.

" } } }, @@ -7602,7 +7633,7 @@ }, "VersionName":{ "shape":"DocumentVersionName", - "documentation":"

An optional field specifying the version of the artifact associated with the document. For example, \"Release 12, Update 6\". This value is unique across all versions of a document and can't be changed.

" + "documentation":"

An optional field specifying the version of the artifact associated with the document. For example, 12.6. This value is unique across all versions of a document and can't be changed.

" }, "DocumentVersion":{ "shape":"DocumentVersion", @@ -7631,7 +7662,7 @@ }, "VersionName":{ "shape":"DocumentVersionName", - "documentation":"

The version of the artifact associated with the document. For example, \"Release 12, Update 6\". This value is unique across all versions of a document, and can't be changed.

" + "documentation":"

The version of the artifact associated with the document. For example, 12.6. This value is unique across all versions of a document, and can't be changed.

" }, "DocumentVersion":{ "shape":"DocumentVersion", @@ -12551,7 +12582,7 @@ }, "KeyId":{ "shape":"ParameterKeyId", - "documentation":"

The ID of the query key used for this parameter.

" + "documentation":"

The alias of the Key Management Service (KMS) key used to encrypt the parameter. Applies to SecureString parameters only

" }, "LastModifiedDate":{ "shape":"DateTime", @@ -12668,7 +12699,7 @@ }, "KeyId":{ "shape":"ParameterKeyId", - "documentation":"

The ID of the query key used for this parameter.

" + "documentation":"

The alias of the Key Management Service (KMS) key used to encrypt the parameter. Applies to SecureString parameters only.

" }, "LastModifiedDate":{ "shape":"DateTime", @@ -13095,7 +13126,7 @@ }, "CVEIds":{ "shape":"PatchCVEIds", - "documentation":"

The IDs of one or more Common Vulnerabilities and Exposure (CVE) issues that are resolved by the patch.

" + "documentation":"

The IDs of one or more Common Vulnerabilities and Exposure (CVE) issues that are resolved by the patch.

Currently, CVE ID values are reported only for patches with a status of Missing or Failed.

" } }, "documentation":"

Information about the state of a patch on a particular managed node as it relates to the patch baseline used to patch the node.

" @@ -15894,6 +15925,11 @@ "documentation":"

Number of days to wait after the scheduled day to run an association. For example, if you specified a cron schedule of cron(0 0 ? * THU#2 *), you could specify an offset of 3 to run the association each Sunday after the second Thursday of the month. For more information about cron schedules for associations, see Reference: Cron and rate expressions for Systems Manager in the Amazon Web Services Systems Manager User Guide.

To use offsets, you must specify the ApplyOnlyAtCronInterval parameter. This option tells the system not to run an association immediately after you create it.

", "box":true }, + "Duration":{ + "shape":"Duration", + "documentation":"

The number of hours the association can run before it is canceled. Duration applies to associations that are currently running, and any pending and in progress commands on all targets. If a target was taken offline for the association to run, it is made available again immediately, without a reboot.

The Duration parameter applies only when both these conditions are true:

", + "box":true + }, "TargetMaps":{ "shape":"TargetMaps", "documentation":"

A key-value mapping of document parameters to target resources. Both Targets and TargetMaps can't be specified together.

", @@ -16019,7 +16055,7 @@ }, "VersionName":{ "shape":"DocumentVersionName", - "documentation":"

An optional field specifying the version of the artifact you are updating with the document. For example, \"Release 12, Update 6\". This value is unique across all versions of a document, and can't be changed.

" + "documentation":"

An optional field specifying the version of the artifact you are updating with the document. For example, 12.6. This value is unique across all versions of a document, and can't be changed.

" }, "DocumentVersion":{ "shape":"DocumentVersion", diff -Nru awscli-2.15.9/awscli/botocore/data/storagegateway/2013-06-30/endpoint-rule-set-1.json awscli-2.15.22/awscli/botocore/data/storagegateway/2013-06-30/endpoint-rule-set-1.json --- awscli-2.15.9/awscli/botocore/data/storagegateway/2013-06-30/endpoint-rule-set-1.json 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/awscli/botocore/data/storagegateway/2013-06-30/endpoint-rule-set-1.json 2024-02-21 17:34:54.000000000 +0000 @@ -40,7 +40,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -83,7 +82,8 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -96,7 +96,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -110,7 +109,6 @@ "assign": "PartitionResult" } ], - "type": "tree", "rules": [ { "conditions": [ @@ -133,7 +131,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -168,7 +165,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -179,14 +175,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS and DualStack are enabled, but this partition does not support one or both", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -200,14 +198,12 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ - true, { "fn": "getAttr", "argv": [ @@ -216,11 +212,11 @@ }, "supportsFIPS" ] - } + }, + true ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -231,14 +227,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS is enabled but this partition does not support FIPS", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -252,7 +250,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -272,7 +269,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -283,14 +279,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "DualStack is enabled but this 
partition does not support DualStack", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [], @@ -301,9 +299,11 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" } - ] + ], + "type": "tree" }, { "conditions": [], diff -Nru awscli-2.15.9/awscli/botocore/data/storagegateway/2013-06-30/service-2.json awscli-2.15.22/awscli/botocore/data/storagegateway/2013-06-30/service-2.json --- awscli-2.15.9/awscli/botocore/data/storagegateway/2013-06-30/service-2.json 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/awscli/botocore/data/storagegateway/2013-06-30/service-2.json 2024-02-21 17:34:54.000000000 +0000 @@ -656,7 +656,7 @@ {"shape":"InvalidGatewayRequestException"}, {"shape":"InternalServerError"} ], - "documentation":"

Returns a description of the specified Amazon Resource Name (ARN) of virtual tapes. If a TapeARN is not specified, returns a description of all virtual tapes associated with the specified gateway. This operation is only supported in the tape gateway type.

" + "documentation":"

Returns a description of virtual tapes that correspond to the specified Amazon Resource Names (ARNs). If TapeARN is not specified, returns a description of the virtual tapes associated with the specified gateway. This operation is only supported for the tape gateway type.

The operation supports pagination. By default, the operation returns a maximum of up to 100 tapes. You can optionally specify the Limit field in the body to limit the number of tapes in the response. If the number of tapes returned in the response is truncated, the response includes a Marker field. You can use this Marker value in your subsequent request to retrieve the next set of tapes.

" }, "DescribeUploadBuffer":{ "name":"DescribeUploadBuffer", @@ -922,7 +922,7 @@ {"shape":"InvalidGatewayRequestException"}, {"shape":"InternalServerError"} ], - "documentation":"

Sends you notification through CloudWatch Events when all files written to your file share have been uploaded to S3. Amazon S3.

Storage Gateway can send a notification through Amazon CloudWatch Events when all files written to your file share up to that point in time have been uploaded to Amazon S3. These files include files written to the file share up to the time that you make a request for notification. When the upload is done, Storage Gateway sends you notification through an Amazon CloudWatch Event. You can configure CloudWatch Events to send the notification through event targets such as Amazon SNS or Lambda function. This operation is only supported for S3 File Gateways.

For more information, see Getting file upload notification in the Amazon S3 File Gateway User Guide.

" + "documentation":"

Sends you notification through CloudWatch Events when all files written to your file share have been uploaded to Amazon S3.

Storage Gateway can send a notification through Amazon CloudWatch Events when all files written to your file share up to that point in time have been uploaded to Amazon S3. These files include files written to the file share up to the time that you make a request for notification. When the upload is done, Storage Gateway sends you notification through an Amazon CloudWatch Event. You can configure CloudWatch Events to send the notification through event targets such as Amazon SNS or Lambda function. This operation is only supported for S3 File Gateways.

For more information, see Getting file upload notification in the Amazon S3 File Gateway User Guide.

" }, "RefreshCache":{ "name":"RefreshCache", @@ -936,7 +936,7 @@ {"shape":"InvalidGatewayRequestException"}, {"shape":"InternalServerError"} ], - "documentation":"

Refreshes the cached inventory of objects for the specified file share. This operation finds objects in the Amazon S3 bucket that were added, removed, or replaced since the gateway last listed the bucket's contents and cached the results. This operation does not import files into the S3 File Gateway cache storage. It only updates the cached inventory to reflect changes in the inventory of the objects in the S3 bucket. This operation is only supported in the S3 File Gateway types.

You can subscribe to be notified through an Amazon CloudWatch event when your RefreshCache operation completes. For more information, see Getting notified about file operations in the Storage Gateway User Guide. This operation is Only supported for S3 File Gateways.

When this API is called, it only initiates the refresh operation. When the API call completes and returns a success code, it doesn't necessarily mean that the file refresh has completed. You should use the refresh-complete notification to determine that the operation has completed before you check for new files on the gateway file share. You can subscribe to be notified through a CloudWatch event when your RefreshCache operation completes.

Throttle limit: This API is asynchronous, so the gateway will accept no more than two refreshes at any time. We recommend using the refresh-complete CloudWatch event notification before issuing additional requests. For more information, see Getting notified about file operations in the Storage Gateway User Guide.

  • Wait at least 60 seconds between consecutive RefreshCache API requests.

  • If you invoke the RefreshCache API when two requests are already being processed, any new request will cause an InvalidGatewayRequestException error because too many requests were sent to the server.

The S3 bucket name does not need to be included when entering the list of folders in the FolderList parameter.

For more information, see Getting notified about file operations in the Storage Gateway User Guide.

" + "documentation":"

Refreshes the cached inventory of objects for the specified file share. This operation finds objects in the Amazon S3 bucket that were added, removed, or replaced since the gateway last listed the bucket's contents and cached the results. This operation does not import files into the S3 File Gateway cache storage. It only updates the cached inventory to reflect changes in the inventory of the objects in the S3 bucket. This operation is only supported in the S3 File Gateway types.

You can subscribe to be notified through an Amazon CloudWatch event when your RefreshCache operation completes. For more information, see Getting notified about file operations in the Amazon S3 File Gateway User Guide. This operation is Only supported for S3 File Gateways.

When this API is called, it only initiates the refresh operation. When the API call completes and returns a success code, it doesn't necessarily mean that the file refresh has completed. You should use the refresh-complete notification to determine that the operation has completed before you check for new files on the gateway file share. You can subscribe to be notified through a CloudWatch event when your RefreshCache operation completes.

Throttle limit: This API is asynchronous, so the gateway will accept no more than two refreshes at any time. We recommend using the refresh-complete CloudWatch event notification before issuing additional requests. For more information, see Getting notified about file operations in the Amazon S3 File Gateway User Guide.

  • Wait at least 60 seconds between consecutive RefreshCache API requests.

  • If you invoke the RefreshCache API when two requests are already being processed, any new request will cause an InvalidGatewayRequestException error because too many requests were sent to the server.

The S3 bucket name does not need to be included when entering the list of folders in the FolderList parameter.

For more information, see Getting notified about file operations in the Amazon S3 File Gateway User Guide.

" }, "RemoveTagsFromResource":{ "name":"RemoveTagsFromResource", @@ -1034,7 +1034,7 @@ {"shape":"InvalidGatewayRequestException"}, {"shape":"InternalServerError"} ], - "documentation":"

Shuts down a gateway. To specify which gateway to shut down, use the Amazon Resource Name (ARN) of the gateway in the body of your request.

The operation shuts down the gateway service component running in the gateway's virtual machine (VM) and not the host VM.

If you want to shut down the VM, it is recommended that you first shut down the gateway component in the VM to avoid unpredictable conditions.

After the gateway is shutdown, you cannot call any other API except StartGateway, DescribeGatewayInformation, and ListGateways. For more information, see ActivateGateway. Your applications cannot read from or write to the gateway's storage volumes, and there are no snapshots taken.

When you make a shutdown request, you will get a 200 OK success response immediately. However, it might take some time for the gateway to shut down. You can call the DescribeGatewayInformation API to check the status. For more information, see ActivateGateway.

If do not intend to use the gateway again, you must delete the gateway (using DeleteGateway) to no longer pay software charges associated with the gateway.

" + "documentation":"

Shuts down a Tape Gateway or Volume Gateway. To specify which gateway to shut down, use the Amazon Resource Name (ARN) of the gateway in the body of your request.

This API action cannot be used to shut down S3 File Gateway or FSx File Gateway.

The operation shuts down the gateway service component running in the gateway's virtual machine (VM) and not the host VM.

If you want to shut down the VM, it is recommended that you first shut down the gateway component in the VM to avoid unpredictable conditions.

After the gateway is shutdown, you cannot call any other API except StartGateway, DescribeGatewayInformation, and ListGateways. For more information, see ActivateGateway. Your applications cannot read from or write to the gateway's storage volumes, and there are no snapshots taken.

When you make a shutdown request, you will get a 200 OK success response immediately. However, it might take some time for the gateway to shut down. You can call the DescribeGatewayInformation API to check the status. For more information, see ActivateGateway.

If do not intend to use the gateway again, you must delete the gateway (using DeleteGateway) to no longer pay software charges associated with the gateway.

" }, "StartAvailabilityMonitorTest":{ "name":"StartAvailabilityMonitorTest", @@ -3808,6 +3808,14 @@ "HostEnvironmentId":{ "shape":"HostEnvironmentId", "documentation":"

A unique identifier for the specific instance of the host platform running the gateway. This value is only available for certain host environments, and its format depends on the host environment type.

" + }, + "DeprecationDate":{ + "shape":"DeprecationDate", + "documentation":"

Date after which this gateway will not receive software updates for new features and bug fixes.

" + }, + "SoftwareVersion":{ + "shape":"SoftwareVersion", + "documentation":"

The version number of the software running on the gateway appliance.

" } }, "documentation":"

Describes a gateway object.

" diff -Nru awscli-2.15.9/awscli/botocore/data/supplychain/2024-01-01/endpoint-rule-set-1.json awscli-2.15.22/awscli/botocore/data/supplychain/2024-01-01/endpoint-rule-set-1.json --- awscli-2.15.9/awscli/botocore/data/supplychain/2024-01-01/endpoint-rule-set-1.json 1970-01-01 00:00:00.000000000 +0000 +++ awscli-2.15.22/awscli/botocore/data/supplychain/2024-01-01/endpoint-rule-set-1.json 2024-02-21 17:34:54.000000000 +0000 @@ -0,0 +1,350 @@ +{ + "version": "1.0", + "parameters": { + "Region": { + "builtIn": "AWS::Region", + "required": false, + "documentation": "The AWS region used to dispatch the request.", + "type": "String" + }, + "UseDualStack": { + "builtIn": "AWS::UseDualStack", + "required": true, + "default": false, + "documentation": "When true, use the dual-stack endpoint. If the configured endpoint does not support dual-stack, dispatching the request MAY return an error.", + "type": "Boolean" + }, + "UseFIPS": { + "builtIn": "AWS::UseFIPS", + "required": true, + "default": false, + "documentation": "When true, send this request to the FIPS-compliant regional endpoint. 
If the configured endpoint does not have a FIPS compliant endpoint, dispatching the request will return an error.", + "type": "Boolean" + }, + "Endpoint": { + "builtIn": "SDK::Endpoint", + "required": false, + "documentation": "Override the endpoint used to send this request", + "type": "String" + } + }, + "rules": [ + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Endpoint" + } + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + } + ], + "type": "tree" + }, + { + "conditions": [], + "rules": [ + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "aws.partition", + "argv": [ + { + "ref": "Region" + } + ], + "assign": "PartitionResult" + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + }, + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "rules": [ + { + "conditions": [], + "rules": [ + { + 
"conditions": [], + "endpoint": { + "url": "https://scn-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + } + ], + "type": "tree" + }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ], + "type": "tree" + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + }, + true + ] + } + ], + "rules": [ + { + "conditions": [], + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://scn-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + } + ], + "type": "tree" + }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ], + "type": "tree" + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "rules": [ + { + "conditions": [], + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://scn.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + } + ], + "type": "tree" + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" + } + ], + "type": "tree" + }, + { + "conditions": [], + "rules": [ + { + "conditions": [], + "endpoint": { + "url": 
"https://scn.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + } + ], + "type": "tree" + } + ], + "type": "tree" + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" + } + ], + "type": "tree" + } + ] +} \ No newline at end of file diff -Nru awscli-2.15.9/awscli/botocore/data/supplychain/2024-01-01/paginators-1.json awscli-2.15.22/awscli/botocore/data/supplychain/2024-01-01/paginators-1.json --- awscli-2.15.9/awscli/botocore/data/supplychain/2024-01-01/paginators-1.json 1970-01-01 00:00:00.000000000 +0000 +++ awscli-2.15.22/awscli/botocore/data/supplychain/2024-01-01/paginators-1.json 2024-02-21 17:34:54.000000000 +0000 @@ -0,0 +1,3 @@ +{ + "pagination": {} +} diff -Nru awscli-2.15.9/awscli/botocore/data/supplychain/2024-01-01/service-2.json awscli-2.15.22/awscli/botocore/data/supplychain/2024-01-01/service-2.json --- awscli-2.15.9/awscli/botocore/data/supplychain/2024-01-01/service-2.json 1970-01-01 00:00:00.000000000 +0000 +++ awscli-2.15.22/awscli/botocore/data/supplychain/2024-01-01/service-2.json 2024-02-21 17:34:54.000000000 +0000 @@ -0,0 +1,274 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2024-01-01", + "endpointPrefix":"scn", + "jsonVersion":"1.1", + "protocol":"rest-json", + "serviceFullName":"AWS Supply Chain", + "serviceId":"SupplyChain", + "signatureVersion":"v4", + "signingName":"scn", + "uid":"supplychain-2024-01-01" + }, + "operations":{ + "CreateBillOfMaterialsImportJob":{ + "name":"CreateBillOfMaterialsImportJob", + "http":{ + "method":"POST", + "requestUri":"/api/configuration/instances/{instanceId}/bill-of-materials-import-jobs", + "responseCode":200 + }, + "input":{"shape":"CreateBillOfMaterialsImportJobRequest"}, + "output":{"shape":"CreateBillOfMaterialsImportJobResponse"}, + "errors":[ + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"}, + 
{"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ConflictException"} + ], + "documentation":"

CreateBillOfMaterialsImportJob creates an import job for the Product Bill Of Materials (BOM) entity. For information on the product_bom entity, see the AWS Supply Chain User Guide.

The CSV file must be located in an Amazon S3 location accessible to AWS Supply Chain. It is recommended to use the same Amazon S3 bucket created during your AWS Supply Chain instance creation.

", + "idempotent":true + }, + "GetBillOfMaterialsImportJob":{ + "name":"GetBillOfMaterialsImportJob", + "http":{ + "method":"GET", + "requestUri":"/api/configuration/instances/{instanceId}/bill-of-materials-import-jobs/{jobId}", + "responseCode":200 + }, + "input":{"shape":"GetBillOfMaterialsImportJobRequest"}, + "output":{"shape":"GetBillOfMaterialsImportJobResponse"}, + "errors":[ + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ConflictException"} + ], + "documentation":"

Get status and details of a BillOfMaterialsImportJob.

" + } + }, + "shapes":{ + "AccessDeniedException":{ + "type":"structure", + "members":{ + "message":{"shape":"String"} + }, + "documentation":"

You do not have the required privileges to perform this action.

", + "error":{ + "httpStatusCode":403, + "senderFault":true + }, + "exception":true + }, + "BillOfMaterialsImportJob":{ + "type":"structure", + "required":[ + "instanceId", + "jobId", + "status", + "s3uri" + ], + "members":{ + "instanceId":{ + "shape":"UUID", + "documentation":"

The BillOfMaterialsImportJob instanceId.

" + }, + "jobId":{ + "shape":"UUID", + "documentation":"

The BillOfMaterialsImportJob jobId.

" + }, + "status":{ + "shape":"ConfigurationJobStatus", + "documentation":"

The BillOfMaterialsImportJob ConfigurationJobStatus.

" + }, + "s3uri":{ + "shape":"ConfigurationS3Uri", + "documentation":"

The S3 URI from which the CSV is read.

" + }, + "message":{ + "shape":"String", + "documentation":"

When the BillOfMaterialsImportJob has reached a terminal state, there will be a message.

" + } + }, + "documentation":"

The BillOfMaterialsImportJob details.

" + }, + "ClientToken":{ + "type":"string", + "documentation":"

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request.

", + "max":126, + "min":33 + }, + "ConfigurationJobStatus":{ + "type":"string", + "documentation":"

The status of the job.

", + "enum":[ + "NEW", + "FAILED", + "IN_PROGRESS", + "QUEUED", + "SUCCESS" + ] + }, + "ConfigurationS3Uri":{ + "type":"string", + "min":10, + "pattern":"[sS]3://[a-z0-9][a-z0-9.-]{1,61}[a-z0-9]/.+" + }, + "ConflictException":{ + "type":"structure", + "members":{ + "message":{"shape":"String"} + }, + "documentation":"

Updating or deleting a resource can cause an inconsistent state.

", + "error":{ + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + }, + "CreateBillOfMaterialsImportJobRequest":{ + "type":"structure", + "required":[ + "instanceId", + "s3uri" + ], + "members":{ + "instanceId":{ + "shape":"UUID", + "documentation":"

The AWS Supply Chain instance identifier.

", + "location":"uri", + "locationName":"instanceId" + }, + "s3uri":{ + "shape":"ConfigurationS3Uri", + "documentation":"

The S3 URI of the CSV file to be imported. The bucket must grant permissions for AWS Supply Chain to read the file.

" + }, + "clientToken":{ + "shape":"ClientToken", + "documentation":"

An idempotency token.

", + "idempotencyToken":true + } + }, + "documentation":"

The request parameters for CreateBillOfMaterialsImportJob.

" + }, + "CreateBillOfMaterialsImportJobResponse":{ + "type":"structure", + "required":["jobId"], + "members":{ + "jobId":{ + "shape":"UUID", + "documentation":"

The new BillOfMaterialsImportJob identifier.

" + } + }, + "documentation":"

The response parameters of CreateBillOfMaterialsImportJob.

" + }, + "GetBillOfMaterialsImportJobRequest":{ + "type":"structure", + "required":[ + "instanceId", + "jobId" + ], + "members":{ + "instanceId":{ + "shape":"UUID", + "documentation":"

The AWS Supply Chain instance identifier.

", + "location":"uri", + "locationName":"instanceId" + }, + "jobId":{ + "shape":"UUID", + "documentation":"

The BillOfMaterialsImportJob identifier.

", + "location":"uri", + "locationName":"jobId" + } + }, + "documentation":"

The request parameters for GetBillOfMaterialsImportJob.

" + }, + "GetBillOfMaterialsImportJobResponse":{ + "type":"structure", + "required":["job"], + "members":{ + "job":{ + "shape":"BillOfMaterialsImportJob", + "documentation":"

The BillOfMaterialsImportJob.

" + } + }, + "documentation":"

The response parameters for GetBillOfMaterialsImportJob.

" + }, + "InternalServerException":{ + "type":"structure", + "members":{ + "message":{"shape":"String"} + }, + "documentation":"

Unexpected error during processing of request.

", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true, + "retryable":{"throttling":false} + }, + "ResourceNotFoundException":{ + "type":"structure", + "members":{ + "message":{"shape":"String"} + }, + "documentation":"

Request references a resource which does not exist.

", + "error":{ + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "ServiceQuotaExceededException":{ + "type":"structure", + "members":{ + "message":{"shape":"String"} + }, + "documentation":"

Request would cause a service quota to be exceeded.

", + "error":{ + "httpStatusCode":402, + "senderFault":true + }, + "exception":true + }, + "String":{"type":"string"}, + "ThrottlingException":{ + "type":"structure", + "members":{ + "message":{"shape":"String"} + }, + "documentation":"

Request was denied due to request throttling.

", + "error":{ + "httpStatusCode":429, + "senderFault":true + }, + "exception":true, + "retryable":{"throttling":true} + }, + "UUID":{ + "type":"string", + "max":36, + "min":36, + "pattern":"[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}" + }, + "ValidationException":{ + "type":"structure", + "members":{ + "message":{"shape":"String"} + }, + "documentation":"

The input does not satisfy the constraints specified by an AWS service.

", + "error":{ + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + }, + "documentation":"

AWS Supply Chain is a cloud-based application that works with your enterprise resource planning (ERP) and supply chain management systems. Using AWS Supply Chain, you can connect and extract your inventory, supply, and demand related data from existing ERP or supply chain systems into a single data model.

The AWS Supply Chain API supports configuration data import for Supply Planning.

All AWS Supply chain API operations are Amazon-authenticated and certificate-signed. They not only require the use of the AWS SDK, but also allow for the exclusive use of AWS Identity and Access Management users and roles to help facilitate access, trust, and permission policies.

" +} diff -Nru awscli-2.15.9/awscli/botocore/data/transfer/2018-11-05/service-2.json awscli-2.15.22/awscli/botocore/data/transfer/2018-11-05/service-2.json --- awscli-2.15.9/awscli/botocore/data/transfer/2018-11-05/service-2.json 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/awscli/botocore/data/transfer/2018-11-05/service-2.json 2024-02-21 17:34:54.000000000 +0000 @@ -2244,6 +2244,10 @@ "SftpConfig":{ "shape":"SftpConnectorConfig", "documentation":"

A structure that contains the parameters for an SFTP connector object.

" + }, + "ServiceManagedEgressIpAddresses":{ + "shape":"ServiceManagedEgressIpAddresses", + "documentation":"

The list of egress IP addresses of this connector. These IP addresses are assigned automatically when you create the connector.

" } }, "documentation":"

Describes the parameters for the connector, as identified by the ConnectorId.

" @@ -2467,6 +2471,10 @@ "S3StorageOptions":{ "shape":"S3StorageOptions", "documentation":"

Specifies whether or not performance for your Amazon S3 directories is optimized. This is disabled by default.

By default, home directory mappings have a TYPE of DIRECTORY. If you enable this option, you would then need to explicitly set the HomeDirectoryMapEntry Type to FILE if you want a mapping to have a file target.

" + }, + "As2ServiceManagedEgressIpAddresses":{ + "shape":"ServiceManagedEgressIpAddresses", + "documentation":"

The list of egress IP addresses of this server. These IP addresses are only relevant for servers that use the AS2 protocol. They are used for sending asynchronous MDNs.

These IP addresses are assigned automatically when you create an AS2 server. Additionally, if you update an existing server and add the AS2 protocol, static IP addresses are assigned as well.

" } }, "documentation":"

Describes the properties of a file transfer protocol-enabled server that was specified.

" @@ -2850,7 +2858,7 @@ }, "DirectoryId":{ "shape":"DirectoryId", - "documentation":"

The identifier of the Directory Service directory that you want to stop sharing.

" + "documentation":"

The identifier of the Directory Service directory that you want to use as your identity provider.

" }, "Function":{ "shape":"Function", @@ -4189,6 +4197,14 @@ "pattern":"s-([0-9a-f]{17})" }, "ServiceErrorMessage":{"type":"string"}, + "ServiceManagedEgressIpAddress":{ + "type":"string", + "pattern":"\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}" + }, + "ServiceManagedEgressIpAddresses":{ + "type":"list", + "member":{"shape":"ServiceManagedEgressIpAddress"} + }, "ServiceMetadata":{ "type":"structure", "required":["UserDetails"], diff -Nru awscli-2.15.9/awscli/botocore/data/wafv2/2019-07-29/service-2.json awscli-2.15.22/awscli/botocore/data/wafv2/2019-07-29/service-2.json --- awscli-2.15.9/awscli/botocore/data/wafv2/2019-07-29/service-2.json 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/awscli/botocore/data/wafv2/2019-07-29/service-2.json 2024-02-21 17:34:54.000000000 +0000 @@ -156,6 +156,23 @@ ], "documentation":"

Creates a WebACL per the specifications provided.

A web ACL defines a collection of rules to use to inspect and control web requests. Each rule has a statement that defines what to look for in web requests and an action that WAF applies to requests that match the statement. In the web ACL, you assign a default action to take (allow, block) for any request that does not match any of the rules. The rules in a web ACL can be a combination of the types Rule, RuleGroup, and managed rule group. You can associate a web ACL with one or more Amazon Web Services resources to protect. The resources can be an Amazon CloudFront distribution, an Amazon API Gateway REST API, an Application Load Balancer, an AppSync GraphQL API, an Amazon Cognito user pool, an App Runner service, or an Amazon Web Services Verified Access instance.

" }, + "DeleteAPIKey":{ + "name":"DeleteAPIKey", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteAPIKeyRequest"}, + "output":{"shape":"DeleteAPIKeyResponse"}, + "errors":[ + {"shape":"WAFInternalErrorException"}, + {"shape":"WAFNonexistentItemException"}, + {"shape":"WAFOptimisticLockException"}, + {"shape":"WAFInvalidParameterException"}, + {"shape":"WAFInvalidOperationException"} + ], + "documentation":"

Deletes the specified API key.

After you delete a key, it can take up to 24 hours for WAF to disallow use of the key in all regions.

" + }, "DeleteFirewallManagerRuleGroups":{ "name":"DeleteFirewallManagerRuleGroups", "http":{ @@ -1680,7 +1697,7 @@ }, "TokenDomains":{ "shape":"APIKeyTokenDomains", - "documentation":"

The client application domains that you want to use this API key for.

Example JSON: \"TokenDomains\": [\"abc.com\", \"store.abc.com\"]

Public suffixes aren't allowed. For example, you can't use usa.gov or co.uk as token domains.

" + "documentation":"

The client application domains that you want to use this API key for.

Example JSON: \"TokenDomains\": [\"abc.com\", \"store.abc.com\"]

Public suffixes aren't allowed. For example, you can't use gov.au or co.uk as token domains.

" } } }, @@ -1879,7 +1896,7 @@ }, "TokenDomains":{ "shape":"TokenDomains", - "documentation":"

Specifies the domains that WAF should accept in a web request token. This enables the use of tokens across multiple protected websites. When WAF provides a token, it uses the domain of the Amazon Web Services resource that the web ACL is protecting. If you don't specify a list of token domains, WAF accepts tokens only for the domain of the protected resource. With a token domain list, WAF accepts the resource's host domain plus all domains in the token domain list, including their prefixed subdomains.

Example JSON: \"TokenDomains\": { \"mywebsite.com\", \"myotherwebsite.com\" }

Public suffixes aren't allowed. For example, you can't use usa.gov or co.uk as token domains.

" + "documentation":"

Specifies the domains that WAF should accept in a web request token. This enables the use of tokens across multiple protected websites. When WAF provides a token, it uses the domain of the Amazon Web Services resource that the web ACL is protecting. If you don't specify a list of token domains, WAF accepts tokens only for the domain of the protected resource. With a token domain list, WAF accepts the resource's host domain plus all domains in the token domain list, including their prefixed subdomains.

Example JSON: \"TokenDomains\": { \"mywebsite.com\", \"myotherwebsite.com\" }

Public suffixes aren't allowed. For example, you can't use gov.au or co.uk as token domains.

" }, "AssociationConfig":{ "shape":"AssociationConfig", @@ -2005,6 +2022,28 @@ }, "documentation":"

In a WebACL, this is the action that you want WAF to perform when a web request doesn't match any of the rules in the WebACL. The default action must be a terminating action.

" }, + "DeleteAPIKeyRequest":{ + "type":"structure", + "required":[ + "Scope", + "APIKey" + ], + "members":{ + "Scope":{ + "shape":"Scope", + "documentation":"

Specifies whether this is for an Amazon CloudFront distribution or for a regional application. A regional application can be an Application Load Balancer (ALB), an Amazon API Gateway REST API, an AppSync GraphQL API, an Amazon Cognito user pool, an App Runner service, or an Amazon Web Services Verified Access instance.

To work with CloudFront, you must also specify the Region US East (N. Virginia) as follows:

" + }, + "APIKey":{ + "shape":"APIKey", + "documentation":"

The encrypted API key that you want to delete.

" + } + } + }, + "DeleteAPIKeyResponse":{ + "type":"structure", + "members":{ + } + }, "DeleteFirewallManagerRuleGroupsRequest":{ "type":"structure", "required":[ @@ -6054,7 +6093,7 @@ }, "TokenDomains":{ "shape":"TokenDomains", - "documentation":"

Specifies the domains that WAF should accept in a web request token. This enables the use of tokens across multiple protected websites. When WAF provides a token, it uses the domain of the Amazon Web Services resource that the web ACL is protecting. If you don't specify a list of token domains, WAF accepts tokens only for the domain of the protected resource. With a token domain list, WAF accepts the resource's host domain plus all domains in the token domain list, including their prefixed subdomains.

Example JSON: \"TokenDomains\": { \"mywebsite.com\", \"myotherwebsite.com\" }

Public suffixes aren't allowed. For example, you can't use usa.gov or co.uk as token domains.

" + "documentation":"

Specifies the domains that WAF should accept in a web request token. This enables the use of tokens across multiple protected websites. When WAF provides a token, it uses the domain of the Amazon Web Services resource that the web ACL is protecting. If you don't specify a list of token domains, WAF accepts tokens only for the domain of the protected resource. With a token domain list, WAF accepts the resource's host domain plus all domains in the token domain list, including their prefixed subdomains.

Example JSON: \"TokenDomains\": { \"mywebsite.com\", \"myotherwebsite.com\" }

Public suffixes aren't allowed. For example, you can't use gov.au or co.uk as token domains.

" }, "AssociationConfig":{ "shape":"AssociationConfig", diff -Nru awscli-2.15.9/awscli/botocore/data/wisdom/2020-10-19/service-2.json awscli-2.15.22/awscli/botocore/data/wisdom/2020-10-19/service-2.json --- awscli-2.15.9/awscli/botocore/data/wisdom/2020-10-19/service-2.json 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/awscli/botocore/data/wisdom/2020-10-19/service-2.json 2024-02-21 17:34:54.000000000 +0000 @@ -352,7 +352,9 @@ {"shape":"AccessDeniedException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Retrieves recommendations for the specified session. To avoid retrieving the same recommendations in subsequent calls, use NotifyRecommendationsReceived. This API supports long-polling behavior with the waitTimeSeconds parameter. Short poll is the default behavior and only returns recommendations already available. To perform a manual query against an assistant, use QueryAssistant.

" + "documentation":"

Retrieves recommendations for the specified session. To avoid retrieving the same recommendations in subsequent calls, use NotifyRecommendationsReceived. This API supports long-polling behavior with the waitTimeSeconds parameter. Short poll is the default behavior and only returns recommendations already available. To perform a manual query against an assistant, use QueryAssistant.

", + "deprecated":true, + "deprecatedMessage":"GetRecommendations API will be discontinued starting June 1, 2024. To receive generative responses after March 1, 2024 you will need to create a new Assistant in the Connect console and integrate the Amazon Q in Connect JavaScript library (amazon-q-connectjs) into your applications." }, "GetSession":{ "name":"GetSession", @@ -509,7 +511,9 @@ {"shape":"AccessDeniedException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Performs a manual search against the specified assistant. To retrieve recommendations for an assistant, use GetRecommendations.

" + "documentation":"

Performs a manual search against the specified assistant. To retrieve recommendations for an assistant, use GetRecommendations.

", + "deprecated":true, + "deprecatedMessage":"QueryAssistant API will be discontinued starting June 1, 2024. To receive generative responses after March 1, 2024 you will need to create a new Assistant in the Connect console and integrate the Amazon Q in Connect JavaScript library (amazon-q-connectjs) into your applications." }, "RemoveKnowledgeBaseTemplateUri":{ "name":"RemoveKnowledgeBaseTemplateUri", diff -Nru awscli-2.15.9/awscli/botocore/data/workspaces/2015-04-08/service-2.json awscli-2.15.22/awscli/botocore/data/workspaces/2015-04-08/service-2.json --- awscli-2.15.9/awscli/botocore/data/workspaces/2015-04-08/service-2.json 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/awscli/botocore/data/workspaces/2015-04-08/service-2.json 2024-02-21 17:34:54.000000000 +0000 @@ -258,7 +258,7 @@ {"shape":"ResourceLimitExceededException"}, {"shape":"InvalidParameterValuesException"} ], - "documentation":"

Creates one or more WorkSpaces.

This operation is asynchronous and returns before the WorkSpaces are created.

  • The MANUAL running mode value is only supported by Amazon WorkSpaces Core. Contact your account team to be allow-listed to use this value. For more information, see Amazon WorkSpaces Core.

  • You don't need to specify the PCOIP protocol for Linux bundles because WSP is the default protocol for those bundles.

  • Ensure you review your running mode to ensure you are using a running mode that is optimal for your needs and budget. For more information on switching running modes, see Can I switch between hourly and monthly billing?

" + "documentation":"

Creates one or more WorkSpaces.

This operation is asynchronous and returns before the WorkSpaces are created.

  • The MANUAL running mode value is only supported by Amazon WorkSpaces Core. Contact your account team to be allow-listed to use this value. For more information, see Amazon WorkSpaces Core.

  • You don't need to specify the PCOIP protocol for Linux bundles because WSP is the default protocol for those bundles.

" }, "DeleteClientBranding":{ "name":"DeleteClientBranding", @@ -3074,6 +3074,10 @@ "NextToken":{ "shape":"PaginationToken", "documentation":"

If you received a NextToken from a previous call that was paginated, provide this token to receive the next set of results.

" + }, + "WorkspaceName":{ + "shape":"WorkspaceName", + "documentation":"

The name of the user-decoupled WorkSpace.

" } } }, @@ -4955,6 +4959,10 @@ "shape":"BooleanObject", "documentation":"

Indicates whether the data stored on the root volume is encrypted.

" }, + "WorkspaceName":{ + "shape":"WorkspaceName", + "documentation":"

The name of the user-decoupled WorkSpace.

" + }, "WorkspaceProperties":{ "shape":"WorkspaceProperties", "documentation":"

The properties of the WorkSpace.

" @@ -5395,12 +5403,16 @@ "type":"list", "member":{"shape":"Workspace"} }, + "WorkspaceName":{ + "type":"string", + "pattern":"^[a-zA-Z0-9_()][a-zA-Z0-9_.()-]{1,63}$" + }, "WorkspaceProperties":{ "type":"structure", "members":{ "RunningMode":{ "shape":"RunningMode", - "documentation":"

The running mode. For more information, see Manage the WorkSpace Running Mode.

  • The MANUAL value is only supported by Amazon WorkSpaces Core. Contact your account team to be allow-listed to use this value. For more information, see Amazon WorkSpaces Core.

  • Ensure you review your running mode to ensure you are using a running mode that is optimal for your needs and budget. For more information on switching running modes, see Can I switch between hourly and monthly billing?

" + "documentation":"

The running mode. For more information, see Manage the WorkSpace Running Mode.

The MANUAL value is only supported by Amazon WorkSpaces Core. Contact your account team to be allow-listed to use this value. For more information, see Amazon WorkSpaces Core.

" }, "RunningModeAutoStopTimeoutInMinutes":{ "shape":"RunningModeAutoStopTimeoutInMinutes", @@ -5443,7 +5455,7 @@ }, "UserName":{ "shape":"UserName", - "documentation":"

The user name of the user for the WorkSpace. This user name must exist in the Directory Service directory for the WorkSpace.

" + "documentation":"

The user name of the user for the WorkSpace. This user name must exist in the Directory Service directory for the WorkSpace.

The reserved keyword, [UNDEFINED], is used when creating user-decoupled WorkSpaces.

" }, "BundleId":{ "shape":"BundleId", @@ -5468,6 +5480,10 @@ "Tags":{ "shape":"TagList", "documentation":"

The tags for the WorkSpace.

" + }, + "WorkspaceName":{ + "shape":"WorkspaceName", + "documentation":"

The name of the user-decoupled WorkSpace.

" } }, "documentation":"

Describes the information used to create a WorkSpace.

" diff -Nru awscli-2.15.9/awscli/customizations/arguments.py awscli-2.15.22/awscli/customizations/arguments.py --- awscli-2.15.9/awscli/customizations/arguments.py 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/awscli/customizations/arguments.py 2024-02-21 17:34:54.000000000 +0000 @@ -14,6 +14,7 @@ import re from awscli.arguments import CustomArgument +from awscli.compat import compat_open from awscli.customizations.exceptions import ParamValidationError import jmespath @@ -127,12 +128,20 @@ """ if is_parsed_result_successful(parsed): contents = jmespath.search(self.query, parsed) - with open(self.value, 'w') as fp: + with compat_open( + self.value, 'w', access_permissions=self.perm) as fp: # Don't write 'None' to a file -- write ''. if contents is None: fp.write('') else: fp.write(contents) + # Even though the file is opened using the requested mode + # (e.g. 0o600), the mode is only applied if a new file is + # created. This means if the file already exists, its + # permissions will not be changed. So, the os.chmod call is + # retained here to preserve behavior of this argument always + # clobbering a preexisting file's permissions to the desired + # mode. 
os.chmod(self.value, self.perm) diff -Nru awscli-2.15.9/awscli/customizations/s3/subcommands.py awscli-2.15.22/awscli/customizations/s3/subcommands.py --- awscli-2.15.9/awscli/customizations/s3/subcommands.py 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/awscli/customizations/s3/subcommands.py 2024-02-21 17:34:54.000000000 +0000 @@ -15,6 +15,7 @@ import sys from botocore.client import Config +from botocore.utils import is_s3express_bucket from dateutil.parser import parse from dateutil.tz import tzlocal @@ -1199,6 +1200,21 @@ self._validate_streaming_paths() self._validate_path_args() self._validate_sse_c_args() + self._validate_not_s3_express_bucket_for_sync() + + def _validate_not_s3_express_bucket_for_sync(self): + if self.cmd == 'sync' and \ + (self._is_s3express_path(self.parameters['src']) or + self._is_s3express_path(self.parameters['dest'])): + raise ParamValidationError( + "Cannot use sync command with a directory bucket." + ) + + def _is_s3express_path(self, path): + if path.startswith("s3://"): + bucket = split_s3_bucket_key(path)[0] + return is_s3express_bucket(bucket) + return False def _validate_streaming_paths(self): self.parameters['is_stream'] = False diff -Nru awscli-2.15.9/awscli/examples/autoscaling/describe-auto-scaling-groups.rst awscli-2.15.22/awscli/examples/autoscaling/describe-auto-scaling-groups.rst --- awscli-2.15.9/awscli/examples/autoscaling/describe-auto-scaling-groups.rst 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/awscli/examples/autoscaling/describe-auto-scaling-groups.rst 2024-02-21 17:34:54.000000000 +0000 @@ -45,16 +45,17 @@ } } ], - "CreatedTime": "2020-10-28T02:39:22.152Z", - "VPCZoneIdentifier": "subnet-5ea0c127,subnet-6194ea3b,subnet-c934b782", + "CreatedTime": "2023-10-28T02:39:22.152Z", "SuspendedProcesses": [], + "VPCZoneIdentifier": "subnet-5ea0c127,subnet-6194ea3b,subnet-c934b782", "EnabledMetrics": [], "Tags": [], "TerminationPolicies": [ "Default" ], "NewInstancesProtectedFromScaleIn": false, - 
"ServiceLinkedRoleARN":"arn" + "ServiceLinkedRoleARN":"arn", + "TrafficSources": [] } ] } @@ -90,6 +91,61 @@ If the output includes a ``NextToken`` field, there are more groups. To get the additional groups, use the value of this field with the ``--starting-token`` option in a subsequent call as follows. :: - aws autoscaling describe-auto-scaling-groups --starting-token Z3M3LMPEXAMPLE + aws autoscaling describe-auto-scaling-groups \ + --starting-token Z3M3LMPEXAMPLE See example 1 for sample output. + +**Example 5: To describe Auto Scaling groups that use launch configurations** + +This example uses the ``--query`` option to describe Auto Scaling groups that use launch configurations. :: + + aws autoscaling describe-auto-scaling-groups \ + --query 'AutoScalingGroups[?LaunchConfigurationName!=`null`]' + +Output:: + + [ + { + "AutoScalingGroupName": "my-asg", + "AutoScalingGroupARN": "arn:aws:autoscaling:us-west-2:123456789012:autoScalingGroup:930d940e-891e-4781-a11a-7b0acd480f03:autoScalingGroupName/my-asg", + "LaunchConfigurationName": "my-lc", + "MinSize": 0, + "MaxSize": 1, + "DesiredCapacity": 1, + "DefaultCooldown": 300, + "AvailabilityZones": [ + "us-west-2a", + "us-west-2b", + "us-west-2c" + ], + "LoadBalancerNames": [], + "TargetGroupARNs": [], + "HealthCheckType": "EC2", + "HealthCheckGracePeriod": 0, + "Instances": [ + { + "InstanceId": "i-088c57934a6449037", + "InstanceType": "t2.micro", + "AvailabilityZone": "us-west-2c", + "HealthStatus": "Healthy", + "LifecycleState": "InService", + "LaunchConfigurationName": "my-lc", + "ProtectedFromScaleIn": false + } + ], + "CreatedTime": "2023-10-28T02:39:22.152Z", + "SuspendedProcesses": [], + "VPCZoneIdentifier": "subnet-5ea0c127,subnet-6194ea3b,subnet-c934b782", + "EnabledMetrics": [], + "Tags": [], + "TerminationPolicies": [ + "Default" + ], + "NewInstancesProtectedFromScaleIn": false, + "ServiceLinkedRoleARN":"arn", + "TrafficSources": [] + } + ] + +For more information, see `Filter AWS CLI output `__ in the 
*AWS Command Line Interface User Guide*. \ No newline at end of file diff -Nru awscli-2.15.9/awscli/examples/autoscaling/describe-auto-scaling-instances.rst awscli-2.15.22/awscli/examples/autoscaling/describe-auto-scaling-instances.rst --- awscli-2.15.9/awscli/examples/autoscaling/describe-auto-scaling-instances.rst 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/awscli/examples/autoscaling/describe-auto-scaling-instances.rst 2024-02-21 17:34:54.000000000 +0000 @@ -30,29 +30,36 @@ This example uses the ``--max-items`` option to specify how many instances to return with this call. :: - aws autoscaling describe-auto-scaling-instances --max-items 1 + aws autoscaling describe-auto-scaling-instances \ + --max-items 1 If the output includes a ``NextToken`` field, there are more instances. To get the additional instances, use the value of this field with the ``--starting-token`` option in a subsequent call as follows. :: - aws autoscaling describe-auto-scaling-instances --starting-token Z3M3LMPEXAMPLE + aws autoscaling describe-auto-scaling-instances \ + --starting-token Z3M3LMPEXAMPLE + +See example 1 for sample output. + +**Example 3: To describe instances that use launch configurations** + +This example uses the ``--query`` option to describe instances that use launch configurations. 
:: + + aws autoscaling describe-auto-scaling-instances \ + --query 'AutoScalingInstances[?LaunchConfigurationName!=`null`]' Output:: - { - "AutoScalingInstances": [ - { - "InstanceId": "i-06905f55584de02da", - "InstanceType": "t2.micro", - "AutoScalingGroupName": "my-asg", - "AvailabilityZone": "us-west-2b", - "LifecycleState": "InService", - "HealthStatus": "HEALTHY", - "ProtectedFromScaleIn": false, - "LaunchTemplate": { - "LaunchTemplateId": "lt-1234567890abcde12", - "LaunchTemplateName": "my-launch-template", - "Version": "1" - } - } - ] - } \ No newline at end of file + [ + { + "InstanceId": "i-088c57934a6449037", + "InstanceType": "t2.micro", + "AutoScalingGroupName": "my-asg", + "AvailabilityZone": "us-west-2c", + "LifecycleState": "InService", + "HealthStatus": "HEALTHY", + "LaunchConfigurationName": "my-lc", + "ProtectedFromScaleIn": false + } + ] + +For more information, see `Filter AWS CLI output `__ in the *AWS Command Line Interface User Guide*. \ No newline at end of file diff -Nru awscli-2.15.9/awscli/examples/autoscaling/describe-scheduled-actions.rst awscli-2.15.22/awscli/examples/autoscaling/describe-scheduled-actions.rst --- awscli-2.15.9/awscli/examples/autoscaling/describe-scheduled-actions.rst 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/awscli/examples/autoscaling/describe-scheduled-actions.rst 2024-02-21 17:34:54.000000000 +0000 @@ -13,16 +13,17 @@ "ScheduledActionName": "my-recurring-action", "Recurrence": "30 0 1 1,6,12 *", "ScheduledActionARN": "arn:aws:autoscaling:us-west-2:123456789012:scheduledUpdateGroupAction:8e86b655-b2e6-4410-8f29-b4f094d6871c:autoScalingGroupName/my-asg:scheduledActionName/my-recurring-action", - "StartTime": "2020-12-01T00:30:00Z", - "Time": "2020-12-01T00:30:00Z", + "StartTime": "2023-12-01T04:00:00Z", + "Time": "2023-12-01T04:00:00Z", "MinSize": 1, "MaxSize": 6, - "DesiredCapacity": 4 + "DesiredCapacity": 4, + "TimeZone": "America/New_York" } ] } -For more information, see `Scheduled scaling `__ in 
the *Amazon EC2 Auto Scaling User Guide*. +For more information, see `Scheduled scaling `__ in the *Amazon EC2 Auto Scaling User Guide*. **Example 2: To describe scheduled actions for the specified group** @@ -40,16 +41,17 @@ "ScheduledActionName": "my-recurring-action", "Recurrence": "30 0 1 1,6,12 *", "ScheduledActionARN": "arn:aws:autoscaling:us-west-2:123456789012:scheduledUpdateGroupAction:8e86b655-b2e6-4410-8f29-b4f094d6871c:autoScalingGroupName/my-asg:scheduledActionName/my-recurring-action", - "StartTime": "2020-12-01T00:30:00Z", - "Time": "2020-12-01T00:30:00Z", + "StartTime": "2023-12-01T04:00:00Z", + "Time": "2023-12-01T04:00:00Z", "MinSize": 1, "MaxSize": 6, - "DesiredCapacity": 4 + "DesiredCapacity": 4, + "TimeZone": "America/New_York" } ] } -For more information, see `Scheduled scaling `__ in the *Amazon EC2 Auto Scaling User Guide*. +For more information, see `Scheduled scaling `__ in the *Amazon EC2 Auto Scaling User Guide*. **Example 3: To describe the specified scheduled action** @@ -67,24 +69,24 @@ "ScheduledActionName": "my-recurring-action", "Recurrence": "30 0 1 1,6,12 *", "ScheduledActionARN": "arn:aws:autoscaling:us-west-2:123456789012:scheduledUpdateGroupAction:8e86b655-b2e6-4410-8f29-b4f094d6871c:autoScalingGroupName/my-asg:scheduledActionName/my-recurring-action", - "StartTime": "2020-12-01T00:30:00Z", - "Time": "2020-12-01T00:30:00Z", + "StartTime": "2023-12-01T04:00:00Z", + "Time": "2023-12-01T04:00:00Z", "MinSize": 1, "MaxSize": 6, - "DesiredCapacity": 4 + "DesiredCapacity": 4, + "TimeZone": "America/New_York" } ] } +For more information, see `Scheduled scaling `__ in the *Amazon EC2 Auto Scaling User Guide*. -For more information, see `Scheduled scaling `__ in the *Amazon EC2 Auto Scaling User Guide*. 
- -**Example 4: To describe scheduled actions with a sepecified start time** +**Example 4: To describe scheduled actions with a specified start time** To describe the scheduled actions that start at a specific time, use the ``--start-time`` option. :: aws autoscaling describe-scheduled-actions \ - --start-time "2020-12-01T00:30:00Z" + --start-time "2023-12-01T04:00:00Z" Output:: @@ -95,24 +97,24 @@ "ScheduledActionName": "my-recurring-action", "Recurrence": "30 0 1 1,6,12 *", "ScheduledActionARN": "arn:aws:autoscaling:us-west-2:123456789012:scheduledUpdateGroupAction:8e86b655-b2e6-4410-8f29-b4f094d6871c:autoScalingGroupName/my-asg:scheduledActionName/my-recurring-action", - "StartTime": "2020-12-01T00:30:00Z", - "Time": "2020-12-01T00:30:00Z", + "StartTime": "2023-12-01T04:00:00Z", + "Time": "2023-12-01T04:00:00Z", "MinSize": 1, "MaxSize": 6, - "DesiredCapacity": 4 + "DesiredCapacity": 4, + "TimeZone": "America/New_York" } ] } - -For more information, see `Scheduled scaling `__ in the *Amazon EC2 Auto Scaling User Guide*. +For more information, see `Scheduled scaling `__ in the *Amazon EC2 Auto Scaling User Guide*. **Example 5: To describe scheduled actions that end at a specified time** To describe the scheduled actions that end at a specific time, use the ``--end-time`` option. 
:: aws autoscaling describe-scheduled-actions \ - --end-time "2022-12-01T00:30:00Z" + --end-time "2023-12-01T04:00:00Z" Output:: @@ -123,23 +125,25 @@ "ScheduledActionName": "my-recurring-action", "Recurrence": "30 0 1 1,6,12 *", "ScheduledActionARN": "arn:aws:autoscaling:us-west-2:123456789012:scheduledUpdateGroupAction:8e86b655-b2e6-4410-8f29-b4f094d6871c:autoScalingGroupName/my-asg:scheduledActionName/my-recurring-action", - "StartTime": "2020-12-01T00:30:00Z", - "Time": "2020-12-01T00:30:00Z", + "StartTime": "2023-12-01T04:00:00Z", + "Time": "2023-12-01T04:00:00Z", "MinSize": 1, "MaxSize": 6, - "DesiredCapacity": 4 + "DesiredCapacity": 4, + "TimeZone": "America/New_York" } ] } -For more information, see `Scheduled scaling `__ in the *Amazon EC2 Auto Scaling User Guide*. +For more information, see `Scheduled scaling `__ in the *Amazon EC2 Auto Scaling User Guide*. **Example 6: To describe a specified number of scheduled actions** To return a specific number of scheduled actions, use the ``--max-items`` option. :: aws autoscaling describe-scheduled-actions \ - --auto-scaling-group-name my-asg --max-items 1 + --auto-scaling-group-name my-asg \ + --max-items 1 Output:: @@ -150,11 +154,12 @@ "ScheduledActionName": "my-recurring-action", "Recurrence": "30 0 1 1,6,12 *", "ScheduledActionARN": "arn:aws:autoscaling:us-west-2:123456789012:scheduledUpdateGroupAction:8e86b655-b2e6-4410-8f29-b4f094d6871c:autoScalingGroupName/my-asg:scheduledActionName/my-recurring-action", - "StartTime": "2020-12-01T00:30:00Z", - "Time": "2020-12-01T00:30:00Z", + "StartTime": "2023-12-01T04:00:00Z", + "Time": "2023-12-01T04:00:00Z", "MinSize": 1, "MaxSize": 6, - "DesiredCapacity": 4 + "DesiredCapacity": 4, + "TimeZone": "America/New_York" } ] } @@ -165,4 +170,4 @@ --auto-scaling-group-name my-asg \ --starting-token Z3M3LMPEXAMPLE -For more information, see `Scheduled scaling `__ in the *Amazon EC2 Auto Scaling User Guide*. 
+For more information, see `Scheduled scaling `__ in the *Amazon EC2 Auto Scaling User Guide*. \ No newline at end of file diff -Nru awscli-2.15.9/awscli/examples/autoscaling/put-scheduled-update-group-action.rst awscli-2.15.22/awscli/examples/autoscaling/put-scheduled-update-group-action.rst --- awscli-2.15.9/awscli/examples/autoscaling/put-scheduled-update-group-action.rst 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/awscli/examples/autoscaling/put-scheduled-update-group-action.rst 2024-02-21 17:34:54.000000000 +0000 @@ -5,14 +5,14 @@ aws autoscaling put-scheduled-update-group-action \ --auto-scaling-group-name my-asg \ --scheduled-action-name my-scheduled-action \ - --start-time "2021-05-12T08:00:00Z" \ + --start-time "2023-05-12T08:00:00Z" \ --min-size 2 \ --max-size 6 \ --desired-capacity 4 This command produces no output. If a scheduled action with the same name already exists, it will be overwritten by the new scheduled action. -For more examples, see `Scheduled scaling `__ in the *Amazon EC2 Auto Scaling User Guide*. +For more examples, see `Scheduled scaling `__ in the *Amazon EC2 Auto Scaling User Guide*. **Example 2: To specify a recurring schedule** @@ -28,4 +28,4 @@ This command produces no output. If a scheduled action with the same name already exists, it will be overwritten by the new scheduled action. -For more examples, see `Scheduled scaling `__ in the *Amazon EC2 Auto Scaling User Guide*. \ No newline at end of file +For more examples, see `Scheduled scaling `__ in the *Amazon EC2 Auto Scaling User Guide*. 
\ No newline at end of file diff -Nru awscli-2.15.9/awscli/examples/cognito-idp/set-user-mfa-preference.rst awscli-2.15.22/awscli/examples/cognito-idp/set-user-mfa-preference.rst --- awscli-2.15.9/awscli/examples/cognito-idp/set-user-mfa-preference.rst 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/awscli/examples/cognito-idp/set-user-mfa-preference.rst 2024-02-21 17:34:54.000000000 +0000 @@ -1,8 +1,12 @@ -**To set user MFA settings** - -This example modifies the MFA delivery options. It changes the MFA delivery medium to SMS. - -Command:: - - aws cognito-idp set-user-mfa-preference --access-token ACCESS_TOKEN --mfa-options DeliveryMedium="SMS",AttributeName="phone_number" - +**To set user MFA settings** + +The following ``set-user-mfa-preference`` example modifies the MFA delivery options. It changes the MFA delivery medium to SMS. :: + + aws cognito-idp set-user-mfa-preference \ + --access-token "eyJra12345EXAMPLE" \ + --software-token-mfa-settings Enabled=true,PreferredMfa=true \ + --sms-mfa-settings Enabled=false,PreferredMfa=false + +This command produces no output. + +For more information, see `Adding MFA to a user pool `__ in the *Amazon Cognito Developer Guide*. \ No newline at end of file diff -Nru awscli-2.15.9/awscli/examples/ec2/create-coip-cidr.rst awscli-2.15.22/awscli/examples/ec2/create-coip-cidr.rst --- awscli-2.15.9/awscli/examples/ec2/create-coip-cidr.rst 1970-01-01 00:00:00.000000000 +0000 +++ awscli-2.15.22/awscli/examples/ec2/create-coip-cidr.rst 2024-02-21 17:34:54.000000000 +0000 @@ -0,0 +1,19 @@ +**To create a range of customer-owned IP (CoIP) addresses** + +The following ``create-coip-cidr`` example creates the specified range of CoIP addresses in the specified CoIP pool. 
:: + + aws ec2 create-coip-cidr \ + --cidr 15.0.0.0/24 \ + --coip-pool-id ipv4pool-coip-1234567890abcdefg + +Output:: + + { + "CoipCidr": { + "Cidr": "15.0.0.0/24", + "CoipPoolId": "ipv4pool-coip-1234567890abcdefg", + "LocalGatewayRouteTableId": "lgw-rtb-abcdefg1234567890" + } + } + +For more information, see `Customer-owned IP addresses `__ in the *AWS Outposts User Guide*. \ No newline at end of file diff -Nru awscli-2.15.9/awscli/examples/ec2/create-coip-pool.rst awscli-2.15.22/awscli/examples/ec2/create-coip-pool.rst --- awscli-2.15.9/awscli/examples/ec2/create-coip-pool.rst 1970-01-01 00:00:00.000000000 +0000 +++ awscli-2.15.22/awscli/examples/ec2/create-coip-pool.rst 2024-02-21 17:34:54.000000000 +0000 @@ -0,0 +1,18 @@ +**To create a pool of customer-owned IP (CoIP) addresses** + +The following ``create-coip-pool`` example creates a CoIP pool for CoIP addresses in the specified local gateway route table. :: + + aws ec2 create-coip-pool \ + --local-gateway-route-table-id lgw-rtb-abcdefg1234567890 + +Output:: + + { + "CoipPool": { + "PoolId": "ipv4pool-coip-1234567890abcdefg", + "LocalGatewayRouteTableId": "lgw-rtb-abcdefg1234567890", + "PoolArn": "arn:aws:ec2:us-west-2:123456789012:coip-pool/ipv4pool-coip-1234567890abcdefg" + } + } + +For more information, see `Customer-owned IP addresses `__ in the *AWS Outposts User Guide*. 
\ No newline at end of file diff -Nru awscli-2.15.9/awscli/examples/ec2/create-local-gateway-route-table-virtual-interface-group-association.rst awscli-2.15.22/awscli/examples/ec2/create-local-gateway-route-table-virtual-interface-group-association.rst --- awscli-2.15.9/awscli/examples/ec2/create-local-gateway-route-table-virtual-interface-group-association.rst 1970-01-01 00:00:00.000000000 +0000 +++ awscli-2.15.22/awscli/examples/ec2/create-local-gateway-route-table-virtual-interface-group-association.rst 2024-02-21 17:34:54.000000000 +0000 @@ -0,0 +1,24 @@ +**To associate a local gateway route table with a virtual interfaces (VIFs) group** + +The following ``create-local-gateway-route-table-virtual-interface-group-association`` example creates an association between the specified local gateway route table and VIF group. :: + + aws ec2 create-local-gateway-route-table-virtual-interface-group-association \ + --local-gateway-route-table-id lgw-rtb-exampleidabcd1234 \ + --local-gateway-virtual-interface-group-id lgw-vif-grp-exampleid0123abcd + +Output:: + + { + "LocalGatewayRouteTableVirtualInterfaceGroupAssociation": { + "LocalGatewayRouteTableVirtualInterfaceGroupAssociationId": "lgw-vif-grp-assoc-exampleid12345678", + "LocalGatewayVirtualInterfaceGroupId": "lgw-vif-grp-exampleid0123abcd", + "LocalGatewayId": "lgw-exampleid11223344", + "LocalGatewayRouteTableId": "lgw-rtb-exampleidabcd1234", + "LocalGatewayRouteTableArn": "arn:aws:ec2:us-west-2:111122223333:local-gateway-route-table/lgw-rtb-exampleidabcd1234", + "OwnerId": "111122223333", + "State": "pending", + "Tags": [] + } + } + +For more information, see `VIF group associations `__ in the *AWS Outposts User Guide*. 
\ No newline at end of file diff -Nru awscli-2.15.9/awscli/examples/ec2/create-local-gateway-route-table.rst awscli-2.15.22/awscli/examples/ec2/create-local-gateway-route-table.rst --- awscli-2.15.9/awscli/examples/ec2/create-local-gateway-route-table.rst 1970-01-01 00:00:00.000000000 +0000 +++ awscli-2.15.22/awscli/examples/ec2/create-local-gateway-route-table.rst 2024-02-21 17:34:54.000000000 +0000 @@ -0,0 +1,24 @@ +**To create a local gateway route table** + +The following ``create-local-gateway-route-table`` example creates a local gateway route table with the direct VPC routing mode. :: + + aws ec2 create-local-gateway-route-table \ + --local-gateway-id lgw-1a2b3c4d5e6f7g8h9 \ + --mode direct-vpc-routing + +Output:: + + { + "LocalGatewayRouteTable": { + "LocalGatewayRouteTableId": "lgw-rtb-abcdefg1234567890", + "LocalGatewayRouteTableArn": "arn:aws:ec2:us-west-2:111122223333:local-gateway-route-table/lgw-rtb-abcdefg1234567890", + "LocalGatewayId": "lgw-1a2b3c4d5e6f7g8h9", + "OutpostArn": "arn:aws:outposts:us-west-2:111122223333:outpost/op-021345abcdef67890", + "OwnerId": "111122223333", + "State": "pending", + "Tags": [], + "Mode": "direct-vpc-routing" + } + } + +For more information, see `Local gateway route tables `__ in the *AWS Outposts User Guide*. \ No newline at end of file diff -Nru awscli-2.15.9/awscli/examples/ec2/create-tags.rst awscli-2.15.22/awscli/examples/ec2/create-tags.rst --- awscli-2.15.9/awscli/examples/ec2/create-tags.rst 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/awscli/examples/ec2/create-tags.rst 2024-02-21 17:34:54.000000000 +0000 @@ -1,11 +1,14 @@ -**To add a tag to a resource** +**Example 1: To add a tag to a resource** The following ``create-tags`` example adds the tag ``Stack=production`` to the specified image, or overwrites an existing tag for the AMI where the tag key is ``Stack``. 
:: aws ec2 create-tags \ - --resources ami-1234567890abcdef0 --tags Key=Stack,Value=production + --resources ami-1234567890abcdef0 \ + --tags Key=Stack,Value=production -**To add tags to multiple resources** +For more information, see `This is the topic title `__ in the *Amazon Elastic Compute Cloud User Guide for Linux Instances*. + +**Example 2: To add tags to multiple resources** The following ``create-tags`` example adds (or overwrites) two tags for an AMI and an instance. One of the tags has a key (``webserver``) but no value (value is set to an empty string). The other tag has a key (``stack``) and a value (``Production``). :: @@ -13,7 +16,9 @@ --resources ami-1a2b3c4d i-1234567890abcdef0 \ --tags Key=webserver,Value= Key=stack,Value=Production -**To add tags containing special characters** +For more information, see `This is the topic title `__ in the *Amazon Elastic Compute Cloud User Guide for Linux Instances*. + +**Example 3: To add tags containing special characters** The following ``create-tags`` example adds the tag ``[Group]=test`` for an instance. The square brackets ([ and ]) are special characters, and must be escaped. The following examples also use the line continuation character appropriate for each environment. 
@@ -23,7 +28,7 @@ --resources i-1234567890abcdef0 ^ --tags Key=\"[Group]\",Value=test -If you are using Windows PowerShell, element the value that has special characters with double quotes ("), precede each double quote character with a backslash (\\), and then surround the entire key and value structure with single quotes (') as follows:: +If you are using Windows PowerShell, surround the element the value that has special characters with double quotes ("), precede each double quote character with a backslash (\\), and then surround the entire key and value structure with single quotes (') as follows:: aws ec2 create-tags ` --resources i-1234567890abcdef0 ` @@ -34,3 +39,5 @@ aws ec2 create-tags \ --resources i-1234567890abcdef0 \ --tags 'Key="[Group]",Value=test' + +For more information, see `This is the topic title `__ in the *Amazon Elastic Compute Cloud User Guide for Linux Instances*. diff -Nru awscli-2.15.9/awscli/examples/ec2/delete-coip-cidr.rst awscli-2.15.22/awscli/examples/ec2/delete-coip-cidr.rst --- awscli-2.15.9/awscli/examples/ec2/delete-coip-cidr.rst 1970-01-01 00:00:00.000000000 +0000 +++ awscli-2.15.22/awscli/examples/ec2/delete-coip-cidr.rst 2024-02-21 17:34:54.000000000 +0000 @@ -0,0 +1,19 @@ +**To delete a range of customer-owned IP (CoIP) addresses** + +The following ``delete-coip-cidr`` example deletes the specified range of CoIP addresses in the specified CoIP pool. :: + + aws ec2 delete-coip-cidr \ + --cidr 14.0.0.0/24 \ + --coip-pool-id ipv4pool-coip-1234567890abcdefg + +Output:: + + { + "CoipCidr": { + "Cidr": "14.0.0.0/24", + "CoipPoolId": "ipv4pool-coip-1234567890abcdefg", + "LocalGatewayRouteTableId": "lgw-rtb-abcdefg1234567890" + } + } + +For more information, see `Customer-owned IP addresses `__ in the *AWS Outposts User Guide*. 
\ No newline at end of file diff -Nru awscli-2.15.9/awscli/examples/ec2/delete-coip-pool.rst awscli-2.15.22/awscli/examples/ec2/delete-coip-pool.rst --- awscli-2.15.9/awscli/examples/ec2/delete-coip-pool.rst 1970-01-01 00:00:00.000000000 +0000 +++ awscli-2.15.22/awscli/examples/ec2/delete-coip-pool.rst 2024-02-21 17:34:54.000000000 +0000 @@ -0,0 +1,18 @@ +**To delete a pool of customer-owned IP (CoIP) addresses** + +The following ``delete-coip-pool`` example deletes a CoIP pool of CoIP addresses. :: + + aws ec2 delete-coip-pool \ + --coip-pool-id ipv4pool-coip-1234567890abcdefg + +Output:: + + { + "CoipPool": { + "PoolId": "ipv4pool-coip-1234567890abcdefg", + "LocalGatewayRouteTableId": "lgw-rtb-abcdefg1234567890", + "PoolArn": "arn:aws:ec2:us-west-2:123456789012:coip-pool/ipv4pool-coip-1234567890abcdefg" + } + } + +For more information, see `Customer-owned IP addresses `__ in the *AWS Outposts User Guide*. \ No newline at end of file diff -Nru awscli-2.15.9/awscli/examples/ec2/delete-local-gateway-route-table-virtual-interface-group-association.rst awscli-2.15.22/awscli/examples/ec2/delete-local-gateway-route-table-virtual-interface-group-association.rst --- awscli-2.15.9/awscli/examples/ec2/delete-local-gateway-route-table-virtual-interface-group-association.rst 1970-01-01 00:00:00.000000000 +0000 +++ awscli-2.15.22/awscli/examples/ec2/delete-local-gateway-route-table-virtual-interface-group-association.rst 2024-02-21 17:34:54.000000000 +0000 @@ -0,0 +1,23 @@ +**To disassociate a local gateway route table from a virtual interfaces (VIFs) group** + +The following ``delete-local-gateway-route-table-virtual-interface-group-association`` example deletes the association between the specified local gateway route table and VIF group. 
:: + + aws ec2 delete-local-gateway-route-table-virtual-interface-group-association \ + --local-gateway-route-table-virtual-interface-group-association-id lgw-vif-grp-assoc-exampleid12345678 + +Output:: + + { + "LocalGatewayRouteTableVirtualInterfaceGroupAssociation": { + "LocalGatewayRouteTableVirtualInterfaceGroupAssociationId": "lgw-vif-grp-assoc-exampleid12345678", + "LocalGatewayVirtualInterfaceGroupId": "lgw-vif-grp-exampleid0123abcd", + "LocalGatewayId": "lgw-exampleid11223344", + "LocalGatewayRouteTableId": "lgw-rtb-exampleidabcd1234", + "LocalGatewayRouteTableArn": "arn:aws:ec2:us-west-2:111122223333:local-gateway-route-table/lgw-rtb-exampleidabcd1234", + "OwnerId": "111122223333", + "State": "disassociating", + "Tags": [] + } + } + +For more information, see `VIF group associations `__ in the *AWS Outposts User Guide*. \ No newline at end of file diff -Nru awscli-2.15.9/awscli/examples/ec2/delete-local-gateway-route-table-vpc-association.rst awscli-2.15.22/awscli/examples/ec2/delete-local-gateway-route-table-vpc-association.rst --- awscli-2.15.9/awscli/examples/ec2/delete-local-gateway-route-table-vpc-association.rst 1970-01-01 00:00:00.000000000 +0000 +++ awscli-2.15.22/awscli/examples/ec2/delete-local-gateway-route-table-vpc-association.rst 2024-02-21 17:34:54.000000000 +0000 @@ -0,0 +1,22 @@ +**To disassociate a local gateway route table from a VPC** + +The following ``delete-local-gateway-route-table-vpc-association`` example deletes the association between the specified local gateway route table and VPC. 
:: + + aws ec2 delete-local-gateway-route-table-vpc-association \ + --local-gateway-route-table-vpc-association-id vpc-example0123456789 + +Output:: + + { + "LocalGatewayRouteTableVpcAssociation": { + "LocalGatewayRouteTableVpcAssociationId": "lgw-vpc-assoc-abcd1234wxyz56789", + "LocalGatewayRouteTableId": "lgw-rtb-abcdefg1234567890", + "LocalGatewayRouteTableArn": "arn:aws:ec2:us-west-2:555555555555:local-gateway-route-table/lgw-rtb-abcdefg1234567890", + "LocalGatewayId": "lgw-exampleid01234567", + "VpcId": "vpc-example0123456789", + "OwnerId": "555555555555", + "State": "disassociating" + } + } + +For more information, see `VPC associations `__ in the *AWS Outposts User Guide*. \ No newline at end of file diff -Nru awscli-2.15.9/awscli/examples/ec2/delete-local-gateway-route-table.rst awscli-2.15.22/awscli/examples/ec2/delete-local-gateway-route-table.rst --- awscli-2.15.9/awscli/examples/ec2/delete-local-gateway-route-table.rst 1970-01-01 00:00:00.000000000 +0000 +++ awscli-2.15.22/awscli/examples/ec2/delete-local-gateway-route-table.rst 2024-02-21 17:34:54.000000000 +0000 @@ -0,0 +1,23 @@ +**To delete a local gateway route table** + +The following ``delete-local-gateway-route-table`` example creates a local gateway route table with the direct VPC routing mode. :: + + aws ec2 delete-local-gateway-route-table \ + --local-gateway-route-table-id lgw-rtb-abcdefg1234567890 + +Output:: + + { + "LocalGatewayRouteTable": { + "LocalGatewayRouteTableId": "lgw-rtb-abcdefg1234567890", + "LocalGatewayRouteTableArn": "arn:aws:ec2:us-west-2:111122223333:local-gateway-route-table/lgw-rtb-abcdefg1234567890", + "LocalGatewayId": "lgw-1a2b3c4d5e6f7g8h9", + "OutpostArn": "arn:aws:outposts:us-west-2:111122223333:outpost/op-021345abcdef67890", + "OwnerId": "111122223333", + "State": "deleting", + "Tags": [], + "Mode": "direct-vpc-routing" + } + } + +For more information, see `Local gateway route tables `__ in the *AWS Outposts User Guide*. 
\ No newline at end of file diff -Nru awscli-2.15.9/awscli/examples/ec2/describe-instance-types.rst awscli-2.15.22/awscli/examples/ec2/describe-instance-types.rst --- awscli-2.15.9/awscli/examples/ec2/describe-instance-types.rst 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/awscli/examples/ec2/describe-instance-types.rst 2024-02-21 17:34:54.000000000 +0000 @@ -3,7 +3,7 @@ The following ``describe-instance-types`` example displays details for the specified instance type. :: aws ec2 describe-instance-types \ - --instance-types t2.micro + --instance-types t2.micro Output:: @@ -70,11 +70,15 @@ ] } +For more information, see `Instance Types `__ in *Amazon Elastic Compute Cloud +User Guide for Linux Instances*. + **Example 2: To filter the available instance types** You can specify a filter to scope the results to instance types that have a specific characteristic. The following ``describe-instance-types`` example lists the instance types that support hibernation. :: - aws ec2 describe-instance-types --filters Name=hibernation-supported,Values=true --query InstanceTypes[].InstanceType + aws ec2 describe-instance-types \ + --filters Name=hibernation-supported,Values=true --query 'InstanceTypes[*].InstanceType' Output:: @@ -95,3 +99,6 @@ "r5.4xlarge", "c5.4xlarge" ] + +For more information, see `Instance Types `__ in *Amazon Elastic Compute Cloud +User Guide for Linux Instances*. 
\ No newline at end of file diff -Nru awscli-2.15.9/awscli/examples/ec2/describe-security-groups.rst awscli-2.15.22/awscli/examples/ec2/describe-security-groups.rst --- awscli-2.15.9/awscli/examples/ec2/describe-security-groups.rst 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/awscli/examples/ec2/describe-security-groups.rst 2024-02-21 17:34:54.000000000 +0000 @@ -65,7 +65,7 @@ **Example 2: To describe security groups that have specific rules** -The following ``describe-security-groups``example uses filters to scope the results to security groups that have a rule that allows SSH traffic (port 22) and a rule that allows traffic from all addresses (``0.0.0.0/0``). The example uses the ``--query`` parameter to display only the names of the security groups. Security groups must match all filters to be returned in the results; however, a single rule does not have to match all filters. For example, the output returns a security group with a rule that allows SSH traffic from a specific IP address and another rule that allows HTTP traffic from all addresses. :: +The following ``describe-security-groups`` example uses filters to scope the results to security groups that have a rule that allows SSH traffic (port 22) and a rule that allows traffic from all addresses (``0.0.0.0/0``). The example uses the ``--query`` parameter to display only the names of the security groups. Security groups must match all filters to be returned in the results; however, a single rule does not have to match all filters. For example, the output returns a security group with a rule that allows SSH traffic from a specific IP address and another rule that allows HTTP traffic from all addresses. :: aws ec2 describe-security-groups \ --filters Name=ip-permission.from-port,Values=22 Name=ip-permission.to-port,Values=22 Name=ip-permission.cidr,Values='0.0.0.0/0' \ @@ -100,4 +100,4 @@ } ] -For additional examples using tag filters, see `Working with tags `__ in the *Amazon EC2 User Guide*. 
+For additional examples using tag filters, see `Working with tags `__ in the *Amazon EC2 User Guide*. \ No newline at end of file diff -Nru awscli-2.15.9/awscli/examples/ecs/describe-task-definition.rst awscli-2.15.22/awscli/examples/ecs/describe-task-definition.rst --- awscli-2.15.9/awscli/examples/ecs/describe-task-definition.rst 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/awscli/examples/ecs/describe-task-definition.rst 2024-02-21 17:34:54.000000000 +0000 @@ -2,54 +2,114 @@ The following ``describe-task-definition`` example retrieves the details of a task definition. :: - aws ecs describe-task-definition --task-definition hello_world:8 + aws ecs describe-task-definition \ + --task-definition hello_world:8 Output:: { - "taskDefinition": { - "volumes": [], - "taskDefinitionArn": "arn:aws:ecs:us-west-2:123456789012:task-definition/hello_world:8", - "containerDefinitions": [ - { - "environment": [], - "name": "wordpress", - "links": [ - "mysql" - ], - "mountPoints": [], - "image": "wordpress", - "essential": true, - "portMappings": [ - { - "containerPort": 80, - "hostPort": 80 - } - ], - "memory": 500, - "cpu": 10, - "volumesFrom": [] - }, - { - "environment": [ - { - "name": "MYSQL_ROOT_PASSWORD", - "value": "password" - } - ], - "name": "mysql", - "mountPoints": [], - "image": "mysql", - "cpu": 10, - "portMappings": [], - "memory": 500, - "essential": true, - "volumesFrom": [] + "tasks": [ + { + "attachments": [ + { + "id": "17f3dff6-a9e9-4d83-99a9-7eb5193c2634", + "type": "ElasticNetworkInterface", + "status": "ATTACHED", + "details": [ + { + "name": "subnetId", + "value": "subnet-0d0eab1bb38d5ca64" + }, + { + "name": "networkInterfaceId", + "value": "eni-0d542ffb4a12aa6d9" + }, + { + "name": "macAddress", + "value": "0e:6d:18:f6:2d:29" + }, + { + "name": "privateDnsName", + "value": "ip-10-0-1-170.ec2.internal" + }, + { + "name": "privateIPv4Address", + "value": "10.0.1.170" + } + ] + } + ], + "attributes": [ + { + "name": "ecs.cpu-architecture", + 
"value": "x86_64" + } + ], + "availabilityZone": "us-east-1b", + "clusterArn": "arn:aws:ecs:us-east-1:053534965804:cluster/fargate-cluster", + "connectivity": "CONNECTED", + "connectivityAt": "2023-11-28T11:10:52.907000-05:00", + "containers": [ + { + "containerArn": "arn:aws:ecs:us-east-1:053534965804:container/fargate-cluster/c524291ae4154100b601a543108b193a/772c4784-92ae-414e-8df2-03d3358e39fa", + "taskArn": "arn:aws:ecs:us-east-1:053534965804:task/fargate-cluster/c524291ae4154100b601a543108b193a", + "name": "web", + "image": "nginx", + "imageDigest": "sha256:10d1f5b58f74683ad34eb29287e07dab1e90f10af243f151bb50aa5dbb4d62ee", + "runtimeId": "c524291ae4154100b601a543108b193a-265927825", + "lastStatus": "RUNNING", + "networkBindings": [], + "networkInterfaces": [ + { + "attachmentId": "17f3dff6-a9e9-4d83-99a9-7eb5193c2634", + "privateIpv4Address": "10.0.1.170" + } + ], + "healthStatus": "HEALTHY", + "cpu": "99", + "memory": "100" + }, + { + "containerArn": "arn:aws:ecs:us-east-1:053534965804:container/fargate-cluster/c524291ae4154100b601a543108b193a/c051a779-40d2-48ca-ad5e-6ec875ceb610", + "taskArn": "arn:aws:ecs:us-east-1:053534965804:task/fargate-cluster/c524291ae4154100b601a543108b193a", + "name": "aws-guardduty-agent-FvWGoDU", + "imageDigest": "sha256:359b8b014e5076c625daa1056090e522631587a7afa3b2e055edda6bd1141017", + "runtimeId": "c524291ae4154100b601a543108b193a-505093495", + "lastStatus": "RUNNING", + "networkBindings": [], + "networkInterfaces": [ + { + "attachmentId": "17f3dff6-a9e9-4d83-99a9-7eb5193c2634", + "privateIpv4Address": "10.0.1.170" + } + ], + "healthStatus": "UNKNOWN" + } + ], + "cpu": "256", + "createdAt": "2023-11-28T11:10:49.299000-05:00", + "desiredStatus": "RUNNING", + "enableExecuteCommand": false, + "group": "family:webserver", + "healthStatus": "HEALTHY", + "lastStatus": "RUNNING", + "launchType": "FARGATE", + "memory": "512" + "platformVersion": "1.4.0", + "platformFamily": "Linux", + "pullStartedAt": 
"2023-11-28T11:10:59.773000-05:00", + "pullStoppedAt": "2023-11-28T11:11:12.624000-05:00", + "startedAt": "2023-11-28T11:11:20.316000-05:00", + "tags": [], + "taskArn": "arn:aws:ecs:us-east-1:053534965804:task/fargate-cluster/c524291ae4154100b601a543108b193a", + "taskDefinitionArn": "arn:aws:ecs:us-east-1:053534965804:task-definition/webserver:5", + "version": 4, + "ephemeralStorage": { + "sizeInGiB": 20 } - ], - "family": "hello_world", - "revision": 8 - } + } + ], + "failures": [] } -For more information, see `Amazon ECS Task Definitions `_ in the *Amazon ECS Developer Guide*. +For more information, see `Amazon ECS Task Definitions `_ in the *Amazon ECS Developer Guide*. \ No newline at end of file diff -Nru awscli-2.15.9/awscli/examples/ecs/run-task.rst awscli-2.15.22/awscli/examples/ecs/run-task.rst --- awscli-2.15.9/awscli/examples/ecs/run-task.rst 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/awscli/examples/ecs/run-task.rst 2024-02-21 17:34:54.000000000 +0000 @@ -1,37 +1,63 @@ **To run a task on your default cluster** -The following ``run-task`` example runs a task on the default cluster. :: +The following ``run-task`` example runs a task on the default cluster and uses a client token. 
:: - aws ecs run-task --cluster default --task-definition sleep360:1 + aws ecs run-task \ + --cluster default \ + --task-definition sleep360:1 \ + --client-token 550e8400-e29b-41d4-a716-446655440000 Output:: { "tasks": [ { - "taskArn": "arn:aws:ecs:us-west-2:123456789012:task/a1b2c3d4-5678-90ab-ccdef-11111EXAMPLE", + "attachments": [], + "attributes": [ + { + "name": "ecs.cpu-architecture", + "value": "x86_64" + } + ], + "availabilityZone": "us-east-1b", + "capacityProviderName": "example-capacity-provider", + "clusterArn": "arn:aws:ecs:us-east-1:123456789012:cluster/default", + "containerInstanceArn": "arn:aws:ecs:us-east-1:123456789012:container-instance/default/bc4d2ec611d04bb7bb97e83ceEXAMPLE", + "containers": [ + { + "containerArn": "arn:aws:ecs:us-east-1:123456789012:container/default/d6f51cc5bbc94a47969c92035e9f66f8/75853d2d-711e-458a-8362-0f0aEXAMPLE", + "taskArn": "arn:aws:ecs:us-east-1:123456789012:task/default/d6f51cc5bbc94a47969c9203EXAMPLE", + "name": "sleep", + "image": "busybox", + "lastStatus": "PENDING", + "networkInterfaces": [], + "cpu": "10", + "memory": "10" + } + ], + "cpu": "10", + "createdAt": "2023-11-21T16:59:34.403000-05:00", + "desiredStatus": "RUNNING", + "enableExecuteCommand": false, + "group": "family:sleep360", + "lastStatus": "PENDING", + "launchType": "EC2", + "memory": "10", "overrides": { "containerOverrides": [ { "name": "sleep" } - ] + ], + "inferenceAcceleratorOverrides": [] }, - "lastStatus": "PENDING", - "containerInstanceArn": "arn:aws:ecs:us-west-2:123456789012:container-instance/a1b2c3d4-5678-90ab-ccdef-22222EXAMPLE", - "desiredStatus": "RUNNING", - "taskDefinitionArn": "arn:aws:ecs:us-west-2:123456789012:task-definition/sleep360:1", - "containers": [ - { - "containerArn": "arn:aws:ecs:us-west-2:123456789012:container/a1b2c3d4-5678-90ab-ccdef-33333EXAMPLE", - "taskArn": "arn:aws:ecs:us-west-2:123456789012:task/a1b2c3d4-5678-90ab-ccdef-11111EXAMPLE", - "lastStatus": "PENDING", - "name": "sleep" - } - ] + "tags": [], + 
"taskArn": "arn:aws:ecs:us-east-1:123456789012:task/default/d6f51cc5bbc94a47969c9203EXAMPLE", + "taskDefinitionArn": "arn:aws:ecs:us-east-1:123456789012:task-definition/sleep360:1", + "version": 1 } - ] + ], + "failures": [] } - -For more information, see `Running Tasks `_ in the *Amazon ECS Developer Guide*. \ No newline at end of file +For more information, see `Running Tasks `__ in the *Amazon ECS Developer Guide*. \ No newline at end of file diff -Nru awscli-2.15.9/awscli/examples/s3/cp.rst awscli-2.15.22/awscli/examples/s3/cp.rst --- awscli-2.15.9/awscli/examples/s3/cp.rst 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/awscli/examples/s3/cp.rst 2024-02-21 17:34:54.000000000 +0000 @@ -1,4 +1,4 @@ -**Copying a local file to S3** +**Example 1: Copying a local file to S3** The following ``cp`` command copies a single file to a specified bucket and key:: @@ -9,19 +9,19 @@ upload: test.txt to s3://mybucket/test2.txt -**Copying a local file to S3 with an expiration date** +**Example 2: Copying a local file to S3 with an expiration date** The following ``cp`` command copies a single file to a specified bucket and key that expires at the specified ISO 8601 timestamp:: - aws s3 cp test.txt s3://mybucket/test2.txt --expires 2014-10-01T20:30:00Z + aws s3 cp test.txt s3://mybucket/test2.txt \ + --expires 2014-10-01T20:30:00Z Output:: upload: test.txt to s3://mybucket/test2.txt - -**Copying a file from S3 to S3** +**Example 3: Copying a file from S3 to S3** The following ``cp`` command copies a single s3 object to a specified bucket and key:: @@ -31,8 +31,7 @@ copy: s3://mybucket/test.txt to s3://mybucket/test2.txt - -**Copying an S3 object to a local file** +**Example 4: Copying an S3 object to a local file** The following ``cp`` command copies a single object to a specified file locally:: @@ -42,8 +41,7 @@ download: s3://mybucket/test.txt to test2.txt - -**Copying an S3 object from one bucket to another** +**Example 5: Copying an S3 object from one bucket to 
another** The following ``cp`` command copies a single object to a specified bucket while retaining its original name:: @@ -53,38 +51,43 @@ copy: s3://mybucket/test.txt to s3://mybucket2/test.txt -**Recursively copying S3 objects to a local directory** +**Example 6: Recursively copying S3 objects to a local directory** When passed with the parameter ``--recursive``, the following ``cp`` command recursively copies all objects under a specified prefix and bucket to a specified directory. In this example, the bucket ``mybucket`` has the objects ``test1.txt`` and ``test2.txt``:: - aws s3 cp s3://mybucket . --recursive + aws s3 cp s3://mybucket . \ + --recursive Output:: download: s3://mybucket/test1.txt to test1.txt download: s3://mybucket/test2.txt to test2.txt -**Recursively copying local files to S3** +**Example 7: Recursively copying local files to S3** When passed with the parameter ``--recursive``, the following ``cp`` command recursively copies all files under a specified directory to a specified bucket and prefix while excluding some files by using an ``--exclude`` parameter. In this example, the directory ``myDir`` has the files ``test1.txt`` and ``test2.jpg``:: - aws s3 cp myDir s3://mybucket/ --recursive --exclude "*.jpg" + aws s3 cp myDir s3://mybucket/ \ + --recursive \ + --exclude "*.jpg" Output:: upload: myDir/test1.txt to s3://mybucket/test1.txt -**Recursively copying S3 objects to another bucket** +**Example 8: Recursively copying S3 objects to another bucket** When passed with the parameter ``--recursive``, the following ``cp`` command recursively copies all objects under a specified bucket to another bucket while excluding some objects by using an ``--exclude`` parameter. 
In this example, the bucket ``mybucket`` has the objects ``test1.txt`` and ``another/test1.txt``:: - aws s3 cp s3://mybucket/ s3://mybucket2/ --recursive --exclude "another/*" + aws s3 cp s3://mybucket/ s3://mybucket2/ \ + --recursive \ + --exclude "another/*" Output:: @@ -92,19 +95,23 @@ You can combine ``--exclude`` and ``--include`` options to copy only objects that match a pattern, excluding all others:: - aws s3 cp s3://mybucket/logs/ s3://mybucket2/logs/ --recursive --exclude "*" --include "*.log" + aws s3 cp s3://mybucket/logs/ s3://mybucket2/logs/ \ + --recursive \ + --exclude "*" \ + --include "*.log" Output:: copy: s3://mybucket/logs/test/test.log to s3://mybucket2/logs/test/test.log copy: s3://mybucket/logs/test3.log to s3://mybucket2/logs/test3.log -**Setting the Access Control List (ACL) while copying an S3 object** +**Example 9: Setting the Access Control List (ACL) while copying an S3 object** The following ``cp`` command copies a single object to a specified bucket and key while setting the ACL to ``public-read-write``:: - aws s3 cp s3://mybucket/test.txt s3://mybucket/test2.txt --acl public-read-write + aws s3 cp s3://mybucket/test.txt s3://mybucket/test2.txt \ + --acl public-read-write Output:: @@ -113,7 +120,9 @@ Note that if you're using the ``--acl`` option, ensure that any associated IAM policies include the ``"s3:PutObjectAcl"`` action:: - aws iam get-user-policy --user-name myuser --policy-name mypolicy + aws iam get-user-policy \ + --user-name myuser \ + --policy-name mypolicy Output:: @@ -138,7 +147,7 @@ } } -**Granting permissions for an S3 object** +**Example 10: Granting permissions for an S3 object** The following ``cp`` command illustrates the use of the ``--grants`` option to grant read access to all users identified by URI and full control to a specific user identified by their Canonical ID:: @@ -149,7 +158,7 @@ upload: file.txt to s3://mybucket/file.txt -**Uploading a local file stream to S3** +**Example 11: Uploading a local file 
stream to S3** .. WARNING:: PowerShell may alter the encoding of or add a CRLF to piped input. @@ -157,13 +166,13 @@ aws s3 cp - s3://mybucket/stream.txt -**Uploading a local file stream that is larger than 50GB to S3** +**Example 12: Uploading a local file stream that is larger than 50GB to S3** The following ``cp`` command uploads a 51GB local file stream from standard input to a specified bucket and key. The ``--expected-size`` option must be provided, or the upload may fail when it reaches the default part limit of 10,000:: aws s3 cp - s3://mybucket/stream.txt --expected-size 54760833024 -**Downloading an S3 object as a local file stream** +**Example 13: Downloading an S3 object as a local file stream** .. WARNING:: PowerShell may alter the encoding of or add a CRLF to piped or redirected output. @@ -171,7 +180,7 @@ aws s3 cp s3://mybucket/stream.txt - -**Uploading to an S3 access point** +**Example 14: Uploading to an S3 access point** The following ``cp`` command uploads a single file (``mydoc.txt``) to the access point (``myaccesspoint``) at the key (``mykey``):: @@ -182,7 +191,7 @@ upload: mydoc.txt to s3://arn:aws:s3:us-west-2:123456789012:accesspoint/myaccesspoint/mykey -**Downloading from an S3 access point** +**Example 15: Downloading from an S3 access point** The following ``cp`` command downloads a single object (``mykey``) from the access point (``myaccesspoint``) to the local file (``mydoc.txt``):: diff -Nru awscli-2.15.9/awscli/examples/s3/ls.rst awscli-2.15.22/awscli/examples/s3/ls.rst --- awscli-2.15.9/awscli/examples/s3/ls.rst 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/awscli/examples/s3/ls.rst 2024-02-21 17:34:54.000000000 +0000 @@ -1,6 +1,6 @@ **Example 1: Listing all user owned buckets** -The following ``ls`` command lists all of the bucket owned by the user. In this example, the user owns the buckets ``mybucket`` and ``mybucket2``. The timestamp is the date the bucket was created, shown in your machine's time zone. 
This date can change when making changes to your bucket, such as editing its bucket policy. Note if ``s3://`` is used for the path argument ````, it will list all of the buckets as well:: +The following ``ls`` command lists all of the bucket owned by the user. In this example, the user owns the buckets ``mybucket`` and ``mybucket2``. The timestamp is the date the bucket was created, shown in your machine's time zone. This date can change when making changes to your bucket, such as editing its bucket policy. Note if ``s3://`` is used for the path argument ````, it will list all of the buckets as well. :: aws s3 ls @@ -11,7 +11,7 @@ **Example 2: Listing all prefixes and objects in a bucket** -The following ``ls`` command lists objects and common prefixes under a specified bucket and prefix. In this example, the user owns the bucket ``mybucket`` with the objects ``test.txt`` and ``somePrefix/test.txt``. The ``LastWriteTime`` and ``Length`` are arbitrary. Note that since the ``ls`` command has no interaction with the local filesystem, the ``s3://`` URI scheme is not required to resolve ambiguity and may be omitted:: +The following ``ls`` command lists objects and common prefixes under a specified bucket and prefix. In this example, the user owns the bucket ``mybucket`` with the objects ``test.txt`` and ``somePrefix/test.txt``. The ``LastWriteTime`` and ``Length`` are arbitrary. Note that since the ``ls`` command has no interaction with the local filesystem, the ``s3://`` URI scheme is not required to resolve ambiguity and may be omitted. :: aws s3 ls s3://mybucket @@ -22,7 +22,7 @@ **Example 3: Listing all prefixes and objects in a specific bucket and prefix** -The following ``ls`` command lists objects and common prefixes under a specified bucket and prefix. However, there are no objects nor common prefixes under the specified bucket and prefix:: +The following ``ls`` command lists objects and common prefixes under a specified bucket and prefix. 
However, there are no objects nor common prefixes under the specified bucket and prefix. :: aws s3 ls s3://mybucket/noExistPrefix @@ -32,9 +32,10 @@ **Example 4: Recursively listing all prefixes and objects in a bucket** -The following ``ls`` command will recursively list objects in a bucket. Rather than showing ``PRE dirname/`` in the output, all the content in a bucket will be listed in order:: +The following ``ls`` command will recursively list objects in a bucket. Rather than showing ``PRE dirname/`` in the output, all the content in a bucket will be listed in order. :: - aws s3 ls s3://mybucket --recursive + aws s3 ls s3://mybucket \ + --recursive Output:: @@ -53,7 +54,10 @@ The following ``ls`` command demonstrates the same command using the --human-readable and --summarize options. --human-readable displays file size in Bytes/MiB/KiB/GiB/TiB/PiB/EiB. --summarize displays the total number of objects and total size at the end of the result listing:: - aws s3 ls s3://mybucket --recursive --human-readable --summarize + aws s3 ls s3://mybucket \ + --recursive \ + --human-readable \ + --summarize Output:: diff -Nru awscli-2.15.9/awscli/examples/s3/mb.rst awscli-2.15.22/awscli/examples/s3/mb.rst --- awscli-2.15.9/awscli/examples/s3/mb.rst 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/awscli/examples/s3/mb.rst 2024-02-21 17:34:54.000000000 +0000 @@ -1,3 +1,5 @@ +**Example 1: Create a bucket** + The following ``mb`` command creates a bucket. In this example, the user makes the bucket ``mybucket``. The bucket is created in the region specified in the user's configuration file:: @@ -7,10 +9,13 @@ make_bucket: s3://mybucket +**Example 2: Create a bucket in the specified region** + The following ``mb`` command creates a bucket in a region specified by the ``--region`` parameter. 
In this example, the user makes the bucket ``mybucket`` in the region ``us-west-1``:: - aws s3 mb s3://mybucket --region us-west-1 + aws s3 mb s3://mybucket \ + --region us-west-1 Output:: diff -Nru awscli-2.15.9/awscli/examples/s3/mv.rst awscli-2.15.22/awscli/examples/s3/mv.rst --- awscli-2.15.9/awscli/examples/s3/mv.rst 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/awscli/examples/s3/mv.rst 2024-02-21 17:34:54.000000000 +0000 @@ -1,4 +1,6 @@ -The following ``mv`` command moves a single file to a specified bucket and key:: +**Example 1: Move a local file to the specified bucket** + +The following ``mv`` command moves a single file to a specified bucket and key. :: aws s3 mv test.txt s3://mybucket/test2.txt @@ -6,7 +8,9 @@ move: test.txt to s3://mybucket/test2.txt -The following ``mv`` command moves a single s3 object to a specified bucket and key:: +**Example 2: Move an object to the specified bucket and key** + +The following ``mv`` command moves a single s3 object to a specified bucket and key. :: aws s3 mv s3://mybucket/test.txt s3://mybucket/test2.txt @@ -14,7 +18,9 @@ move: s3://mybucket/test.txt to s3://mybucket/test2.txt -The following ``mv`` command moves a single object to a specified file locally:: +**Example 3: Move an S3 object to the local directory** + +The following ``mv`` command moves a single object to a specified file locally. 
:: aws s3 mv s3://mybucket/test.txt test2.txt @@ -22,6 +28,8 @@ move: s3://mybucket/test.txt to test2.txt +**Example 4: Move an object with it's original name to the specified bucket** + The following ``mv`` command moves a single object to a specified bucket while retaining its original name:: aws s3 mv s3://mybucket/test.txt s3://mybucket2/ @@ -30,63 +38,78 @@ move: s3://mybucket/test.txt to s3://mybucket2/test.txt +**Example 5: Move all objects and prefixes in a bucket to the local directory** + When passed with the parameter ``--recursive``, the following ``mv`` command recursively moves all objects under a specified prefix and bucket to a specified directory. In this example, the bucket ``mybucket`` has the objects -``test1.txt`` and ``test2.txt``:: +``test1.txt`` and ``test2.txt``. :: - aws s3 mv s3://mybucket . --recursive + aws s3 mv s3://mybucket . \ + --recursive Output:: move: s3://mybucket/test1.txt to test1.txt move: s3://mybucket/test2.txt to test2.txt +**Example 6: Move all objects and prefixes in a bucket to the local directory, except ``.jpg`` files** + When passed with the parameter ``--recursive``, the following ``mv`` command recursively moves all files under a specified directory to a specified bucket and prefix while excluding some files by using an ``--exclude`` parameter. In -this example, the directory ``myDir`` has the files ``test1.txt`` and ``test2.jpg``:: +this example, the directory ``myDir`` has the files ``test1.txt`` and ``test2.jpg``. 
:: - aws s3 mv myDir s3://mybucket/ --recursive --exclude "*.jpg" + aws s3 mv myDir s3://mybucket/ \ + --recursive \ + --exclude "*.jpg" Output:: move: myDir/test1.txt to s3://mybucket2/test1.txt +**Example 7: Move all objects and prefixes in a bucket to another bucket, except objects under the specified prefix** + When passed with the parameter ``--recursive``, the following ``mv`` command recursively moves all objects under a specified bucket to another bucket while excluding some objects by using an ``--exclude`` parameter. In this example, -the bucket ``mybucket`` has the objects ``test1.txt`` and ``another/test1.txt``:: +the bucket ``mybucket`` has the objects ``test1.txt`` and ``another/test1.txt``. :: - aws s3 mv s3://mybucket/ s3://mybucket2/ --recursive --exclude "mybucket/another/*" + aws s3 mv s3://mybucket/ s3://mybucket2/ \ + --recursive \ + --exclude "mybucket/another/*" Output:: move: s3://mybucket/test1.txt to s3://mybucket2/test1.txt +**Example 8: Move an object to the specified bucket and set the ACL** + The following ``mv`` command moves a single object to a specified bucket and key while setting the ACL to -``public-read-write``:: +``public-read-write``. :: - aws s3 mv s3://mybucket/test.txt s3://mybucket/test2.txt --acl public-read-write + aws s3 mv s3://mybucket/test.txt s3://mybucket/test2.txt \ + --acl public-read-write Output:: move: s3://mybucket/test.txt to s3://mybucket/test2.txt +**Example 9: Move a local file to the specified bucket and grant permissions** + The following ``mv`` command illustrates the use of the ``--grants`` option to grant read access to all users and full -control to a specific user identified by their email address:: +control to a specific user identified by their email address. 
:: - aws s3 mv file.txt s3://mybucket/ --grants read=uri=http://acs.amazonaws.com/groups/global/AllUsers full=emailaddress=user@example.com + aws s3 mv file.txt s3://mybucket/ \ + --grants read=uri=http://acs.amazonaws.com/groups/global/AllUsers full=emailaddress=user@example.com Output:: move: file.txt to s3://mybucket/file.txt +**Example 10: Move a file to an S3 access point** -**Moving a file to an S3 access point** - -The following ``mv`` command moves a single file (``mydoc.txt``) to the access point (``myaccesspoint``) at the key (``mykey``):: +The following ``mv`` command moves a single file named ``mydoc.txt`` to the access point named ``myaccesspoint`` at the key named ``mykey``. :: aws s3 mv mydoc.txt s3://arn:aws:s3:us-west-2:123456789012:accesspoint/myaccesspoint/mykey Output:: - move: mydoc.txt to s3://arn:aws:s3:us-west-2:123456789012:accesspoint/myaccesspoint/mykey - + move: mydoc.txt to s3://arn:aws:s3:us-west-2:123456789012:accesspoint/myaccesspoint/mykey \ No newline at end of file diff -Nru awscli-2.15.9/awscli/examples/s3/rb.rst awscli-2.15.22/awscli/examples/s3/rb.rst --- awscli-2.15.9/awscli/examples/s3/rb.rst 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/awscli/examples/s3/rb.rst 2024-02-21 17:34:54.000000000 +0000 @@ -1,3 +1,5 @@ +**Example 1: Delete a bucket** + The following ``rb`` command removes a bucket. In this example, the user's bucket is ``mybucket``. Note that the bucket must be empty in order to remove:: aws s3 rb s3://mybucket @@ -6,15 +8,17 @@ remove_bucket: mybucket +**Example 2: Force delete a bucket** + The following ``rb`` command uses the ``--force`` parameter to first remove all of the objects in the bucket and then remove the bucket itself. 
In this example, the user's bucket is ``mybucket`` and the objects in ``mybucket`` are ``test1.txt`` and ``test2.txt``:: - aws s3 rb s3://mybucket --force + aws s3 rb s3://mybucket \ + --force Output:: delete: s3://mybucket/test1.txt delete: s3://mybucket/test2.txt - remove_bucket: mybucket - + remove_bucket: mybucket \ No newline at end of file diff -Nru awscli-2.15.9/awscli/examples/s3/rm.rst awscli-2.15.22/awscli/examples/s3/rm.rst --- awscli-2.15.9/awscli/examples/s3/rm.rst 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/awscli/examples/s3/rm.rst 2024-02-21 17:34:54.000000000 +0000 @@ -1,3 +1,5 @@ +**Example 1: Delete an S3 object** + The following ``rm`` command deletes a single s3 object:: aws s3 rm s3://mybucket/test2.txt @@ -6,41 +8,53 @@ delete: s3://mybucket/test2.txt +**Example 2: Delete all contents in a bucket** + The following ``rm`` command recursively deletes all objects under a specified bucket and prefix when passed with the parameter ``--recursive``. In this example, the bucket ``mybucket`` contains the objects ``test1.txt`` and ``test2.txt``:: - aws s3 rm s3://mybucket --recursive + aws s3 rm s3://mybucket \ + --recursive Output:: delete: s3://mybucket/test1.txt delete: s3://mybucket/test2.txt +**Example 3: Delete all contents in a bucket, except ``.jpg`` files** + + The following ``rm`` command recursively deletes all objects under a specified bucket and prefix when passed with the parameter ``--recursive`` while excluding some objects by using an ``--exclude`` parameter. 
In this example, the bucket ``mybucket`` has the objects ``test1.txt`` and ``test2.jpg``:: - aws s3 rm s3://mybucket/ --recursive --exclude "*.jpg" + aws s3 rm s3://mybucket/ \ + --recursive \ + --exclude "*.jpg" Output:: delete: s3://mybucket/test1.txt +**Example 4: Delete all contents in a bucket, except objects under the specified prefix** + The following ``rm`` command recursively deletes all objects under a specified bucket and prefix when passed with the parameter ``--recursive`` while excluding all objects under a particular prefix by using an ``--exclude`` parameter. In this example, the bucket ``mybucket`` has the objects ``test1.txt`` and ``another/test.txt``:: - aws s3 rm s3://mybucket/ --recursive --exclude "another/*" + aws s3 rm s3://mybucket/ \ + --recursive \ + --exclude "another/*" Output:: delete: s3://mybucket/test1.txt +**Example 5: Delete an object from an S3 access point** -**Deleting an object from an S3 access point** - -The following ``rm`` command deletes a single object (``mykey``) from the access point (``myaccesspoint``):: +The following ``rm`` command deletes a single object (``mykey``) from the access point (``myaccesspoint``). :: aws s3 rm s3://arn:aws:s3:us-west-2:123456789012:accesspoint/myaccesspoint/mykey diff -Nru awscli-2.15.9/awscli/examples/s3/sync.rst awscli-2.15.22/awscli/examples/s3/sync.rst --- awscli-2.15.9/awscli/examples/s3/sync.rst 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/awscli/examples/s3/sync.rst 2024-02-21 17:34:54.000000000 +0000 @@ -1,15 +1,11 @@ +**Example 1: Sync all local objects to the specified bucket** -**Sync from local directory to S3 bucket** - -The following ``sync`` command syncs objects to a specified bucket and prefix from files in a local directory by -uploading the local files to s3. 
A local file will require uploading if one of the following conditions is true: - -* The local file does not exist under the specified bucket and prefix. -* The size of the local file is different than the size of the s3 object. -* The last modified time of the local file is newer than the last modified time of the s3 object. - -In this example, the user syncs the bucket ``mybucket`` to the local current directory. The local current directory -contains the files ``test.txt`` and ``test2.txt``. The bucket ``mybucket`` contains no objects:: +The following ``sync`` command syncs objects from a local directory to the specified prefix and bucket by +uploading the local files to S3. A local file will require uploading if the size of the local file is different than +the size of the S3 object, the last modified time of the local file is newer than the last modified time of the S3 +object, or the local file does not exist under the specified bucket and prefix. In this example, the user syncs the +bucket ``mybucket`` to the local current directory. The local current directory contains the files ``test.txt`` and +``test2.txt``. The bucket ``mybucket`` contains no objects. :: aws s3 sync . s3://mybucket @@ -18,17 +14,14 @@ upload: test.txt to s3://mybucket/test.txt upload: test2.txt to s3://mybucket/test2.txt -**Sync from S3 bucket to another S3 bucket** - -The following ``sync`` command syncs objects to a specified bucket and prefix from objects in another specified -bucket and prefix by copying s3 objects. An s3 object will require copying if one of the following conditions is true: +**Example 2: Sync all S3 objects from the specified S3 bucket to another bucket** -* The s3 object does not exist in the specified bucket and prefix destination. -* The sizes of the two s3 objects differ. -* The last modified time of the source is newer than the last modified time of the destination. 
+The following ``sync`` command syncs objects under a specified prefix and bucket to objects under another specified +prefix and bucket by copying S3 objects. An S3 object will require copying if the sizes of the two S3 objects differ, +the last modified time of the source is newer than the last modified time of the destination, or the S3 object does not +exist under the specified bucket and prefix destination. -In this example, the user syncs the bucket ``mybucket`` to the bucket ``mybucket2``. -The bucket ``mybucket`` contains the objects ``test.txt`` and ``test2.txt``. The bucket +In this example, the user syncs the bucket ``mybucket`` to the bucket ``mybucket2``. The bucket ``mybucket`` contains the objects ``test.txt`` and ``test2.txt``. The bucket ``mybucket2`` contains no objects:: aws s3 sync s3://mybucket s3://mybucket2 @@ -38,18 +31,15 @@ copy: s3://mybucket/test.txt to s3://mybucket2/test.txt copy: s3://mybucket/test2.txt to s3://mybucket2/test2.txt -**Sync from S3 bucket to local directory** - -The following ``sync`` command syncs files to a local directory from objects in a specified bucket and prefix by -downloading s3 objects. An s3 object will require downloading if one of the following conditions is true: +**Example 3: Sync all S3 objects from the specified S3 bucket to the local directory** -* The s3 object does not exist in the local directory. -* The size of the s3 object differs from the size of the local file. -* The last modified time of the s3 object is older than the last modified time of the local file. - -Take note that when objects are downloaded from s3, the last modified time of the local file is changed to the last modified time of the s3 object. -In this example, the user syncs the current local directory to the bucket ``mybucket``. The bucket ``mybucket`` contains -the objects ``test.txt`` and ``test2.txt``. 
The current local directory has no files:: +The following ``sync`` command syncs files from the specified S3 bucket to the local directory by +downloading S3 objects. An S3 object will require downloading if the size of the S3 object differs from the size of the +local file, the last modified time of the S3 object is newer than the last modified time of the local file, or the S3 +object does not exist in the local directory. Take note that when objects are downloaded from S3, the last modified +time of the local file is changed to the last modified time of the S3 object. In this example, the user syncs the +bucket ``mybucket`` to the current local directory. The bucket ``mybucket`` contains the objects ``test.txt`` and +``test2.txt``. The current local directory has no files:: aws s3 sync s3://mybucket . @@ -58,15 +48,16 @@ download: s3://mybucket/test.txt to test.txt download: s3://mybucket/test2.txt to test2.txt -**Sync from local directory to S3 bucket while deleting files that exist in the destination but not in the source** +**Example 4: Sync all local objects to the specified bucket and delete all files that do not match** -The following ``sync`` command syncs objects to a specified bucket and prefix from files in a local directory by -uploading the local files to s3. Because the ``--delete`` parameter flag is used, any files existing in -specified bucket and prefix but not existing in the local directory will be deleted. In this example, the user syncs +The following ``sync`` command syncs objects under a specified prefix and bucket to files in a local directory by +uploading the local files to S3. Because of the ``--delete`` parameter, any files existing under the +specified prefix and bucket but not existing in the local directory will be deleted. In this example, the user syncs the bucket ``mybucket`` to the local current directory. The local current directory contains the files ``test.txt`` and ``test2.txt``. 
The bucket ``mybucket`` contains the object ``test3.txt``:: - aws s3 sync . s3://mybucket --delete + aws s3 sync . s3://mybucket \ + --delete Output:: @@ -74,58 +65,49 @@ upload: test2.txt to s3://mybucket/test2.txt delete: s3://mybucket/test3.txt -**Sync from local directory to S3 bucket while excluding files that match a specified pattern** +**Example 5: Sync all local objects to the specified bucket except ``.jpg`` files** -The following ``sync`` command syncs objects to a specified bucket and prefix from files in a local directory by -uploading the local files to s3. Because the ``--exclude`` parameter flag is used, all files matching the pattern -existing both in s3 and locally will be excluded from the sync. In this example, the user syncs the bucket ``mybucket`` +The following ``sync`` command syncs files in a local directory to objects under a specified prefix and bucket by +uploading the local files to S3. Because of the ``--exclude`` parameter, all files matching the pattern +existing both in S3 and locally will be excluded from the sync. In this example, the user syncs the bucket ``mybucket`` to the local current directory. The local current directory contains the files ``test.jpg`` and ``test2.txt``. The bucket ``mybucket`` contains the object ``test.jpg`` of a different size than the local ``test.jpg``:: - aws s3 sync . s3://mybucket --exclude "*.jpg" + aws s3 sync . s3://mybucket \ + --exclude "*.jpg" Output:: upload: test2.txt to s3://mybucket/test2.txt -**Sync from S3 bucket to local directory while excluding objects that match a specified pattern** +**Example 6: Sync all S3 objects from the specified S3 bucket to the local directory, except objects under the specified prefix** -The following ``sync`` command syncs files to a local directory from objects in a specified bucket and prefix by -downloading s3 objects. This example uses the ``--exclude`` parameter flag to exclude a specified directory -and s3 prefix from the ``sync`` command. 
In this example, the user syncs the local current directory to the bucket +The following ``sync`` command syncs objects under a specified prefix and bucket to files under a local directory by +downloading S3 objects. This example uses the ``--exclude`` parameter flag to exclude a specified directory +and S3 prefix from the ``sync`` command. In this example, the user syncs the local current directory to the bucket ``mybucket``. The local current directory contains the files ``test.txt`` and ``another/test2.txt``. The bucket ``mybucket`` contains the objects ``another/test5.txt`` and ``test1.txt``:: - aws s3 sync s3://mybucket/ . --exclude "*another/*" + aws s3 sync s3://mybucket/ . \ + --exclude "*another/*" Output:: download: s3://mybucket/test1.txt to test1.txt -**Sync from S3 bucket to another S3 bucket while excluding and including objects that match a specified pattern** - -The following ``sync`` command syncs objects under a specified prefix and bucket to objects under another specified -prefix and bucket by copying s3 objects. Because both the ``--exclude`` and ``--include`` parameter flags are thrown, -the second flag will take precedence over the first flag. In this example, all files are excluded from the ``sync`` -command except for files ending with .txt. The bucket ``mybucket`` contains the objects ``test.txt``, ``image1.png``, -and ``image2.png``. 
The bucket``mybucket2`` contains no objects:: - - aws s3 sync s3://mybucket s3://mybucket2 \ - --exclude "*" \ - --include "*txt" - -Output:: - - copy: s3://mybucket/test.txt to s3://mybucket2/test.txt - -**Sync from S3 bucket to another S3 bucket in a different region** +**Example 7: Sync all objects between buckets in different regions** The following ``sync`` command syncs files between two buckets in different regions:: - aws s3 sync s3://my-us-west-2-bucket s3://my-us-east-1-bucket --source-region us-west-2 --region us-east-1 + aws s3 sync s3://my-us-west-2-bucket s3://my-us-east-1-bucket \ + --source-region us-west-2 \ + --region us-east-1 + +Output:: + download: s3://my-us-west-2-bucket/test1.txt to s3://my-us-east-1-bucket/test1.txt -**Sync to an S3 access point** +**Example 8: Sync to an S3 access point** The following ``sync`` command syncs the current directory to the access point (``myaccesspoint``):: diff -Nru awscli-2.15.9/awscli/examples/s3/website.rst awscli-2.15.22/awscli/examples/s3/website.rst --- awscli-2.15.9/awscli/examples/s3/website.rst 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/awscli/examples/s3/website.rst 2024-02-21 17:34:54.000000000 +0000 @@ -1,9 +1,11 @@ -The following command configures a bucket named ``my-bucket`` as a static website:: +**Configure an S3 bucket as a static website** - aws s3 website s3://my-bucket/ --index-document index.html --error-document error.html +The following command configures a bucket named ``my-bucket`` as a static website. The index document option specifies the file in ``my-bucket`` that visitors will be directed to when they navigate to the website URL. In this case, the bucket is in the us-west-2 region, so the site would appear at ``http://my-bucket.s3-website-us-west-2.amazonaws.com``. -The index document option specifies the file in ``my-bucket`` that visitors will be directed to when they navigate to the website URL. 
In this case, the bucket is in the us-west-2 region, so the site would appear at ``http://my-bucket.s3-website-us-west-2.amazonaws.com``. +All files in the bucket that appear on the static site must be configured to allow visitors to open them. File permissions are configured separately from the bucket website configuration. :: -All files in the bucket that appear on the static site must be configured to allow visitors to open them. File permissions are configured separately from the bucket website configuration. For information on hosting a static website in Amazon S3, see `Hosting a Static Website`_ in the *Amazon Simple Storage Service Developer Guide*. + aws s3 website s3://my-bucket/ \ + --index-document index.html \ + --error-document error.html -.. _`Hosting a Static Website`: http://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html \ No newline at end of file +For information on hosting a static website in Amazon S3, see `Hosting a Static Website `__ in the *Amazon Simple Storage Service Developer Guide*. \ No newline at end of file diff -Nru awscli-2.15.9/awscli/examples/securityhub/batch-get-configuration-policy-associations.rst awscli-2.15.22/awscli/examples/securityhub/batch-get-configuration-policy-associations.rst --- awscli-2.15.9/awscli/examples/securityhub/batch-get-configuration-policy-associations.rst 1970-01-01 00:00:00.000000000 +0000 +++ awscli-2.15.22/awscli/examples/securityhub/batch-get-configuration-policy-associations.rst 2024-02-21 17:34:54.000000000 +0000 @@ -0,0 +1,20 @@ +**To get configuration association details for a batch of targets** + +The following ``batch-get-configuration-policy-associations`` example retrieves association details for the specified targets. You can provide account IDs, organizational unit IDs, or the root ID for the target. 
:: + + aws securityhub batch-get-configuration-policy-associations \ + --target '{"OrganizationalUnitId": "ou-6hi7-8j91kl2m"}' + +Output:: + + { + "ConfigurationPolicyId": "a1b2c3d4-5678-90ab-cdef-EXAMPLE33333", + "TargetId": "ou-6hi7-8j91kl2m", + "TargetType": "ORGANIZATIONAL_UNIT", + "AssociationType": "APPLIED", + "UpdatedAt": "2023-09-26T21:13:01.816000+00:00", + "AssociationStatus": "SUCCESS", + "AssociationStatusMessage": "Association applied successfully on this target." + } + +For more information, see `Viewing Security Hub configuration policies `__ in the *AWS Security Hub User Guide*. \ No newline at end of file diff -Nru awscli-2.15.9/awscli/examples/securityhub/batch-get-security-controls.rst awscli-2.15.22/awscli/examples/securityhub/batch-get-security-controls.rst --- awscli-2.15.9/awscli/examples/securityhub/batch-get-security-controls.rst 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/awscli/examples/securityhub/batch-get-security-controls.rst 2024-02-21 17:34:54.000000000 +0000 @@ -1,31 +1,43 @@ -**To get control details** +**To get security control details** -The following ``batch-get-security-controls`` example gets details for Config.1 and IAM.1 in the current AWS account and AWS Region. :: +The following ``batch-get-security-controls`` example gets details for the security controls ACM.1 and IAM.1 in the current AWS account and AWS Region. 
:: aws securityhub batch-get-security-controls \ - --security-control-ids '["Config.1", "IAM.1"]' + --security-control-ids '["ACM.1", "IAM.1"]' Output:: { "SecurityControls": [ { - "SecurityControlId": "Config.1", - "SecurityControlArn": "arn:aws:securityhub:us-east-2:068873283051:security-control/Config.1", - "Title": "AWS Config should be enabled", - "Description": "This AWS control checks whether the Config service is enabled in the account for the local region and is recording all resources.", - "RemediationUrl": "https://docs.aws.amazon.com/console/securityhub/Config.1/remediation", + "SecurityControlId": "ACM.1", + "SecurityControlArn": "arn:aws:securityhub:us-east-2:123456789012:security-control/ACM.1", + "Title": "Imported and ACM-issued certificates should be renewed after a specified time period", + "Description": "This control checks whether an AWS Certificate Manager (ACM) certificate is renewed within the specified time period. It checks both imported certificates and certificates provided by ACM. The control fails if the certificate isn't renewed within the specified time period. 
Unless you provide a custom parameter value for the renewal period, Security Hub uses a default value of 30 days.", + "RemediationUrl": "https://docs.aws.amazon.com/console/securityhub/ACM.1/remediation", + "SeverityRating": "MEDIUM", "SecurityControlStatus": "ENABLED" + "UpdateStatus": "READY", + "Parameters": { + "daysToExpiration": { + "ValueType": "CUSTOM", + "Value": { + "Integer": 15 + } + } + }, + "LastUpdateReason": "Updated control parameter" }, { "SecurityControlId": "IAM.1", - "SecurityControlArn": "arn:aws:securityhub:us-east-2:068873283051:security-control/IAM.1", + "SecurityControlArn": "arn:aws:securityhub:us-east-2:123456789012:security-control/IAM.1", "Title": "IAM policies should not allow full \"*\" administrative privileges", "Description": "This AWS control checks whether the default version of AWS Identity and Access Management (IAM) policies (also known as customer managed policies) do not have administrator access with a statement that has \"Effect\": \"Allow\" with \"Action\": \"*\" over \"Resource\": \"*\". It only checks for the Customer Managed Policies that you created, but not inline and AWS Managed Policies.", "RemediationUrl": "https://docs.aws.amazon.com/console/securityhub/IAM.1/remediation", "SeverityRating": "HIGH", "SecurityControlStatus": "ENABLED" + "UpdateStatus": "READY", + "Parameters": {} } ] } diff -Nru awscli-2.15.9/awscli/examples/securityhub/create-configuration-policy.rst awscli-2.15.22/awscli/examples/securityhub/create-configuration-policy.rst --- awscli-2.15.9/awscli/examples/securityhub/create-configuration-policy.rst 1970-01-01 00:00:00.000000000 +0000 +++ awscli-2.15.22/awscli/examples/securityhub/create-configuration-policy.rst 2024-02-21 17:34:54.000000000 +0000 @@ -0,0 +1,49 @@ +**To create a configuration policy** + +The following ``create-configuration-policy`` example creates a configuration policy with the specified settings. 
:: + + aws securityhub create-configuration-policy \ + --name "SampleConfigurationPolicy" \ + --description "SampleDescription" \ + --configuration-policy '{"SecurityHub": {"ServiceEnabled": true, "EnabledStandardIdentifiers": ["arn:aws:securityhub:eu-central-1::standards/aws-foundational-security-best-practices/v/1.0.0","arn:aws:securityhub:::ruleset/cis-aws-foundations-benchmark/v/1.2.0"],"SecurityControlsConfiguration":{"DisabledSecurityControlIdentifiers": ["CloudTrail.2"], "SecurityControlCustomParameters": [{"SecurityControlId": "ACM.1", "Parameters": {"daysToExpiration": {"ValueType": "CUSTOM", "Value": {"Integer": 15}}}}]}}}' \ + --tags '{"Environment": "Prod"}' + +Output:: + + { + "Arn": "arn:aws:securityhub:eu-central-1:123456789012:configuration-policy/a1b2c3d4-5678-90ab-cdef-EXAMPLE11111", + "Id": "a1b2c3d4-5678-90ab-cdef-EXAMPLE11111", + "Name": "SampleConfigurationPolicy", + "Description": "SampleDescription", + "UpdatedAt": "2023-11-28T20:28:04.494000+00:00", + "CreatedAt": "2023-11-28T20:28:04.494000+00:00", + "ConfigurationPolicy": { + "SecurityHub": { + "ServiceEnabled": true, + "EnabledStandardIdentifiers": [ + "arn:aws:securityhub:eu-central-1::standards/aws-foundational-security-best-practices/v/1.0.0", + "arn:aws:securityhub:::ruleset/cis-aws-foundations-benchmark/v/1.2.0" + ], + "SecurityControlsConfiguration": { + "DisabledSecurityControlIdentifiers": [ + "CloudTrail.2" + ], + "SecurityControlCustomParameters": [ + { + "SecurityControlId": "ACM.1", + "Parameters": { + "daysToExpiration": { + "ValueType": "CUSTOM", + "Value": { + "Integer": 15 + } + } + } + } + ] + } + } + } + } + +For more information, see `Creating and associating Security Hub configuration policies `__ in the *AWS Security Hub User Guide*. 
\ No newline at end of file diff -Nru awscli-2.15.9/awscli/examples/securityhub/delete-configuration-policy.rst awscli-2.15.22/awscli/examples/securityhub/delete-configuration-policy.rst --- awscli-2.15.9/awscli/examples/securityhub/delete-configuration-policy.rst 1970-01-01 00:00:00.000000000 +0000 +++ awscli-2.15.22/awscli/examples/securityhub/delete-configuration-policy.rst 2024-02-21 17:34:54.000000000 +0000 @@ -0,0 +1,10 @@ +**To delete a configuration policy** + +The following ``delete-configuration-policy`` example deletes the specified configuration policy. :: + + aws securityhub delete-configuration-policy \ + --identifier "arn:aws:securityhub:eu-central-1:123456789012:configuration-policy/a1b2c3d4-5678-90ab-cdef-EXAMPLE11111" + +This command produces no output. + +For more information, see `Deleting and disassociating Security Hub configuration policies `__ in the *AWS Security Hub User Guide*. \ No newline at end of file diff -Nru awscli-2.15.9/awscli/examples/securityhub/describe-organization-configuration.rst awscli-2.15.22/awscli/examples/securityhub/describe-organization-configuration.rst --- awscli-2.15.9/awscli/examples/securityhub/describe-organization-configuration.rst 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/awscli/examples/securityhub/describe-organization-configuration.rst 2024-02-21 17:34:54.000000000 +0000 @@ -1,14 +1,20 @@ -**To view information about an integration with AWS Organizations** +**To view how Security Hub is configured for an organization** -The following ``describe-organization-configuration`` example returns information about the integration with Organizations. :: +The following ``describe-organization-configuration`` example returns information about the way an organization is configured in Security Hub. In this example, the organization uses central configuration. Only the Security Hub administrator account can run this command. 
:: - aws securityhub describe-organization-configuration + aws securityhub describe-organization-configuration Output:: { - "autoEnable": true, - "memberAccountLimitReached": false + "AutoEnable": false, + "MemberAccountLimitReached": false, + "AutoEnableStandards": "NONE", + "OrganizationConfiguration": { + "ConfigurationType": "LOCAL", + "Status": "ENABLED", + "StatusMessage": "Central configuration has been enabled successfully" + } } -For more information, see `Managing accounts `__ in the *AWS Security Hub User Guide*. +For more information, see `Managing accounts with AWS Organizations `__ in the *AWS Security Hub User Guide*. \ No newline at end of file diff -Nru awscli-2.15.9/awscli/examples/securityhub/get-configuration-policy-association.rst awscli-2.15.22/awscli/examples/securityhub/get-configuration-policy-association.rst --- awscli-2.15.9/awscli/examples/securityhub/get-configuration-policy-association.rst 1970-01-01 00:00:00.000000000 +0000 +++ awscli-2.15.22/awscli/examples/securityhub/get-configuration-policy-association.rst 2024-02-21 17:34:54.000000000 +0000 @@ -0,0 +1,20 @@ +**To get configuration association details for a target** + +The following ``get-configuration-policy-association`` example retrieves association details for the specified target. You can provide an account ID, organizational unit ID, or the root ID for the target. :: + + aws securityhub get-configuration-policy-association \ + --target '{"OrganizationalUnitId": "ou-6hi7-8j91kl2m"}' + +Output:: + + { + "ConfigurationPolicyId": "a1b2c3d4-5678-90ab-cdef-EXAMPLE33333", + "TargetId": "ou-6hi7-8j91kl2m", + "TargetType": "ORGANIZATIONAL_UNIT", + "AssociationType": "APPLIED", + "UpdatedAt": "2023-09-26T21:13:01.816000+00:00", + "AssociationStatus": "SUCCESS", + "AssociationStatusMessage": "Association applied successfully on this target." + } + +For more information, see `Viewing Security Hub configuration policies `__ in the *AWS Security Hub User Guide*. 
\ No newline at end of file diff -Nru awscli-2.15.9/awscli/examples/securityhub/get-configuration-policy.rst awscli-2.15.22/awscli/examples/securityhub/get-configuration-policy.rst --- awscli-2.15.9/awscli/examples/securityhub/get-configuration-policy.rst 1970-01-01 00:00:00.000000000 +0000 +++ awscli-2.15.22/awscli/examples/securityhub/get-configuration-policy.rst 2024-02-21 17:34:54.000000000 +0000 @@ -0,0 +1,46 @@ +**To view configuration policy details** + +The following ``get-configuration-policy`` example retrieves details about the specified configuration policy. :: + + aws securityhub get-configuration-policy \ + --identifier "arn:aws:securityhub:eu-central-1:123456789012:configuration-policy/a1b2c3d4-5678-90ab-cdef-EXAMPLE11111" + +Output:: + + { + "Arn": "arn:aws:securityhub:eu-central-1:123456789012:configuration-policy/a1b2c3d4-5678-90ab-cdef-EXAMPLE11111", + "Id": "ce5ed1e7-9639-4e2f-9313-fa87fcef944b", + "Name": "SampleConfigurationPolicy", + "Description": "SampleDescription", + "UpdatedAt": "2023-11-28T20:28:04.494000+00:00", + "CreatedAt": "2023-11-28T20:28:04.494000+00:00", + "ConfigurationPolicy": { + "SecurityHub": { + "ServiceEnabled": true, + "EnabledStandardIdentifiers": [ + "arn:aws:securityhub:eu-central-1::standards/aws-foundational-security-best-practices/v/1.0.0", + "arn:aws:securityhub:::ruleset/cis-aws-foundations-benchmark/v/1.2.0" + ], + "SecurityControlsConfiguration": { + "DisabledSecurityControlIdentifiers": [ + "CloudTrail.2" + ], + "SecurityControlCustomParameters": [ + { + "SecurityControlId": "ACM.1", + "Parameters": { + "daysToExpiration": { + "ValueType": "CUSTOM", + "Value": { + "Integer": 15 + } + } + } + } + ] + } + } + } + } + +For more information, see `Viewing Security Hub configuration policies `__ in the *AWS Security Hub User Guide*. 
\ No newline at end of file diff -Nru awscli-2.15.9/awscli/examples/securityhub/get-security-control-definition.rst awscli-2.15.22/awscli/examples/securityhub/get-security-control-definition.rst --- awscli-2.15.9/awscli/examples/securityhub/get-security-control-definition.rst 1970-01-01 00:00:00.000000000 +0000 +++ awscli-2.15.22/awscli/examples/securityhub/get-security-control-definition.rst 2024-02-21 17:34:54.000000000 +0000 @@ -0,0 +1,33 @@ +**To get security control definition details** + +The following ``get-security-control-definition`` example retrieves definition details for a Security Hub security control. Details include the control title, description, Region availability, parameters, and other information. :: + + aws securityhub get-security-control-definition \ + --security-control-id ACM.1 + +Output:: + + { + "SecurityControlDefinition": { + "SecurityControlId": "ACM.1", + "Title": "Imported and ACM-issued certificates should be renewed after a specified time period", + "Description": "This control checks whether an AWS Certificate Manager (ACM) certificate is renewed within the specified time period. It checks both imported certificates and certificates provided by ACM. The control fails if the certificate isn't renewed within the specified time period. Unless you provide a custom parameter value for the renewal period, Security Hub uses a default value of 30 days.", + "RemediationUrl": "https://docs.aws.amazon.com/console/securityhub/ACM.1/remediation", + "SeverityRating": "MEDIUM", + "CurrentRegionAvailability": "AVAILABLE", + "ParameterDefinitions": { + "daysToExpiration": { + "Description": "Number of days within which the ACM certificate must be renewed", + "ConfigurationOptions": { + "Integer": { + "DefaultValue": 30, + "Min": 14, + "Max": 365 + } + } + } + } + } + } + +For more information, see `Custom control parameters `__ in the *AWS Security Hub User Guide*. 
\ No newline at end of file diff -Nru awscli-2.15.9/awscli/examples/securityhub/list-configuration-policies.rst awscli-2.15.22/awscli/examples/securityhub/list-configuration-policies.rst --- awscli-2.15.9/awscli/examples/securityhub/list-configuration-policies.rst 1970-01-01 00:00:00.000000000 +0000 +++ awscli-2.15.22/awscli/examples/securityhub/list-configuration-policies.rst 2024-02-21 17:34:54.000000000 +0000 @@ -0,0 +1,38 @@ +**To list configuration policy summaries** + +The following ``list-configuration-policies`` example lists a summary of configuration policies for the organization. :: + + aws securityhub list-configuration-policies \ + --max-items 3 + +Output:: + + { + "ConfigurationPolicySummaries": [ + { + "Arn": "arn:aws:securityhub:eu-central-1:123456789012:configuration-policy/a1b2c3d4-5678-90ab-cdef-EXAMPLE11111", + "Id": "a1b2c3d4-5678-90ab-cdef-EXAMPLE11111", + "Name": "SampleConfigurationPolicy1", + "Description": "SampleDescription1", + "UpdatedAt": "2023-09-26T21:08:36.214000+00:00", + "ServiceEnabled": true + }, + { + "Arn": "arn:aws:securityhub:eu-central-1:123456789012:configuration-policy/a1b2c3d4-5678-90ab-cdef-EXAMPLE22222", + "Id": "a1b2c3d4-5678-90ab-cdef-EXAMPLE22222", + "Name": "SampleConfigurationPolicy2", + "Description": "SampleDescription2" + "UpdatedAt": "2023-11-28T19:26:25.207000+00:00", + "ServiceEnabled": true + }, + { + "Arn": "arn:aws:securityhub:eu-central-1:123456789012:configuration-policy/a1b2c3d4-5678-90ab-cdef-EXAMPLE33333", + "Id": "a1b2c3d4-5678-90ab-cdef-EXAMPLE33333", + "Name": "SampleConfigurationPolicy3", + "Description": "SampleDescription3", + "UpdatedAt": "2023-11-28T20:28:04.494000+00:00", + "ServiceEnabled": true + } + } + +For more information, see `Viewing Security Hub configuration policies `__ in the *AWS Security Hub User Guide*. 
\ No newline at end of file diff -Nru awscli-2.15.9/awscli/examples/securityhub/list-configuration-policy-associations.rst awscli-2.15.22/awscli/examples/securityhub/list-configuration-policy-associations.rst --- awscli-2.15.9/awscli/examples/securityhub/list-configuration-policy-associations.rst 1970-01-01 00:00:00.000000000 +0000 +++ awscli-2.15.22/awscli/examples/securityhub/list-configuration-policy-associations.rst 2024-02-21 17:34:54.000000000 +0000 @@ -0,0 +1,50 @@ +**To list configuration associations** + +The following ``list-configuration-policy-associations`` example lists a summary of configuration associations for the organization. The response include associations with configuration policies and self-managed behavior. :: + + aws securityhub list-configuration-policy-associations \ + --association-type "APPLIED" \ + --max-items 4 + +Output:: + + { + "ConfigurationPolicyAssociationSummaries": [ + { + "ConfigurationPolicyId": "a1b2c3d4-5678-90ab-cdef-EXAMPLE11111", + "TargetId": "r-1ab2", + "TargetType": "ROOT", + "AssociationType": "APPLIED", + "UpdatedAt": "2023-11-28T19:26:49.417000+00:00", + "AssociationStatus": "FAILED", + "AssociationStatusMessage": "Policy association failed because 2 organizational units or accounts under this root failed." + }, + { + "ConfigurationPolicyId": "a1b2c3d4-5678-90ab-cdef-EXAMPLE22222", + "TargetId": "ou-1ab2-c3de4f5g", + "TargetType": "ORGANIZATIONAL_UNIT", + "AssociationType": "APPLIED", + "UpdatedAt": "2023-09-26T21:14:05.283000+00:00", + "AssociationStatus": "FAILED", + "AssociationStatusMessage": "One or more children under this target failed association." + }, + { + "ConfigurationPolicyId": "a1b2c3d4-5678-90ab-cdef-EXAMPLE33333", + "TargetId": "ou-6hi7-8j91kl2m", + "TargetType": "ORGANIZATIONAL_UNIT", + "AssociationType": "APPLIED", + "UpdatedAt": "2023-09-26T21:13:01.816000+00:00", + "AssociationStatus": "SUCCESS", + "AssociationStatusMessage": "Association applied successfully on this target." 
+ }, + { + "ConfigurationPolicyId": "SELF_MANAGED_SECURITY_HUB", + "TargetId": "111122223333", + "TargetType": "ACCOUNT", + "AssociationType": "APPLIED", + "UpdatedAt": "2023-11-28T22:01:26.409000+00:00", + "AssociationStatus": "SUCCESS" + } + } + +For more information, see `Viewing Security Hub configuration policies `__ in the *AWS Security Hub User Guide*. \ No newline at end of file diff -Nru awscli-2.15.9/awscli/examples/securityhub/list-security-control-definitions.rst awscli-2.15.22/awscli/examples/securityhub/list-security-control-definitions.rst --- awscli-2.15.9/awscli/examples/securityhub/list-security-control-definitions.rst 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/awscli/examples/securityhub/list-security-control-definitions.rst 2024-02-21 17:34:54.000000000 +0000 @@ -1,4 +1,4 @@ -**Example 1: To list available security controls** +**Example 1: To list all available security controls** The following ``list-security-control-definitions`` example lists the available security controls across all Security Hub standards. This example limits the results to three controls. :: @@ -12,10 +12,13 @@ { "SecurityControlId": "ACM.1", "Title": "Imported and ACM-issued certificates should be renewed after a specified time period", - "Description": "This AWS control checks whether ACM Certificates in your account are marked for expiration within a specified time period. Certificates provided by ACM are automatically renewed. ACM does not automatically renew certificates that you import.", + "Description": "This control checks whether an AWS Certificate Manager (ACM) certificate is renewed within the specified time period. It checks both imported certificates and certificates provided by ACM. The control fails if the certificate isn't renewed within the specified time period. 
Unless you provide a custom parameter value for the renewal period, Security Hub uses a default value of 30 days.", "RemediationUrl": "https://docs.aws.amazon.com/console/securityhub/ACM.1/remediation", "SeverityRating": "MEDIUM", - "CurrentRegionAvailability": "AVAILABLE" + "CurrentRegionAvailability": "AVAILABLE", + "CustomizableProperties": [ + "Parameters" + ] }, { "SecurityControlId": "ACM.2", @@ -23,15 +26,19 @@ "Description": "This control checks whether RSA certificates managed by AWS Certificate Manager use a key length of at least 2,048 bits. The control fails if the key length is smaller than 2,048 bits.", "RemediationUrl": "https://docs.aws.amazon.com/console/securityhub/ACM.2/remediation", "SeverityRating": "HIGH", - "CurrentRegionAvailability": "AVAILABLE" + "CurrentRegionAvailability": "AVAILABLE", + "CustomizableProperties": [] }, { "SecurityControlId": "APIGateway.1", "Title": "API Gateway REST and WebSocket API execution logging should be enabled", - "Description": "This control checks whether all stages of Amazon API Gateway REST and WebSocket APIs have logging enabled. The control fails if logging is not enabled for all methods of a stage or if loggingLevel is neither ERROR nor INFO.", + "Description": "This control checks whether all stages of an Amazon API Gateway REST or WebSocket API have logging enabled. The control fails if the 'loggingLevel' isn't 'ERROR' or 'INFO' for all stages of the API. 
Unless you provide custom parameter values to indicate that a specific log type should be enabled, Security Hub produces a passed finding if the logging level is either 'ERROR' or 'INFO'.", "RemediationUrl": "https://docs.aws.amazon.com/console/securityhub/APIGateway.1/remediation", "SeverityRating": "MEDIUM", - "CurrentRegionAvailability": "AVAILABLE" + "CurrentRegionAvailability": "AVAILABLE", + "CustomizableProperties": [ + "Parameters" + ] } ], "NextToken": "U2FsdGVkX1/UprCPzxVbkDeHikDXbDxfgJZ1w2RG1XWsFPTMTIQPVE0m/FduIGxS7ObRtAbaUt/8/RCQcg2PU0YXI20hH/GrhoOTgv+TSm0qvQVFhkJepWmqh+NYawjocVBeos6xzn/8qnbF9IuwGg==" @@ -57,7 +64,8 @@ "Description": "This AWS control checks that there is at least one multi-region AWS CloudTrail trail includes read and write management events.", "RemediationUrl": "https://docs.aws.amazon.com/console/securityhub/CloudTrail.1/remediation", "SeverityRating": "HIGH", - "CurrentRegionAvailability": "AVAILABLE" + "CurrentRegionAvailability": "AVAILABLE", + "CustomizableProperties": [] }, { "SecurityControlId": "CloudTrail.2", @@ -65,7 +73,8 @@ "Description": "This AWS control checks whether AWS CloudTrail is configured to use the server side encryption (SSE) AWS Key Management Service (AWS KMS) customer master key (CMK) encryption. 
The check will pass if the KmsKeyId is defined.", "RemediationUrl": "https://docs.aws.amazon.com/console/securityhub/CloudTrail.2/remediation", "SeverityRating": "MEDIUM", - "CurrentRegionAvailability": "AVAILABLE" + "CurrentRegionAvailability": "AVAILABLE", + "CustomizableProperties": [] }, { "SecurityControlId": "CloudTrail.4", @@ -73,7 +82,8 @@ "Description": "This AWS control checks whether CloudTrail log file validation is enabled.", "RemediationUrl": "https://docs.aws.amazon.com/console/securityhub/CloudTrail.4/remediation", "SeverityRating": "MEDIUM", - "CurrentRegionAvailability": "AVAILABLE" + "CurrentRegionAvailability": "AVAILABLE", + "CustomizableProperties": [] } ], "NextToken": "eyJOZXh0VG9rZW4iOiBudWxsLCAiYm90b190cnVuY2F0ZV9hbW91bnQiOiAzfQ==" diff -Nru awscli-2.15.9/awscli/examples/securityhub/start-configuration-policy-association.rst awscli-2.15.22/awscli/examples/securityhub/start-configuration-policy-association.rst --- awscli-2.15.9/awscli/examples/securityhub/start-configuration-policy-association.rst 1970-01-01 00:00:00.000000000 +0000 +++ awscli-2.15.22/awscli/examples/securityhub/start-configuration-policy-association.rst 2024-02-21 17:34:54.000000000 +0000 @@ -0,0 +1,41 @@ +**Example 1: To associate a configuration policy** + +The following ``start-configuration-policy-association`` example associates the specified configuration policy with the specified organizational unit. A configuration may be associated with a target account, organizational unit, or the root. 
:: + + aws securityhub start-configuration-policy-association \ + --configuration-policy-identifier "arn:aws:securityhub:eu-central-1:123456789012:configuration-policy/a1b2c3d4-5678-90ab-cdef-EXAMPLE33333" \ + --target '{"OrganizationalUnitId": "ou-6hi7-8j91kl2m"}' + +Output:: + + { + "ConfigurationPolicyId": "a1b2c3d4-5678-90ab-cdef-EXAMPLE33333", + "TargetId": "ou-6hi7-8j91kl2m", + "TargetType": "ORGANIZATIONAL_UNIT", + "AssociationType": "APPLIED", + "UpdatedAt": "2023-11-29T17:40:52.468000+00:00", + "AssociationStatus": "PENDING" + } + +For more information, see `Creating and associating Security Hub configuration policies `__ in the *AWS Security Hub User Guide*. + +**Example 2: To associate a self-managed configuration** + +The following ``start-configuration-policy-association`` example associates a self-managed configuration with the specified account. :: + + aws securityhub start-configuration-policy-association \ + --configuration-policy-identifier "SELF_MANAGED_SECURITY_HUB" \ + --target '{"OrganizationalUnitId": "123456789012"}' + +Output:: + + { + "ConfigurationPolicyId": "SELF_MANAGED_SECURITY_HUB", + "TargetId": "123456789012", + "TargetType": "ACCOUNT", + "AssociationType": "APPLIED", + "UpdatedAt": "2023-11-29T17:40:52.468000+00:00", + "AssociationStatus": "PENDING" + } + +For more information, see `Creating and associating Security Hub configuration policies `__ in the *AWS Security Hub User Guide*. 
\ No newline at end of file diff -Nru awscli-2.15.9/awscli/examples/securityhub/start-configuration-policy-disassociation.rst awscli-2.15.22/awscli/examples/securityhub/start-configuration-policy-disassociation.rst --- awscli-2.15.9/awscli/examples/securityhub/start-configuration-policy-disassociation.rst 1970-01-01 00:00:00.000000000 +0000 +++ awscli-2.15.22/awscli/examples/securityhub/start-configuration-policy-disassociation.rst 2024-02-21 17:34:54.000000000 +0000 @@ -0,0 +1,23 @@ +**Example 1: To disassociate a configuration policy** + +The following ``start-configuration-policy-disassociation`` example disassociates a configuration policy from the specified organizational unit. A configuration may be disassociated from a target account, organizational unit, or the root. :: + + aws securityhub start-configuration-policy-disassociation \ + --configuration-policy-identifier "arn:aws:securityhub:eu-central-1:123456789012:configuration-policy/a1b2c3d4-5678-90ab-cdef-EXAMPLE33333" \ + --target '{"OrganizationalUnitId": "ou-6hi7-8j91kl2m"}' + +This command produces no output. + +For more information, see `Disassociating a configuration from accounts and OUs `__ in the *AWS Security Hub User Guide*. + +**Example 2: To disassociate a self-managed configuration** + +The following ``start-configuration-policy-disassociation`` example disassociates a self-managed configuration from the specified account. :: + + aws securityhub start-configuration-policy-disassociation \ + --configuration-policy-identifier "SELF_MANAGED_SECURITY_HUB" \ + --target '{"AccountId": "123456789012"}' + +This command produces no output. + +For more information, see `Disassociating a configuration from accounts and OUs `__ in the *AWS Security Hub User Guide*. 
\ No newline at end of file diff -Nru awscli-2.15.9/awscli/examples/securityhub/update-configuration-policy.rst awscli-2.15.22/awscli/examples/securityhub/update-configuration-policy.rst --- awscli-2.15.9/awscli/examples/securityhub/update-configuration-policy.rst 1970-01-01 00:00:00.000000000 +0000 +++ awscli-2.15.22/awscli/examples/securityhub/update-configuration-policy.rst 2024-02-21 17:34:54.000000000 +0000 @@ -0,0 +1,50 @@ +**To update a configuration policy** + +The following ``update-configuration-policy`` example updates an existing configuration policy to use the specified settings. :: + + aws securityhub update-configuration-policy \ + --identifier "arn:aws:securityhub:eu-central-1:508236694226:configuration-policy/09f37766-57d8-4ede-9d33-5d8b0fecf70e" \ + --name "SampleConfigurationPolicyUpdated" \ + --description "SampleDescriptionUpdated" \ + --configuration-policy '{"SecurityHub": {"ServiceEnabled": true, "EnabledStandardIdentifiers": ["arn:aws:securityhub:eu-central-1::standards/aws-foundational-security-best-practices/v/1.0.0","arn:aws:securityhub:::ruleset/cis-aws-foundations-benchmark/v/1.2.0"],"SecurityControlsConfiguration":{"DisabledSecurityControlIdentifiers": ["CloudWatch.1"], "SecurityControlCustomParameters": [{"SecurityControlId": "ACM.1", "Parameters": {"daysToExpiration": {"ValueType": "CUSTOM", "Value": {"Integer": 21}}}}]}}}' \ + --updated-reason "Disabling CloudWatch.1 and changing parameter value" + +Output:: + + { + "Arn": "arn:aws:securityhub:eu-central-1:123456789012:configuration-policy/a1b2c3d4-5678-90ab-cdef-EXAMPLE11111", + "Id": "a1b2c3d4-5678-90ab-cdef-EXAMPLE11111", + "Name": "SampleConfigurationPolicyUpdated", + "Description": "SampleDescriptionUpdated", + "UpdatedAt": "2023-11-28T20:28:04.494000+00:00", + "CreatedAt": "2023-11-28T20:28:04.494000+00:00", + "ConfigurationPolicy": { + "SecurityHub": { + "ServiceEnabled": true, + "EnabledStandardIdentifiers": [ + 
"arn:aws:securityhub:eu-central-1::standards/aws-foundational-security-best-practices/v/1.0.0", + "arn:aws:securityhub:::ruleset/cis-aws-foundations-benchmark/v/1.2.0" + ], + "SecurityControlsConfiguration": { + "DisabledSecurityControlIdentifiers": [ + "CloudWatch.1" + ], + "SecurityControlCustomParameters": [ + { + "SecurityControlId": "ACM.1", + "Parameters": { + "daysToExpiration": { + "ValueType": "CUSTOM", + "Value": { + "Integer": 21 + } + } + } + } + ] + } + } + } + } + +For more information, see `Updating Security Hub configuration policies `__ in the *AWS Security Hub User Guide*. \ No newline at end of file diff -Nru awscli-2.15.9/awscli/examples/securityhub/update-organization-configuration.rst awscli-2.15.22/awscli/examples/securityhub/update-organization-configuration.rst --- awscli-2.15.9/awscli/examples/securityhub/update-organization-configuration.rst 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/awscli/examples/securityhub/update-organization-configuration.rst 2024-02-21 17:34:54.000000000 +0000 @@ -1,10 +1,11 @@ -**To configure Security Hub to automatically enable new organization accounts** +**To update how Security Hub is configured for an organization** -The following ``update-organization-configuration`` example configures Security Hub to automatically enable new accounts in an organization. :: +The following ``update-organization-configuration`` example specifies that Security Hub should use central configuration to configure an organization. After running this command, the delegated Security Hub administrator can create and manage configuration policies to configure the organization. The delegated administrator can also use this command to switch from central to local configuration. If local configuration is the configuration type, the delegated administrator can choose whether to automatically enable Security Hub and default security standards in new organization accounts. 
:: aws securityhub update-organization-configuration \ - --auto-enable + --no-auto-enable \ + --organization-configuration '{"ConfigurationType": "CENTRAL"}' This command produces no output. -For more information, see `Automatically enabling new organization accounts `__ in the *AWS Security Hub User Guide*. +For more information, see `Managing accounts with AWS Organizations `__ in the *AWS Security Hub User Guide*. \ No newline at end of file diff -Nru awscli-2.15.9/awscli/examples/securityhub/update-security-control.rst awscli-2.15.22/awscli/examples/securityhub/update-security-control.rst --- awscli-2.15.9/awscli/examples/securityhub/update-security-control.rst 1970-01-01 00:00:00.000000000 +0000 +++ awscli-2.15.22/awscli/examples/securityhub/update-security-control.rst 2024-02-21 17:34:54.000000000 +0000 @@ -0,0 +1,12 @@ +**To update security control properties** + +The following ``update-security-control`` example specifies custom values for a Security Hub security control parameter. :: + + aws securityhub update-security-control \ + --security-control-id ACM.1 \ + --parameters '{"daysToExpiration": {"ValueType": "CUSTOM", "Value": {"Integer": 15}}}' \ + --last-update-reason "Internal compliance requirement" + +This command produces no output. + +For more information, see `Custom control parameters `__ in the *AWS Security Hub User Guide*. 
\ No newline at end of file diff -Nru awscli-2.15.9/awscli/examples/sts/decode-authorization-message.rst awscli-2.15.22/awscli/examples/sts/decode-authorization-message.rst --- awscli-2.15.9/awscli/examples/sts/decode-authorization-message.rst 1970-01-01 00:00:00.000000000 +0000 +++ awscli-2.15.22/awscli/examples/sts/decode-authorization-message.rst 2024-02-21 17:34:54.000000000 +0000 @@ -0,0 +1,14 @@ +**To decode an encoded authorization message returned in response to a request** + +The following ``decode-authorization-message`` example decodes additional information about the authorization status of a request from an encoded message returned in response to an Amazon Web Services request. :: + + aws sts decode-authorization-message \ + --encoded-message EXAMPLEWodyRNrtlQARDip-eTA6i6DrlUhHhPQrLWB_lAbl5pAKxl9mPDLexYcGBreyIKQC1BGBIpBKr3dFDkwqeO7e2NMk5j_hmzAiChJN-8oy3EwiCjkUW5fdRNjcRvscGlUo_MhqHqHpR-Ojau7BMjOTWwOtHPhV_Zaz87yENdipr745EjQwRd5LaoL3vN8_5ZfA9UiBMKDgVh1gjqZJFUiQoubv78V1RbHNYnK44ElGKmUWYa020I1y6TNS9LXoNmc62GzkfGvoPGhD13br5tXEOo1rAm3vsPewRDFNkYL-4_1MWWezhRNEpqvXBDXLI9xEux7YYkRtjd45NJLFzZynBUubV8NHOevVuighd1Mvz3OiA-1_oPSe4TBtjfN9s7kjU1z70WpVbUgrLVp1xXTK1rf9Ea7t8shPd-3VzKhjS5tLrweFxNOKwV2GtT76B_fRp8HTYz-pOu3FZjwYStfvTb3GHs3-6rLribGO9jZOktkfE6vqxlFzLyeDr4P2ihC1wty9tArCvvGzIAUNmARQJ2VVWPxioqgoqCzMaDMZEO7wkku7QeakEVZdf00qlNLMmcaVZb1UPNqD-JWP5pwe_mAyqh0NLw-r1S56YC_90onj9A80sNrHlI-tIiNd7tgNTYzDuPQYD2FMDBnp82V9eVmYGtPp5NIeSpuf3fOHanFuBZgENxZQZ2dlH3xJGMTtYayzZrRXjiq_SfX9zeBbpCvrD-0AJK477RM84vmtCrsUpJgx-FaoPIb8LmmKVBLpIB0iFhU9sEHPqKHVPi6jdxXqKaZaFGvYVmVOiuQdNQKuyk0p067POFrZECLjjOtNPBOZCcuEKEXAMPLE + +Output:: + + { + "DecodedMessage": 
"{\"allowed\":false,\"explicitDeny\":true,\"matchedStatements\":{\"items\":[{\"statementId\":\"VisualEditor0\",\"effect\":\"DENY\",\"principals\":{\"items\":[{\"value\":\"AROA123456789EXAMPLE\"}]},\"principalGroups\":{\"items\":[]},\"actions\":{\"items\":[{\"value\":\"ec2:RunInstances\"}]},\"resources\":{\"items\":[{\"value\":\"*\"}]},\"conditions\":{\"items\":[]}}]},\"failures\":{\"items\":[]},\"context\":{\"principal\":{\"id\":\"AROA123456789EXAMPLE:Ana\",\"arn\":\"arn:aws:sts::111122223333:assumed-role/Developer/Ana\"},\"action\":\"RunInstances\",\"resource\":\"arn:aws:ec2:us-east-1:111122223333:instance/*\",\"conditions\":{\"items\":[{\"key\":\"ec2:MetadataHttpPutResponseHopLimit\",\"values\":{\"items\":[{\"value\":\"2\"}]}},{\"key\":\"ec2:InstanceMarketType\",\"values\":{\"items\":[{\"value\":\"on-demand\"}]}},{\"key\":\"aws:Resource\",\"values\":{\"items\":[{\"value\":\"instance/*\"}]}},{\"key\":\"aws:Account\",\"values\":{\"items\":[{\"value\":\"111122223333\"}]}},{\"key\":\"ec2:AvailabilityZone\",\"values\":{\"items\":[{\"value\":\"us-east-1f\"}]}},{\"key\":\"ec2:ebsOptimized\",\"values\":{\"items\":[{\"value\":\"false\"}]}},{\"key\":\"ec2:IsLaunchTemplateResource\",\"values\":{\"items\":[{\"value\":\"false\"}]}},{\"key\":\"ec2:InstanceType\",\"values\":{\"items\":[{\"value\":\"t2.micro\"}]}},{\"key\":\"ec2:RootDeviceType\",\"values\":{\"items\":[{\"value\":\"ebs\"}]}},{\"key\":\"aws:Region\",\"values\":{\"items\":[{\"value\":\"us-east-1\"}]}},{\"key\":\"ec2:MetadataHttpEndpoint\",\"values\":{\"items\":[{\"value\":\"enabled\"}]}},{\"key\":\"aws:Service\",\"values\":{\"items\":[{\"value\":\"ec2\"}]}},{\"key\":\"ec2:InstanceID\",\"values\":{\"items\":[{\"value\":\"*\"}]}},{\"key\":\"ec2:MetadataHttpTokens\",\"values\":{\"items\":[{\"value\":\"required\"}]}},{\"key\":\"aws:Type\",\"values\":{\"items\":[{\"value\":\"instance\"}]}},{\"key\":\"ec2:Tenancy\",\"values\":{\"items\":[{\"value\":\"default\"}]}},{\"key\":\"ec2:Region\",\"values\":{\"items\":[{\"value\":
\"us-east-1\"}]}},{\"key\":\"aws:ARN\",\"values\":{\"items\":[{\"value\":\"arn:aws:ec2:us-east-1:111122223333:instance/*\"}]}}]}}}" + } + +For more information, see `Policy evaluation logic `__ in the *AWS IAM User Guide*. \ No newline at end of file diff -Nru awscli-2.15.9/awscli/examples/sts/get-federation-token.rst awscli-2.15.22/awscli/examples/sts/get-federation-token.rst --- awscli-2.15.9/awscli/examples/sts/get-federation-token.rst 1970-01-01 00:00:00.000000000 +0000 +++ awscli-2.15.22/awscli/examples/sts/get-federation-token.rst 2024-02-21 17:34:54.000000000 +0000 @@ -0,0 +1,59 @@ +**To return a set of temporary security credentials using IAM user access key credentials** + +The following ``get-federation-token`` example returns a set of temporary security credentials (consisting of an access key ID, a secret access key, and a security token) for a user. You must call the ``GetFederationToken`` operation using the long-term security credentials of an IAM user. :: + + aws sts get-federation-token \ + --name Bob \ + --policy file://myfile.json \ + --policy-arns arn=arn:aws:iam::aws:policy/AmazonS3ReadOnlyAccess \ + --duration-seconds 900 + +Contents of ``myfile.json``:: + + { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": "ec2:Describe*", + "Resource": "*" + }, + { + "Effect": "Allow", + "Action": "elasticloadbalancing:Describe*", + "Resource": "*" + }, + { + "Effect": "Allow", + "Action": [ + "cloudwatch:ListMetrics", + "cloudwatch:GetMetricStatistics", + "cloudwatch:Describe*" + ], + "Resource": "*" + }, + { + "Effect": "Allow", + "Action": "autoscaling:Describe*", + "Resource": "*" + } + ] + } + +Output:: + + { + "Credentials": { + "AccessKeyId": "ASIAIOSFODNN7EXAMPLE", + "SecretAccessKey": "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY", + "SessionToken": 
"EXAMPLEpZ2luX2VjEGoaCXVzLXdlc3QtMiJIMEYCIQC/W9pL5ArQyDD5JwFL3/h5+WGopQ24GEXweNctwhi9sgIhAMkg+MZE35iWM8s4r5Lr25f9rSTVPFH98G42QQunWMTfKq0DCOP//////////wEQAxoMNDUyOTI1MTcwNTA3Igxuy3AOpuuoLsk3MJwqgQPg8QOd9HuoClUxq26wnc/nm+eZLjHDyGf2KUAHK2DuaS/nrGSEXAMPLE", + "Expiration": "2023-12-20T02:06:07+00:00" + }, + "FederatedUser": { + "FederatedUserId": "111122223333:Bob", + "Arn": "arn:aws:sts::111122223333:federated-user/Bob" + }, + "PackedPolicySize": 36 + } + +For more information, see `Requesting Temporary Security Credentials `__ in the *AWS IAM User Guide*. diff -Nru awscli-2.15.9/awscli/examples/trustedadvisor/get-organization-recommendation.rst awscli-2.15.22/awscli/examples/trustedadvisor/get-organization-recommendation.rst --- awscli-2.15.9/awscli/examples/trustedadvisor/get-organization-recommendation.rst 1970-01-01 00:00:00.000000000 +0000 +++ awscli-2.15.22/awscli/examples/trustedadvisor/get-organization-recommendation.rst 2024-02-21 17:34:54.000000000 +0000 @@ -0,0 +1,35 @@ +**To get an organization recommendation** + +The following ``get-organization-recommendation`` example gets an organization recommendation by its identifier. 
:: + + aws trustedadvisor get-organization-recommendation \ + --organization-recommendation-identifier arn:aws:trustedadvisor:::organization-recommendation/9534ec9b-bf3a-44e8-8213-2ed68b39d9d5 + +Output:: + + { + "organizationRecommendation": { + "arn": "arn:aws:trustedadvisor:::organization-recommendation/9534ec9b-bf3a-44e8-8213-2ed68b39d9d5", + "name": "Lambda Runtime Deprecation Warning", + "description": "One or more lambdas are using a deprecated runtime", + "awsServices": [ + "lambda" + ], + "checkArn": "arn:aws:trustedadvisor:::check/L4dfs2Q4C5", + "id": "9534ec9b-bf3a-44e8-8213-2ed68b39d9d5", + "lifecycleStage": "resolved", + "pillars": [ + "security" + ], + "resourcesAggregates": { + "errorCount": 0, + "okCount": 0, + "warningCount": 0 + }, + "source": "ta_check", + "status": "warning", + "type": "priority" + } + } + +For more information, see `Get started with the Trusted Advisor API `__ in the *AWS Trusted Advisor User Guide*. \ No newline at end of file diff -Nru awscli-2.15.9/awscli/examples/trustedadvisor/get-recommendation.rst awscli-2.15.22/awscli/examples/trustedadvisor/get-recommendation.rst --- awscli-2.15.9/awscli/examples/trustedadvisor/get-recommendation.rst 1970-01-01 00:00:00.000000000 +0000 +++ awscli-2.15.22/awscli/examples/trustedadvisor/get-recommendation.rst 2024-02-21 17:34:54.000000000 +0000 @@ -0,0 +1,41 @@ +**To get a recommendation** + +The following ``get-recommendation`` example gets a recommendation by its identifier. 
:: + + aws trustedadvisor get-recommendation \ + --recommendation-identifier arn:aws:trustedadvisor::000000000000:recommendation/55fa4d2e-bbb7-491a-833b-5773e9589578 + +Output:: + + { + "recommendation": { + "arn": "arn:aws:trustedadvisor::000000000000:recommendation/55fa4d2e-bbb7-491a-833b-5773e9589578", + "name": "MFA Recommendation", + "description": "Enable multi-factor authentication", + "awsServices": [ + "iam" + ], + "checkArn": "arn:aws:trustedadvisor:::check/7DAFEmoDos", + "id": "55fa4d2e-bbb7-491a-833b-5773e9589578", + "lastUpdatedAt": "2023-11-01T15:57:58.673Z", + "pillarSpecificAggregates": { + "costOptimizing": { + "estimatedMonthlySavings": 0.0, + "estimatedPercentMonthlySavings": 0.0 + } + }, + "pillars": [ + "security" + ], + "resourcesAggregates": { + "errorCount": 1, + "okCount": 0, + "warningCount": 0 + }, + "source": "ta_check", + "status": "error", + "type": "standard" + } + } + +For more information, see `Get started with the Trusted Advisor API `__ in the *AWS Trusted Advisor User Guide*. \ No newline at end of file diff -Nru awscli-2.15.9/awscli/examples/trustedadvisor/list-checks.rst awscli-2.15.22/awscli/examples/trustedadvisor/list-checks.rst --- awscli-2.15.9/awscli/examples/trustedadvisor/list-checks.rst 1970-01-01 00:00:00.000000000 +0000 +++ awscli-2.15.22/awscli/examples/trustedadvisor/list-checks.rst 2024-02-21 17:34:54.000000000 +0000 @@ -0,0 +1,95 @@ +**To list Trusted Advisor checks** + +The following ``list-checks`` example lists all Trusted Advisor checks. :: + + aws trustedadvisor list-checks + +Output:: + + { + "checkSummaries": [ + { + "arn": "arn:aws:trustedadvisor:::check/1iG5NDGVre", + "awsServices": [ + "EC2" + ], + "description": "Checks security groups for rules that allow unrestricted access to a resource. 
Unrestricted access increases opportunities for malicious activity (hacking, denial-of-service attacks, loss of data)", + "id": "1iG5NDGVre", + "metadata": { + "0": "Region", + "1": "Security Group Name", + "2": "Security Group ID", + "3": "Protocol", + "4": "Port", + "5": "Status", + "6": "IP Range" + }, + "name": "Security Groups - Unrestricted Access", + "pillars": [ + "security" + ], + "source": "ta_check" + }, + { + "arn": "arn:aws:trustedadvisor:::check/1qazXsw23e", + "awsServices": [ + "RDS" + ], + "description": "Checks your usage of RDS and provides recommendations on purchase of Reserved Instances to help reduce costs incurred from using RDS On-Demand. AWS generates these recommendations by analyzing your On-Demand usage for the past 30 days. We then simulate every combination of reservations in the generated category of usage in order to identify the best number of each type of Reserved Instance to purchase to maximize your savings. This check covers recommendations based on partial upfront payment option with 1-year or 3-year commitment. This check is not available to accounts linked in Consolidated Billing. 
Recommendations are only available for the Paying Account.", + "id": "1qazXsw23e", + "metadata": { + "0": "Region", + "1": "Family", + "2": "Instance Type", + "3": "License Model", + "4": "Database Edition", + "5": "Database Engine", + "6": "Deployment Option", + "7": "Recommended number of Reserved Instances to purchase", + "8": "Expected Average Reserved Instance Utilization", + "9": "Estimated Savings with Recommendation (monthly)" + "10": "Upfront Cost of Reserved Instances", + "11": "Estimated cost of Reserved Instances (monthly)", + "12": "Estimated On-Demand Cost Post Recommended Reserved Instance Purchase (monthly)", + "13": "Estimated Break Even (months)", + "14": "Lookback Period (days)", + "15": "Term (years)" + }, + "name": "Amazon Relational Database Service (RDS) Reserved Instance Optimization", + "pillars": [ + "cost_optimizing" + ], + "source": "ta_check" + }, + { + "arn": "arn:aws:trustedadvisor:::check/1qw23er45t", + "awsServices": [ + "Redshift" + ], + "description": "Checks your usage of Redshift and provides recommendations on purchase of Reserved Nodes to help reduce costs incurred from using Redshift On-Demand. AWS generates these recommendations by analyzing your On-Demand usage for the past 30 days. We then simulate every combination of reservations in the generated category of usage in order to identify the best number of each type of Reserved Nodes to purchase to maximize your savings. This check covers recommendations based on partial upfront payment option with 1-year or 3-year commitment. This check is not available to accounts linked in Consolidated Billing. 
Recommendations are only available for the Paying Account.", + "id": "1qw23er45t", + "metadata": { + "0": "Region", + "1": "Family", + "2": "Node Type", + "3": "Recommended number of Reserved Nodes to purchase", + "4": "Expected Average Reserved Node Utilization", + "5": "Estimated Savings with Recommendation (monthly)", + "6": "Upfront Cost of Reserved Nodes", + "7": "Estimated cost of Reserved Nodes (monthly)", + "8": "Estimated On-Demand Cost Post Recommended Reserved Nodes Purchase (monthly)", + "9": "Estimated Break Even (months)", + "10": "Lookback Period (days)", + "11": "Term (years)", + }, + "name": "Amazon Redshift Reserved Node Optimization", + "pillars": [ + "cost_optimizing" + ], + "source": "ta_check" + }, + ], + "nextToken": "REDACTED" + } + +For more information, see `Get started with the Trusted Advisor API `__ in the *AWS Trusted Advisor User Guide*. \ No newline at end of file diff -Nru awscli-2.15.9/awscli/examples/trustedadvisor/list-organization-recommendation-accounts.rst awscli-2.15.22/awscli/examples/trustedadvisor/list-organization-recommendation-accounts.rst --- awscli-2.15.9/awscli/examples/trustedadvisor/list-organization-recommendation-accounts.rst 1970-01-01 00:00:00.000000000 +0000 +++ awscli-2.15.22/awscli/examples/trustedadvisor/list-organization-recommendation-accounts.rst 2024-02-21 17:34:54.000000000 +0000 @@ -0,0 +1,22 @@ +**To list organization recommendation accounts** + +The following ``list-organization-recommendation-accounts`` example lists all account recommendation summaries for an organization recommendation by its identifier. 
:: + + aws trustedadvisor list-organization-recommendation-accounts \ + --organization-recommendation-identifier arn:aws:trustedadvisor:::organization-recommendation/9534ec9b-bf3a-44e8-8213-2ed68b39d9d5 + +Output:: + + { + "accountRecommendationLifecycleSummaries": [{ + "accountId": "000000000000", + "accountRecommendationArn": "arn:aws:trustedadvisor::000000000000:recommendation/9534ec9b-bf3a-44e8-8213-2ed68b39d9d5", + "lifecycleStage": "resolved", + "updateReason": "Resolved issue", + "updateReasonCode": "valid_business_case", + "lastUpdatedAt": "2023-01-17T18:25:44.552Z" + }], + "nextToken": "REDACTED" + } + +For more information, see `Get started with the Trusted Advisor API `__ in the *AWS Trusted Advisor User Guide*. \ No newline at end of file diff -Nru awscli-2.15.9/awscli/examples/trustedadvisor/list-organization-recommendation-resources.rst awscli-2.15.22/awscli/examples/trustedadvisor/list-organization-recommendation-resources.rst --- awscli-2.15.9/awscli/examples/trustedadvisor/list-organization-recommendation-resources.rst 1970-01-01 00:00:00.000000000 +0000 +++ awscli-2.15.22/awscli/examples/trustedadvisor/list-organization-recommendation-resources.rst 2024-02-21 17:34:54.000000000 +0000 @@ -0,0 +1,73 @@ +**To list organization recommendation resources** + +The following ``list-organization-recommendation-resources`` example lists all resources for an organization recommendation by its identifier. 
:: + + aws trustedadvisor list-organization-recommendation-resources \ + --organization-recommendation-identifier arn:aws:trustedadvisor:::organization-recommendation/5a694939-2e54-45a2-ae72-730598fa89d0 + +Output:: + + { + "organizationRecommendationResourceSummaries": [ + { + "arn": "arn:aws:trustedadvisor::000000000000:recommendation-resource/5a694939-2e54-45a2-ae72-730598fa89d0/bb38affc0ce0681d9a6cd13f30238ba03a8f63dfe7a379dc403c619119d86af", + "awsResourceId": "database-1-instance-1", + "id": "bb38affc0ce0681d9a6cd13f302383ba03a8f63dfe7a379dc403c619119d86af", + "lastUpdatedAt": "2023-11-01T15:09:51.891Z", + "metadata": { + "0": "14", + "1": "208.79999999999998", + "2": "database-1-instance-1", + "3": "db.r5.large", + "4": "false", + "5": "us-west-2", + "6": "arn:aws:rds:us-west-2:000000000000:db:database-1-instance-1", + "7": "1" + }, + "recommendationArn": "arn:aws:trustedadvisor:::organization-recommendation/5a694939-2e54-45a2-ae72-730598fa89d0", + "regionCode": "us-west-2", + "status": "warning" + }, + { + "arn": "arn:aws:trustedadvisor::000000000000:recommendation-resource/5a694939-2e54-45a2-ae72-730598fa89d0/51fded4d7a3278818df9cfe344ff5762cec46c095a6763d1ba1ba53bd0e1b0e6", + "awsResourceId": "database-1", + "id": "51fded4d7a3278818df9cfe344ff5762cec46c095a6763d1ba1ba53bd0e1b0e6", + "lastUpdatedAt": "2023-11-01T15:09:51.891Z", + "metadata": { + "0": "14", + "1": "31.679999999999996", + "2": "database-1", + "3": "db.t3.small", + "4": "false", + "5": "us-west-2", + "6": "arn:aws:rds:us-west-2:000000000000:db:database-1", + "7": "20" + }, + "recommendationArn": "arn:aws:trustedadvisor:::organization-recommendation/5a694939-2e54-45a2-ae72-730598fa89d0", + "regionCode": "us-west-2", + "status": "warning" + }, + { + "arn": "arn:aws:trustedadvisor::000000000000:recommendation-resource/5a694939-2e54-45a2-ae72-730598fa89d0/f4d01bd20f4cd5372062aafc8786c489e48f0ead7cdab121463bf9f89e40a36b", + "awsResourceId": "database-2-instance-1-us-west-2a", + "id": 
"f4d01bd20f4cd5372062aafc8786c489e48f0ead7cdab121463bf9f89e40a36b", + "lastUpdatedAt": "2023-11-01T15:09:51.891Z", + "metadata": { + "0": "14", + "1": "187.20000000000002", + "2": "database-2-instance-1-us-west-2a", + "3": "db.r6g.large", + "4": "true", + "5": "us-west-2", + "6": "arn:aws:rds:us-west-2:000000000000:db:database-2-instance-1-us-west-2a", + "7": "1" + }, + "recommendationArn": "arn:aws:trustedadvisor:::organization-recommendation/5a694939-2e54-45a2-ae72-730598fa89d0", + "regionCode": "us-west-2", + "status": "warning" + }, + ], + "nextToken": "REDACTED" + } + +For more information, see `Get started with the Trusted Advisor API `__ in the *AWS Trusted Advisor User Guide*. \ No newline at end of file diff -Nru awscli-2.15.9/awscli/examples/trustedadvisor/list-organization-recommendations.rst awscli-2.15.22/awscli/examples/trustedadvisor/list-organization-recommendations.rst --- awscli-2.15.9/awscli/examples/trustedadvisor/list-organization-recommendations.rst 1970-01-01 00:00:00.000000000 +0000 +++ awscli-2.15.22/awscli/examples/trustedadvisor/list-organization-recommendations.rst 2024-02-21 17:34:54.000000000 +0000 @@ -0,0 +1,131 @@ +**Example 1: To list organization recommendations** + +The following ``list-organization-recommendations`` example lists all organization recommendations and does not include a filter. 
:: + + aws trustedadvisor list-organization-recommendations + +Output:: + + { + "organizationRecommendationSummaries": [ + { + "arn": "arn:aws:trustedadvisor:::organization-recommendation/9534ec9b-bf3a-44e8-8213-2ed68b39d9d5", + "name": "Lambda Runtime Deprecation Warning", + "awsServices": [ + "lambda" + ], + "checkArn": "arn:aws:trustedadvisor:::check/L4dfs2Q4C5", + "id": "9534ec9b-bf3a-44e8-8213-2ed68b39d9d5", + "lifecycleStage": "resolved", + "pillars": [ + "security" + ], + "resourcesAggregates": { + "errorCount": 0, + "okCount": 0, + "warningCount": 0 + }, + "source": "ta_check", + "status": "warning", + "type": "priority" + }, + { + "arn": "arn:aws:trustedadvisor:::organization-recommendation/4ecff4d4-1bc1-4c99-a5b8-0fff9ee500d6", + "name": "Lambda Runtime Deprecation Warning", + "awsServices": [ + "lambda" + ], + "checkArn": "arn:aws:trustedadvisor:::check/L4dfs2Q4C5", + "id": "4ecff4d4-1bc1-4c99-a5b8-0fff9ee500d6", + "lifecycleStage": "resolved", + "pillars": [ + "security" + ], + "resourcesAggregates": { + "errorCount": 0, + "okCount": 0, + "warningCount": 0 + }, + "source": "ta_check", + "status": "warning", + "type": "priority" + }, + ], + "nextToken": "REDACTED" + } + +For more information, see `Get started with the Trusted Advisor API `__ in the *AWS Trusted Advisor User Guide*. + +**Example 2: To list organization recommendations with a filter** + +The following ``list-organization-recommendations`` example filters and returns a max of one organization recommendation that is a part of the "security" pillar. 
:: + + aws trustedadvisor list-organization-recommendations \ + --pillar security \ + --max-items 100 + +Output:: + + { + "organizationRecommendationSummaries": [{ + "arn": "arn:aws:trustedadvisor:::organization-recommendation/9534ec9b-bf3a-44e8-8213-2ed68b39d9d5", + "name": "Lambda Runtime Deprecation Warning", + "awsServices": [ + "lambda" + ], + "checkArn": "arn:aws:trustedadvisor:::check/L4dfs2Q4C5", + "id": "9534ec9b-bf3a-44e8-8213-2ed68b39d9d5", + "lifecycleStage": "resolved", + "pillars": [ + "security" + ], + "resourcesAggregates": { + "errorCount": 0, + "okCount": 0, + "warningCount": 0 + }, + "source": "ta_check", + "status": "warning", + "type": "priority" + }], + "nextToken": "REDACTED" + } + +For more information, see `Get started with the Trusted Advisor API `__ in the *AWS Trusted Advisor User Guide*. + +**Example 3: To list organization recommendations with a pagination token** + +The following ``list-organization-recommendations`` example uses the "nextToken" returned from a previous request to fetch the next page of organization recommendations. :: + + aws trustedadvisor list-organization-recommendations \ + --pillar security \ + --max-items 100 \ + --starting-token + +Output:: + + { + "organizationRecommendationSummaries": [{ + "arn": "arn:aws:trustedadvisor:::organization-recommendation/4ecff4d4-1bc1-4c99-a5b8-0fff9ee500d6", + "name": "Lambda Runtime Deprecation Warning", + "awsServices": [ + "lambda" + ], + "checkArn": "arn:aws:trustedadvisor:::check/L4dfs2Q4C5", + "id": "4ecff4d4-1bc1-4c99-a5b8-0fff9ee500d6", + "lifecycleStage": "resolved", + "pillars": [ + "security" + ], + "resourcesAggregates": { + "errorCount": 0, + "okCount": 0, + "warningCount": 0 + }, + "source": "ta_check", + "status": "warning", + "type": "priority" + }] + } + +For more information, see `Get started with the Trusted Advisor API `__ in the *AWS Trusted Advisor User Guide*. 
\ No newline at end of file diff -Nru awscli-2.15.9/awscli/examples/trustedadvisor/list-recommendation-resources.rst awscli-2.15.22/awscli/examples/trustedadvisor/list-recommendation-resources.rst --- awscli-2.15.9/awscli/examples/trustedadvisor/list-recommendation-resources.rst 1970-01-01 00:00:00.000000000 +0000 +++ awscli-2.15.22/awscli/examples/trustedadvisor/list-recommendation-resources.rst 2024-02-21 17:34:54.000000000 +0000 @@ -0,0 +1,73 @@ +**To list recommendation resources** + +The following ``list-recommendation-resources`` example lists all resources for a recommendation by its identifier. :: + + aws trustedadvisor list-recommendation-resources \ + --recommendation-identifier arn:aws:trustedadvisor::000000000000:recommendation/55fa4d2e-bbb7-491a-833b-5773e9589578 + +Output:: + + { + "recommendationResourceSummaries": [ + { + "arn": "arn:aws:trustedadvisor::000000000000:recommendation-resource/55fa4d2e-bbb7-491a-833b-5773e9589578/18959a1f1973cff8e706e9d9bde28bba36cd602a6b2cb86c8b61252835236010", + "id": "18959a1f1973cff8e706e9d9bde28bba36cd602a6b2cb86c8b61252835236010", + "awsResourceId": "webcms-dev-01", + "lastUpdatedAt": "2023-11-01T15:09:51.891Z", + "metadata": { + "0": "14", + "1": "123.12000000000002", + "2": "webcms-dev-01", + "3": "db.m6i.large", + "4": "false", + "5": "us-east-1", + "6": "arn:aws:rds:us-east-1:000000000000:db:webcms-dev-01", + "7": "20" + }, + "recommendationArn": "arn:aws:trustedadvisor::000000000000:recommendation/55fa4d2e-bbb7-491a-833b-5773e9589578", + "regionCode": "us-east-1", + "status": "warning" + }, + { + "arn": "arn:aws:trustedadvisor::000000000000:recommendation-resource/55fa4d2e-bbb7-491a-833b-5773e9589578/e6367ff500ac90db8e4adeb4892e39ee9c36bbf812dcbce4b9e4fefcec9eb63e", + "id": "e6367ff500ac90db8e4adeb4892e39ee9c36bbf812dcbce4b9e4fefcec9eb63e", + "awsResourceId": "aws-dev-db-stack-instance-1", + "lastUpdatedAt": "2023-11-01T15:09:51.891Z", + "metadata": { + "0": "14", + "1": "29.52", + "2": 
"aws-dev-db-stack-instance-1", + "3": "db.t2.small", + "4": "false", + "5": "us-east-1", + "6": "arn:aws:rds:us-east-1:000000000000:db:aws-dev-db-stack-instance-1", + "7": "1" + }, + "recommendationArn": "arn:aws:trustedadvisor::000000000000:recommendation/55fa4d2e-bbb7-491a-833b-5773e9589578", + "regionCode": "us-east-1", + "status": "warning" + }, + { + "arn": "arn:aws:trustedadvisor::000000000000:recommendation-resource/55fa4d2e-bbb7-491a-833b-5773e9589578/31aa78ba050a5015d2d38cca7f5f1ce88f70857c4e1c3ad03f8f9fd95dad7459", + "id": "31aa78ba050a5015d2d38cca7f5f1ce88f70857c4e1c3ad03f8f9fd95dad7459", + "awsResourceId": "aws-awesome-apps-stack-db", + "lastUpdatedAt": "2023-11-01T15:09:51.891Z", + "metadata": { + "0": "14", + "1": "114.48000000000002", + "2": "aws-awesome-apps-stack-db", + "3": "db.m6g.large", + "4": "false", + "5": "us-east-1", + "6": "arn:aws:rds:us-east-1:000000000000:db:aws-awesome-apps-stack-db", + "7": "100" + }, + "recommendationArn": "arn:aws:trustedadvisor::000000000000:recommendation/55fa4d2e-bbb7-491a-833b-5773e9589578", + "regionCode": "us-east-1", + "status": "warning" + } + ], + "nextToken": "REDACTED" + } + +For more information, see `Get started with the Trusted Advisor API `__ in the *AWS Trusted Advisor User Guide*. \ No newline at end of file diff -Nru awscli-2.15.9/awscli/examples/trustedadvisor/list-recommendations.rst awscli-2.15.22/awscli/examples/trustedadvisor/list-recommendations.rst --- awscli-2.15.9/awscli/examples/trustedadvisor/list-recommendations.rst 1970-01-01 00:00:00.000000000 +0000 +++ awscli-2.15.22/awscli/examples/trustedadvisor/list-recommendations.rst 2024-02-21 17:34:54.000000000 +0000 @@ -0,0 +1,155 @@ +**Example 1: To list recommendations** + +The following ``list-recommendations`` example lists all recommendations and does not include a filter. 
:: + + aws trustedadvisor list-recommendations + +Output:: + + { + "recommendationSummaries": [ + { + "arn": "arn:aws:trustedadvisor::000000000000:recommendation/55fa4d2e-bbb7-491a-833b-5773e9589578", + "name": "MFA Recommendation", + "awsServices": [ + "iam" + ], + "checkArn": "arn:aws:trustedadvisor:::check/7DAFEmoDos", + "id": "55fa4d2e-bbb7-491a-833b-5773e9589578", + "lastUpdatedAt": "2023-11-01T15:57:58.673Z", + "pillarSpecificAggregates": { + "costOptimizing": { + "estimatedMonthlySavings": 0.0, + "estimatedPercentMonthlySavings": 0.0 + } + }, + "pillars": [ + "security" + ], + "resourcesAggregates": { + "errorCount": 1, + "okCount": 0, + "warningCount": 0 + }, + "source": "ta_check", + "status": "error", + "type": "standard" + }, + { + "arn": "arn:aws:trustedadvisor::000000000000:recommendation/8b602b6f-452d-4cb2-8a9e-c7650955d9cd", + "name": "RDS clusters quota warning", + "awsServices": [ + "rds" + ], + "checkArn": "arn:aws:trustedadvisor:::check/gjqMBn6pjz", + "id": "8b602b6f-452d-4cb2-8a9e-c7650955d9cd", + "lastUpdatedAt": "2023-11-01T15:58:17.397Z", + "pillarSpecificAggregates": { + "costOptimizing": { + "estimatedMonthlySavings": 0.0, + "estimatedPercentMonthlySavings": 0.0 + } + }, + "pillars": [ + "service_limits" + ], + "resourcesAggregates": { + "errorCount": 0, + "okCount": 3, + "warningCount": 6 + }, + "source": "ta_check", + "status": "warning", + "type": "standard" + } + ], + "nextToken": "REDACTED" + } + +For more information, see `Get started with the Trusted Advisor API `__ in the *AWS Trusted Advisor User Guide*. + +**Example 2: To list recommendations with a filter** + +The following ``list-recommendations`` example lists recommendations and includes a filter. 
:: + + aws trustedadvisor list-recommendations \ + --aws-service iam \ + --max-items 100 + +Output:: + + { + "recommendationSummaries": [{ + "arn": "arn:aws:trustedadvisor::000000000000:recommendation/55fa4d2e-bbb7-491a-833b-5773e9589578", + "name": "MFA Recommendation", + "awsServices": [ + "iam" + ], + "checkArn": "arn:aws:trustedadvisor:::check/7DAFEmoDos", + "id": "55fa4d2e-bbb7-491a-833b-5773e9589578", + "lastUpdatedAt": "2023-11-01T15:57:58.673Z", + "pillarSpecificAggregates": { + "costOptimizing": { + "estimatedMonthlySavings": 0.0, + "estimatedPercentMonthlySavings": 0.0 + } + }, + "pillars": [ + "security" + ], + "resourcesAggregates": { + "errorCount": 1, + "okCount": 0, + "warningCount": 0 + }, + "source": "ta_check", + "status": "error", + "type": "standard" + }], + "nextToken": "REDACTED" + } + +For more information, see `Get started with the Trusted Advisor API `__ in the *AWS Trusted Advisor User Guide*. + +**Example 3: To list recommendations with a pagination token** + +The following ``list-recommendations`` example uses the "nextToken" returned from a previous request to fetch the next page of filtered Recommendations. 
:: + + aws trustedadvisor list-recommendations \ + --aws-service rds \ + --max-items 100 \ + --starting-token + +Output:: + + { + "recommendationSummaries": [{ + "arn": "arn:aws:trustedadvisor::000000000000:recommendation/8b602b6f-452d-4cb2-8a9e-c7650955d9cd", + "name": "RDS clusters quota warning", + "awsServices": [ + "rds" + ], + "checkArn": "arn:aws:trustedadvisor:::check/gjqMBn6pjz", + "id": "8b602b6f-452d-4cb2-8a9e-c7650955d9cd", + "lastUpdatedAt": "2023-11-01T15:58:17.397Z", + "pillarSpecificAggregates": { + "costOptimizing": { + "estimatedMonthlySavings": 0.0, + "estimatedPercentMonthlySavings": 0.0 + } + }, + "pillars": [ + "service_limits" + ], + "resourcesAggregates": { + "errorCount": 0, + "okCount": 3, + "warningCount": 6 + }, + "source": "ta_check", + "status": "warning", + "type": "standard" + }] + } + +For more information, see `Get started with the Trusted Advisor API `__ in the *AWS Trusted Advisor User Guide*. \ No newline at end of file diff -Nru awscli-2.15.9/awscli/examples/trustedadvisor/update-organization-recommendation-lifecycle.rst awscli-2.15.22/awscli/examples/trustedadvisor/update-organization-recommendation-lifecycle.rst --- awscli-2.15.9/awscli/examples/trustedadvisor/update-organization-recommendation-lifecycle.rst 1970-01-01 00:00:00.000000000 +0000 +++ awscli-2.15.22/awscli/examples/trustedadvisor/update-organization-recommendation-lifecycle.rst 2024-02-21 17:34:54.000000000 +0000 @@ -0,0 +1,12 @@ +**To update an organization recommendation lifecycle** + +The following ``update-organization-recommendation-lifecycle`` example updates the lifecycle of an organization recommendation by its identifier. :: + + aws trustedadvisor update-organization-recommendation-lifecycle \ + --organization-recommendation-identifier arn:aws:trustedadvisor:::organization-recommendation/96b5e5ca-7930-444c-90c6-06d386128100 \ + --lifecycle-stage dismissed \ + --update-reason-code not_applicable + +This command produces no output. 
+ +For more information, see `Get started with the Trusted Advisor API `__ in the *AWS Trusted Advisor User Guide*. \ No newline at end of file diff -Nru awscli-2.15.9/awscli/examples/trustedadvisor/update-recommendation-lifecycle.rst awscli-2.15.22/awscli/examples/trustedadvisor/update-recommendation-lifecycle.rst --- awscli-2.15.9/awscli/examples/trustedadvisor/update-recommendation-lifecycle.rst 1970-01-01 00:00:00.000000000 +0000 +++ awscli-2.15.22/awscli/examples/trustedadvisor/update-recommendation-lifecycle.rst 2024-02-21 17:34:54.000000000 +0000 @@ -0,0 +1,12 @@ +**To update a recommendation lifecycle** + +The following ``update-recommendation-lifecycle`` example updates the lifecycle of a recommendation by its identifier. :: + + aws trustedadvisor update-recommendation-lifecycle \ + --recommendation-identifier arn:aws:trustedadvisor::000000000000:recommendation/861c9c6e-f169-405a-8b59-537a8caccd7a \ + --lifecycle-stage resolved \ + --update-reason-code valid_business_case + +This command produces no output. + +For more information, see `Get started with the Trusted Advisor API `__ in the *AWS Trusted Advisor User Guide*. \ No newline at end of file diff -Nru awscli-2.15.9/configure awscli-2.15.22/configure --- awscli-2.15.9/configure 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/configure 2024-02-21 17:34:54.000000000 +0000 @@ -1,6 +1,6 @@ #! /bin/sh # Guess values for system-dependent variables and create Makefiles. -# Generated by GNU Autoconf 2.71 for awscli 2.15.9. +# Generated by GNU Autoconf 2.71 for awscli 2.15.22. # # # Copyright (C) 1992-1996, 1998-2017, 2020-2021 Free Software Foundation, @@ -607,8 +607,8 @@ # Identity of this package. 
PACKAGE_NAME='awscli' PACKAGE_TARNAME='awscli' -PACKAGE_VERSION='2.15.9' -PACKAGE_STRING='awscli 2.15.9' +PACKAGE_VERSION='2.15.22' +PACKAGE_STRING='awscli 2.15.22' PACKAGE_BUGREPORT='' PACKAGE_URL='' @@ -1255,7 +1255,7 @@ fi if $ac_init_version; then cat <<\_ACEOF -awscli configure 2.15.9 +awscli configure 2.15.22 generated by GNU Autoconf 2.71 Copyright (C) 2021 Free Software Foundation, Inc. @@ -1292,7 +1292,7 @@ This file contains any messages produced by compilers while running configure, to aid debugging if configure makes a mistake. -It was created by awscli $as_me 2.15.9, which was +It was created by awscli $as_me 2.15.22, which was generated by GNU Autoconf 2.71. Invocation command line was $ $0$ac_configure_args_raw @@ -2668,7 +2668,7 @@ # report actual input values of CONFIG_FILES etc. instead of their # values after options handling. ac_log=" -This file was extended by awscli $as_me 2.15.9, which was +This file was extended by awscli $as_me 2.15.22, which was generated by GNU Autoconf 2.71. 
Invocation command line was CONFIG_FILES = $CONFIG_FILES @@ -2723,7 +2723,7 @@ cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 ac_cs_config='$ac_cs_config_escaped' ac_cs_version="\\ -awscli config.status 2.15.9 +awscli config.status 2.15.22 configured by $0, generated by GNU Autoconf 2.71, with options \\"\$ac_cs_config\\" diff -Nru awscli-2.15.9/configure.ac awscli-2.15.22/configure.ac --- awscli-2.15.9/configure.ac 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/configure.ac 2024-02-21 17:34:54.000000000 +0000 @@ -1,5 +1,5 @@ AC_CONFIG_MACRO_DIRS([m4]) -AC_INIT([awscli], [2.15.9]) +AC_INIT([awscli], [2.15.22]) AC_CONFIG_SRCDIR([bin/aws]) AM_PATH_PYTHON([3.8]) diff -Nru awscli-2.15.9/debian/changelog awscli-2.15.22/debian/changelog --- awscli-2.15.9/debian/changelog 2024-01-12 18:51:10.000000000 +0000 +++ awscli-2.15.22/debian/changelog 2024-02-22 23:29:01.000000000 +0000 @@ -1,3 +1,9 @@ +awscli (2.15.22-1) unstable; urgency=medium + + * New upstream version 2.15.22 + + -- Noah Meyerhans Thu, 22 Feb 2024 15:29:01 -0800 + awscli (2.15.9-1) unstable; urgency=medium * New upstream version 2.15.9 (Closess: #1060287) diff -Nru awscli-2.15.9/doc/source/conf.py awscli-2.15.22/doc/source/conf.py --- awscli-2.15.9/doc/source/conf.py 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/doc/source/conf.py 2024-02-21 17:34:54.000000000 +0000 @@ -71,7 +71,7 @@ # The short X.Y version. version = '2.0' # The full version, including alpha/beta/rc tags. -release = '2.15.9' +release = '2.15.22' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. 
diff -Nru awscli-2.15.9/pyproject.toml awscli-2.15.22/pyproject.toml --- awscli-2.15.9/pyproject.toml 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/pyproject.toml 2024-02-21 17:34:54.000000000 +0000 @@ -1,6 +1,6 @@ [build-system] requires = [ -"flit_core>=3.7.1,<3.8.1", +"flit_core>=3.7.1,<3.9.1", ] build-backend = "pep517" backend-path = ["backends"] diff -Nru awscli-2.15.9/requirements/bootstrap.txt awscli-2.15.22/requirements/bootstrap.txt --- awscli-2.15.9/requirements/bootstrap.txt 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/requirements/bootstrap.txt 2024-02-21 17:34:54.000000000 +0000 @@ -1,2 +1,2 @@ pip>=22.0.0,<24.0.0 -flit_core>=3.7.1,<3.8.1 +flit_core>=3.7.1,<3.9.1 diff -Nru awscli-2.15.9/requirements/download-deps/bootstrap-lock.txt awscli-2.15.22/requirements/download-deps/bootstrap-lock.txt --- awscli-2.15.9/requirements/download-deps/bootstrap-lock.txt 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/requirements/download-deps/bootstrap-lock.txt 2024-02-21 17:34:54.000000000 +0000 @@ -6,9 +6,9 @@ # # The following packages are considered to be unsafe in a requirements file: -flit-core==3.8.0 \ - --hash=sha256:64a29ec845164a6abe1136bf4bc5ae012bdfe758ed42fc7571a9059a7c80bd83 \ - --hash=sha256:b305b30c99526df5e63d6022dd2310a0a941a187bd3884f4c8ef0418df6c39f3 +flit-core==3.9.0 \ + --hash=sha256:72ad266176c4a3fcfab5f2930d76896059851240570ce9a98733b658cb786eba \ + --hash=sha256:7aada352fb0c7f5538c4fafeddf314d3a6a92ee8e2b1de70482329e42de70301 # via -r requirements/download-deps/../bootstrap.txt pip==23.3.2 \ --hash=sha256:5052d7889c1f9d05224cd41741acb7c5d6fa735ab34e339624a614eaaa7e7d76 \ diff -Nru awscli-2.15.9/requirements/download-deps/bootstrap-win-lock.txt awscli-2.15.22/requirements/download-deps/bootstrap-win-lock.txt --- awscli-2.15.9/requirements/download-deps/bootstrap-win-lock.txt 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/requirements/download-deps/bootstrap-win-lock.txt 2024-02-21 17:34:54.000000000 +0000 @@ 
-6,9 +6,9 @@ # # The following packages are considered to be unsafe in a requirements file: -flit-core==3.8.0 \ - --hash=sha256:64a29ec845164a6abe1136bf4bc5ae012bdfe758ed42fc7571a9059a7c80bd83 \ - --hash=sha256:b305b30c99526df5e63d6022dd2310a0a941a187bd3884f4c8ef0418df6c39f3 +flit-core==3.9.0 \ + --hash=sha256:72ad266176c4a3fcfab5f2930d76896059851240570ce9a98733b658cb786eba \ + --hash=sha256:7aada352fb0c7f5538c4fafeddf314d3a6a92ee8e2b1de70482329e42de70301 # via -r requirements\download-deps\../bootstrap.txt pip==23.3.2 \ --hash=sha256:5052d7889c1f9d05224cd41741acb7c5d6fa735ab34e339624a614eaaa7e7d76 \ diff -Nru awscli-2.15.9/requirements-base.txt awscli-2.15.22/requirements-base.txt --- awscli-2.15.9/requirements-base.txt 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/requirements-base.txt 2024-02-21 17:34:54.000000000 +0000 @@ -1,2 +1,2 @@ wheel==0.38.4 -flit_core==3.8.0 +flit_core==3.9.0 diff -Nru awscli-2.15.9/requirements-docs.txt awscli-2.15.22/requirements-docs.txt --- awscli-2.15.9/requirements-docs.txt 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/requirements-docs.txt 2024-02-21 17:34:54.000000000 +0000 @@ -3,4 +3,13 @@ sphinx-notfound-page==0.4 # alabaster 0.7.14 dropped support for Sphinx<3.4 # and Sphinx 3.0.2 bounds alabaster>=0.7,<0.8. -alabaster==0.7.13 \ No newline at end of file +alabaster==0.7.13 +# Latest versions of these packages support only +# Sphinx>5, which we are not using. Set explicit pins +# so we can continue to use Sphinx 3. 
+sphinxcontrib-applehelp==1.0.4 +sphinxcontrib-devhelp==1.0.2 +sphinxcontrib-htmlhelp==2.0.1 +sphinxcontrib-jsmath==1.0.1 +sphinxcontrib-qthelp==1.0.3 +sphinxcontrib-serializinghtml==1.1.5 diff -Nru awscli-2.15.9/requirements-test.txt awscli-2.15.22/requirements-test.txt --- awscli-2.15.9/requirements-test.txt 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/requirements-test.txt 2024-02-21 17:34:54.000000000 +0000 @@ -5,3 +5,4 @@ pytest-cov==4.0.0 pytest-xdist==3.1.0 pip-tools==7.0.0 +packaging==23.2 diff -Nru awscli-2.15.9/scripts/ci/run-dep-tests awscli-2.15.22/scripts/ci/run-dep-tests --- awscli-2.15.9/scripts/ci/run-dep-tests 1970-01-01 00:00:00.000000000 +0000 +++ awscli-2.15.22/scripts/ci/run-dep-tests 2024-02-21 17:34:54.000000000 +0000 @@ -0,0 +1,35 @@ +#!/usr/bin/env python +# Don't run tests from the root repo dir. +# We want to ensure we're importing from the installed +# binary package not from the CWD. + +import os +import sys +from contextlib import contextmanager +from subprocess import check_call + +_dname = os.path.dirname + +REPO_ROOT = _dname(_dname(_dname(os.path.abspath(__file__)))) + + +@contextmanager +def cd(path): + """Change directory while inside context manager.""" + cwd = os.getcwd() + try: + os.chdir(path) + yield + finally: + os.chdir(cwd) + + +def run(command): + env = os.environ.copy() + env['TESTS_REMOVE_REPO_ROOT_FROM_PATH'] = 'true' + return check_call(command, shell=True, env=env) + + +if __name__ == "__main__": + with cd(os.path.join(REPO_ROOT, "tests")): + run(f"{sys.executable} {REPO_ROOT}/scripts/ci/run-tests dependencies") diff -Nru awscli-2.15.9/tests/dependencies/__init__.py awscli-2.15.22/tests/dependencies/__init__.py --- awscli-2.15.9/tests/dependencies/__init__.py 1970-01-01 00:00:00.000000000 +0000 +++ awscli-2.15.22/tests/dependencies/__init__.py 2024-02-21 17:34:54.000000000 +0000 @@ -0,0 +1,12 @@ +# Copyright 2024 Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"). You +# may not use this file except in compliance with the License. A copy of +# the License is located at +# +# http://aws.amazon.com/apache2.0/ +# +# or in the "license" file accompanying this file. This file is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF +# ANY KIND, either express or implied. See the License for the specific +# language governing permissions and limitations under the License. diff -Nru awscli-2.15.9/tests/dependencies/test_closure.py awscli-2.15.22/tests/dependencies/test_closure.py --- awscli-2.15.9/tests/dependencies/test_closure.py 1970-01-01 00:00:00.000000000 +0000 +++ awscli-2.15.22/tests/dependencies/test_closure.py 2024-02-21 17:34:54.000000000 +0000 @@ -0,0 +1,183 @@ +# Copyright 2024 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). You +# may not use this file except in compliance with the License. A copy of +# the License is located at +# +# http://aws.amazon.com/apache2.0/ +# +# or in the "license" file accompanying this file. This file is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF +# ANY KIND, either express or implied. See the License for the specific +# language governing permissions and limitations under the License. 
+import fnmatch +import functools +import importlib.metadata +import json +import os +import site +from typing import Dict, Iterator, List, Tuple + +import pytest +from packaging.requirements import Requirement + +_NESTED_STR_DICT = Dict[str, "_NESTED_STR_DICT"] + + +@pytest.fixture() +def awscli_package(): + return Package(name="awscli") + + +class Package: + def __init__(self, name: str) -> None: + self.name = name + + @functools.cached_property + def runtime_dependencies(self) -> "DependencyClosure": + return self._get_runtime_closure() + + def _get_runtime_closure(self) -> "DependencyClosure": + closure = DependencyClosure() + for requirement in self._get_runtime_requirements(): + if self._requirement_applies_to_environment(requirement): + closure[requirement] = Package(name=requirement.name) + return closure + + def _get_runtime_requirements(self) -> List[Requirement]: + req_strings = self._get_distribution(self.name).requires + if req_strings is None: + return [] + return [Requirement(req_string) for req_string in req_strings] + + def _requirement_applies_to_environment( + self, requirement: Requirement + ) -> bool: + # Do not include any requirements defined as extras as currently + # our dependency closure does not use any extras + if requirement.extras: + return False + # Only include requirements where the markers apply to the current + # environment. + if requirement.marker and not requirement.marker.evaluate(): + return False + return True + + def _get_distribution(self, name: str) -> importlib.metadata.Distribution: + # For v2, we inject our own MetaPathFinder to handle + # botocore/s3transfer import aliases. However for the typical + # importlib.metadata.distribution(), it extends the built-in + # MetaPathFinder to include its own find_distributions() method + # to search for distribution directories. 
Read more here: + # https://docs.python.org/3/library/importlib.metadata.html#extending-the-search-algorithm + # + # Our MetaPathFinder class does not implement this method, which + # causes importlib.metadata.distribution() to not find the "awscli" + # package. So instead, this helper method is implemented to locate the + # dist-info directories based off our current site-packages + # and explicitly provide the directory to avoid needing to use + # MetaPathFinders and thus avoid this issue. + + # Packages names may have a "-". These get converted to "_" for + # their respective directory names in the site packages directory. + snake_case_name = name.replace("-", "_") + for sitepackages in site.getsitepackages(): + for filename in os.listdir(sitepackages): + if fnmatch.fnmatch(filename, f"{snake_case_name}-*.dist-info"): + return importlib.metadata.Distribution.at( + os.path.join(sitepackages, filename) + ) + raise ValueError( + f'Could not find .dist-info directory for {snake_case_name}' + ) + + +class DependencyClosure: + def __init__(self) -> None: + self._req_to_package: Dict[Requirement, Package] = {} + + def __setitem__(self, key: Requirement, value: Package) -> None: + self._req_to_package[key] = value + + def __getitem__(self, key: Requirement) -> Package: + return self._req_to_package[key] + + def __delitem__(self, key: Requirement) -> None: + del self._req_to_package[key] + + def __iter__(self) -> Iterator[Requirement]: + return iter(self._req_to_package) + + def __len__(self) -> int: + return len(self._req_to_package) + + def walk(self) -> Iterator[Tuple[Requirement, Package]]: + for req, package in self._req_to_package.items(): + yield req, package + yield from package.runtime_dependencies.walk() + + def to_dict(self) -> _NESTED_STR_DICT: + reqs = {} + for req, package in self._req_to_package.items(): + reqs[str(req)] = package.runtime_dependencies.to_dict() + return reqs + + +class TestDependencyClosure: + def _is_bounded_version_requirement( + self, 
requirement: Requirement + ) -> bool: + for specifier in requirement.specifier: + if specifier.operator in ["==", "<=", "<"]: + return True + return False + + def _pformat_closure(self, closure: DependencyClosure) -> str: + return json.dumps(closure.to_dict(), sort_keys=True, indent=2) + + def test_expected_runtime_dependencies(self, awscli_package): + expected_dependencies = { + "awscrt", + "cffi", + "colorama", + "cryptography", + "distro", + "docutils", + "jmespath", + "prompt-toolkit", + "pycparser", + "python-dateutil", + "ruamel.yaml", + "ruamel.yaml.clib", + "six", + "urllib3", + "wcwidth", + } + actual_dependencies = set() + for _, package in awscli_package.runtime_dependencies.walk(): + actual_dependencies.add(package.name) + assert actual_dependencies == expected_dependencies, ( + f"Unexpected dependency found in runtime closure: " + f"{self._pformat_closure(awscli_package.runtime_dependencies)}" + ) + + def test_expected_unbounded_runtime_dependencies(self, awscli_package): + expected_unbounded_dependencies = { + "cffi", # Transitive dependency from cryptography + "pycparser", # Transitive dependency from cffi + "six", # Transitive dependency from python-dateutil + "wcwidth", # Transitive dependency from prompt-toolkit + } + all_dependencies = set() + bounded_dependencies = set() + for req, package in awscli_package.runtime_dependencies.walk(): + all_dependencies.add(package.name) + if self._is_bounded_version_requirement(req): + bounded_dependencies.add(package.name) + actual_unbounded_dependencies = all_dependencies - bounded_dependencies + assert ( + actual_unbounded_dependencies == expected_unbounded_dependencies + ), ( + f"Unexpected unbounded dependency found in runtime closure: " + f"{self._pformat_closure(awscli_package.runtime_dependencies)}" + ) diff -Nru awscli-2.15.9/tests/functional/botocore/endpoint-rules/appconfigdata/endpoint-tests-1.json awscli-2.15.22/tests/functional/botocore/endpoint-rules/appconfigdata/endpoint-tests-1.json --- 
awscli-2.15.9/tests/functional/botocore/endpoint-rules/appconfigdata/endpoint-tests-1.json 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/tests/functional/botocore/endpoint-rules/appconfigdata/endpoint-tests-1.json 2024-02-21 17:34:54.000000000 +0000 @@ -420,7 +420,7 @@ "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://appconfigdata-fips.us-gov-east-1.amazonaws.com" + "url": "https://appconfigdata.us-gov-east-1.amazonaws.com" } }, "params": { diff -Nru awscli-2.15.9/tests/functional/botocore/endpoint-rules/artifact/endpoint-tests-1.json awscli-2.15.22/tests/functional/botocore/endpoint-rules/artifact/endpoint-tests-1.json --- awscli-2.15.9/tests/functional/botocore/endpoint-rules/artifact/endpoint-tests-1.json 1970-01-01 00:00:00.000000000 +0000 +++ awscli-2.15.22/tests/functional/botocore/endpoint-rules/artifact/endpoint-tests-1.json 2024-02-21 17:34:54.000000000 +0000 @@ -0,0 +1,314 @@ +{ + "testCases": [ + { + "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://artifact-fips.us-east-1.api.aws" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://artifact-fips.us-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://artifact.us-east-1.api.aws" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://artifact.us-east-1.amazonaws.com" + } + }, + 
"params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://artifact-fips.cn-north-1.api.amazonwebservices.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://artifact-fips.cn-north-1.amazonaws.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://artifact.cn-north-1.api.amazonwebservices.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://artifact.cn-north-1.amazonaws.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://artifact-fips.us-gov-east-1.api.aws" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://artifact-fips.us-gov-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://artifact.us-gov-east-1.api.aws" + } + }, + "params": { + 
"Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://artifact.us-gov-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://artifact-fips.us-iso-east-1.c2s.ic.gov" + } + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://artifact.us-iso-east-1.c2s.ic.gov" + } + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://artifact-fips.us-isob-east-1.sc2s.sgov.gov" + } 
+ }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://artifact.us-isob-east-1.sc2s.sgov.gov" + } + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For custom endpoint with region set and fips disabled and dualstack disabled", + "expect": { + "endpoint": { + "url": "https://example.com" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with region not set and fips disabled and dualstack disabled", + "expect": { + "endpoint": { + "url": "https://example.com" + } + }, + "params": { + "UseFIPS": false, + "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with fips enabled and dualstack disabled", + "expect": { + "error": "Invalid Configuration: FIPS and custom endpoint are not supported" + }, + "params": { + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with fips disabled and dualstack enabled", + "expect": { + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": true, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } 
+ } + ], + "version": "1.0" +} \ No newline at end of file diff -Nru awscli-2.15.9/tests/functional/botocore/endpoint-rules/chatbot/endpoint-tests-1.json awscli-2.15.22/tests/functional/botocore/endpoint-rules/chatbot/endpoint-tests-1.json --- awscli-2.15.9/tests/functional/botocore/endpoint-rules/chatbot/endpoint-tests-1.json 1970-01-01 00:00:00.000000000 +0000 +++ awscli-2.15.22/tests/functional/botocore/endpoint-rules/chatbot/endpoint-tests-1.json 2024-02-21 17:34:54.000000000 +0000 @@ -0,0 +1,314 @@ +{ + "testCases": [ + { + "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://chatbot-fips.us-east-1.api.aws" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://chatbot-fips.us-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://chatbot.us-east-1.api.aws" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://chatbot.us-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://chatbot-fips.cn-north-1.api.amazonwebservices.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", + "expect": { + 
"endpoint": { + "url": "https://chatbot-fips.cn-north-1.amazonaws.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://chatbot.cn-north-1.api.amazonwebservices.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://chatbot.cn-north-1.amazonaws.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://chatbot-fips.us-gov-east-1.api.aws" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://chatbot-fips.us-gov-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://chatbot.us-gov-east-1.api.aws" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://chatbot.us-gov-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack 
are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://chatbot-fips.us-iso-east-1.c2s.ic.gov" + } + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://chatbot.us-iso-east-1.c2s.ic.gov" + } + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://chatbot-fips.us-isob-east-1.sc2s.sgov.gov" + } + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", + "expect": { + 
"endpoint": { + "url": "https://chatbot.us-isob-east-1.sc2s.sgov.gov" + } + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For custom endpoint with region set and fips disabled and dualstack disabled", + "expect": { + "endpoint": { + "url": "https://example.com" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with region not set and fips disabled and dualstack disabled", + "expect": { + "endpoint": { + "url": "https://example.com" + } + }, + "params": { + "UseFIPS": false, + "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with fips enabled and dualstack disabled", + "expect": { + "error": "Invalid Configuration: FIPS and custom endpoint are not supported" + }, + "params": { + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with fips disabled and dualstack enabled", + "expect": { + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": true, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } + } + ], + "version": "1.0" +} \ No newline at end of file diff -Nru awscli-2.15.9/tests/functional/botocore/endpoint-rules/resource-explorer-2/endpoint-tests-1.json awscli-2.15.22/tests/functional/botocore/endpoint-rules/resource-explorer-2/endpoint-tests-1.json --- awscli-2.15.9/tests/functional/botocore/endpoint-rules/resource-explorer-2/endpoint-tests-1.json 2024-01-10 17:33:58.000000000 +0000 +++ 
awscli-2.15.22/tests/functional/botocore/endpoint-rules/resource-explorer-2/endpoint-tests-1.json 2024-02-21 17:34:54.000000000 +0000 @@ -9,7 +9,21 @@ }, "params": { "Region": "us-east-1", - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://resource-explorer-2-fips.us-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": false } }, { @@ -21,7 +35,21 @@ }, "params": { "Region": "us-east-1", - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://resource-explorer-2.us-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { @@ -33,7 +61,21 @@ }, "params": { "Region": "cn-north-1", - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://resource-explorer-2-fips.cn-north-1.amazonaws.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": true, + "UseDualStack": false } }, { @@ -45,7 +87,21 @@ }, "params": { "Region": "cn-north-1", - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://resource-explorer-2.cn-north-1.amazonaws.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": false, + "UseDualStack": false } }, { @@ -57,7 +113,21 @@ }, "params": { "Region": "us-gov-east-1", - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack 
disabled", + "expect": { + "endpoint": { + "url": "https://resource-explorer-2-fips.us-gov-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": false } }, { @@ -69,7 +139,117 @@ }, "params": { "Region": "us-gov-east-1", - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://resource-explorer-2.us-gov-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://resource-explorer-2-fips.us-iso-east-1.c2s.ic.gov" + } + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://resource-explorer-2.us-iso-east-1.c2s.ic.gov" + } + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support 
one or both" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://resource-explorer-2-fips.us-isob-east-1.sc2s.sgov.gov" + } + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://resource-explorer-2.us-isob-east-1.sc2s.sgov.gov" + } + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": false } }, { @@ -82,6 +262,7 @@ "params": { "Region": "us-east-1", "UseFIPS": false, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -94,6 +275,7 @@ }, "params": { "UseFIPS": false, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -105,6 +287,19 @@ "params": { "Region": "us-east-1", "UseFIPS": true, + "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with fips disabled and dualstack enabled", + "expect": { + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": true, "Endpoint": "https://example.com" } }, diff -Nru awscli-2.15.9/tests/functional/botocore/endpoint-rules/s3control/endpoint-tests-1.json awscli-2.15.22/tests/functional/botocore/endpoint-rules/s3control/endpoint-tests-1.json --- awscli-2.15.9/tests/functional/botocore/endpoint-rules/s3control/endpoint-tests-1.json 2024-01-10 
17:33:58.000000000 +0000 +++ awscli-2.15.22/tests/functional/botocore/endpoint-rules/s3control/endpoint-tests-1.json 2024-02-21 17:34:54.000000000 +0000 @@ -967,10 +967,54 @@ } }, { - "documentation": "outpost access points do not support dualstack@us-west-2", + "documentation": "outpost access points support dualstack@us-west-2", "expect": { - "error": "Invalid configuration: Outpost Access Points do not support dual-stack" + "endpoint": { + "headers": { + "x-amz-account-id": [ + "123456789012" + ], + "x-amz-outpost-id": [ + "op-01234567890123456" + ] + }, + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "s3-outposts", + "signingRegion": "us-west-2", + "disableDoubleEncoding": true + } + ] + }, + "url": "https://s3-outposts.us-west-2.api.aws" + } }, + "operationInputs": [ + { + "builtInParams": { + "AWS::Region": "us-west-2", + "AWS::UseDualStack": true + }, + "operationName": "GetAccessPoint", + "operationParams": { + "Name": "arn:aws:s3-outposts:us-west-2:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", + "AccountId": "123456789012" + } + }, + { + "builtInParams": { + "AWS::Region": "us-west-2", + "AWS::UseDualStack": true + }, + "operationName": "DeleteAccessPoint", + "operationParams": { + "Name": "arn:aws:s3-outposts:us-west-2:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", + "AccountId": "123456789012" + } + } + ], "params": { "AccessPointName": "arn:aws:s3-outposts:us-west-2:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", "AccountId": "123456789012", @@ -981,31 +1025,121 @@ } }, { - "documentation": "outpost access points do not support dualstack@cn-north-1", + "documentation": "outpost access points support dualstack@af-south-1", "expect": { - "error": "Invalid configuration: Outpost Access Points do not support dual-stack" + "endpoint": { + "headers": { + "x-amz-account-id": [ + "123456789012" + ], + "x-amz-outpost-id": [ + "op-01234567890123456" + ] + }, + 
"properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "s3-outposts", + "signingRegion": "af-south-1", + "disableDoubleEncoding": true + } + ] + }, + "url": "https://s3-outposts.af-south-1.api.aws" + } }, + "operationInputs": [ + { + "builtInParams": { + "AWS::Region": "af-south-1", + "AWS::UseDualStack": true + }, + "operationName": "GetAccessPoint", + "operationParams": { + "Name": "arn:aws:s3-outposts:af-south-1:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", + "AccountId": "123456789012" + } + }, + { + "builtInParams": { + "AWS::Region": "af-south-1", + "AWS::UseDualStack": true + }, + "operationName": "DeleteAccessPoint", + "operationParams": { + "Name": "arn:aws:s3-outposts:af-south-1:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", + "AccountId": "123456789012" + } + } + ], "params": { - "AccessPointName": "arn:aws:s3-outposts:us-west-2:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", + "AccessPointName": "arn:aws:s3-outposts:af-south-1:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", "AccountId": "123456789012", - "Region": "cn-north-1", + "Region": "af-south-1", "RequiresAccountId": true, "UseDualStack": true, "UseFIPS": false } }, { - "documentation": "outpost access points do not support dualstack@af-south-1", + "documentation": "outpost access points support fips + dualstack@af-south-1", "expect": { - "error": "Invalid configuration: Outpost Access Points do not support dual-stack" + "endpoint": { + "headers": { + "x-amz-account-id": [ + "123456789012" + ], + "x-amz-outpost-id": [ + "op-01234567890123456" + ] + }, + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "s3-outposts", + "signingRegion": "af-south-1", + "disableDoubleEncoding": true + } + ] + }, + "url": "https://s3-outposts-fips.af-south-1.api.aws" + } }, + "operationInputs": [ + { + "builtInParams": { + "AWS::Region": "af-south-1", + "AWS::UseFIPS": true, + 
"AWS::UseDualStack": true + }, + "operationName": "GetAccessPoint", + "operationParams": { + "Name": "arn:aws:s3-outposts:af-south-1:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", + "AccountId": "123456789012" + } + }, + { + "builtInParams": { + "AWS::Region": "af-south-1", + "AWS::UseFIPS": true, + "AWS::UseDualStack": true + }, + "operationName": "DeleteAccessPoint", + "operationParams": { + "Name": "arn:aws:s3-outposts:af-south-1:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", + "AccountId": "123456789012" + } + } + ], "params": { - "AccessPointName": "arn:aws:s3-outposts:us-west-2:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", + "AccessPointName": "arn:aws:s3-outposts:af-south-1:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", "AccountId": "123456789012", "Region": "af-south-1", "RequiresAccountId": true, "UseDualStack": true, - "UseFIPS": false + "UseFIPS": true } }, { @@ -1422,6 +1556,46 @@ } }, { + "documentation": "ListRegionalBucket + OutpostId + fips + dualstack@us-east-2", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "s3-outposts", + "signingRegion": "us-east-2", + "disableDoubleEncoding": true + } + ] + }, + "url": "https://s3-outposts-fips.us-east-2.api.aws" + } + }, + "operationInputs": [ + { + "builtInParams": { + "AWS::Region": "us-east-2", + "AWS::UseFIPS": true, + "AWS::UseDualStack": true + }, + "operationName": "ListRegionalBuckets", + "operationParams": { + "AccountId": "123456789012", + "OutpostId": "op-123" + } + } + ], + "params": { + "AccountId": "123456789012", + "OutpostId": "op-123", + "Region": "us-east-2", + "RequiresAccountId": true, + "UseDualStack": true, + "UseFIPS": true + } + }, + { "documentation": "CreateBucket + OutpostId endpoint url@us-east-2", "expect": { "endpoint": { @@ -1465,11 +1639,11 @@ { "documentation": "dualstack cannot be used with outposts when an endpoint URL is 
set@us-west-2.", "expect": { - "error": "Invalid configuration: Outpost Access Points do not support dual-stack" + "error": "Invalid Configuration: DualStack and custom endpoint are not supported" }, "params": { "AccessPointName": "arn:aws:s3-outposts:us-west-2:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint", - "Endpoint": "https://beta.example.com", + "Endpoint": "https://s3-outposts.us-west-2.api.aws", "Region": "us-west-2", "RequiresAccountId": true, "UseDualStack": true, @@ -1477,21 +1651,6 @@ } }, { - "documentation": "Dual-stack cannot be used with outposts@us-west-2", - "expect": { - "error": "Invalid configuration: Outposts do not support dual-stack" - }, - "params": { - "Bucket": "bucketname", - "Endpoint": "https://beta.example.com", - "OutpostId": "op-123", - "Region": "us-west-2", - "RequiresAccountId": false, - "UseDualStack": true, - "UseFIPS": false - } - }, - { "documentation": "vanilla bucket arn requires account id@us-west-2", "expect": { "endpoint": { @@ -1720,16 +1879,50 @@ } }, { - "documentation": "Outposts do not support dualstack@us-west-2", + "documentation": "bucket ARN in aws partition with fips + dualstack@us-east-2", "expect": { - "error": "Invalid configuration: Outpost buckets do not support dual-stack" + "endpoint": { + "headers": { + "x-amz-account-id": [ + "123456789012" + ], + "x-amz-outpost-id": [ + "op-01234567890123456" + ] + }, + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "s3-outposts", + "signingRegion": "us-east-2", + "disableDoubleEncoding": true + } + ] + }, + "url": "https://s3-outposts-fips.us-east-2.api.aws" + } }, + "operationInputs": [ + { + "builtInParams": { + "AWS::Region": "us-east-2", + "AWS::UseFIPS": true, + "AWS::UseDualStack": true + }, + "operationName": "GetBucket", + "operationParams": { + "Bucket": "arn:aws:s3-outposts:us-east-2:123456789012:outpost:op-01234567890123456:bucket:mybucket", + "AccountId": "123456789012" + } + } + ], "params": { - "Bucket": 
"arn:aws:s3-outposts:us-west-2:123456789012:outpost:op-01234567890123456:bucket:mybucket", - "Region": "us-west-2", + "Bucket": "arn:aws:s3-outposts:us-east-2:123456789012:outpost:op-01234567890123456:bucket:mybucket", + "Region": "us-east-2", "RequiresAccountId": true, "UseDualStack": true, - "UseFIPS": false + "UseFIPS": true } }, { @@ -1961,10 +2154,43 @@ } }, { - "documentation": "Outposts do not support dualstack@us-west-2", + "documentation": "Outposts support dualstack @us-west-2", "expect": { - "error": "Invalid configuration: Outpost buckets do not support dual-stack" + "endpoint": { + "headers": { + "x-amz-account-id": [ + "123456789012" + ], + "x-amz-outpost-id": [ + "op-01234567890123456" + ] + }, + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "s3-outposts", + "signingRegion": "us-west-2", + "disableDoubleEncoding": true + } + ] + }, + "url": "https://s3-outposts.us-west-2.api.aws" + } }, + "operationInputs": [ + { + "builtInParams": { + "AWS::Region": "us-west-2", + "AWS::UseDualStack": true + }, + "operationName": "GetBucket", + "operationParams": { + "Bucket": "arn:aws:s3-outposts:us-west-2:123456789012:outpost:op-01234567890123456:bucket:mybucket", + "AccountId": "123456789012" + } + } + ], "params": { "Bucket": "arn:aws:s3-outposts:us-west-2:123456789012:outpost:op-01234567890123456:bucket:mybucket", "Region": "us-west-2", @@ -2202,19 +2428,6 @@ } }, { - "documentation": "Outposts do not support dualstack@us-west-2", - "expect": { - "error": "Invalid configuration: Outpost buckets do not support dual-stack" - }, - "params": { - "Bucket": "arn:aws:s3-outposts:us-west-2:123456789012:outpost:op-01234567890123456:bucket:mybucket", - "Region": "us-west-2", - "RequiresAccountId": true, - "UseDualStack": true, - "UseFIPS": false - } - }, - { "documentation": "Invalid ARN: missing outpost id and bucket@us-west-2", "expect": { "error": "Invalid ARN: The Outpost Id was not set" @@ -2995,16 +3208,16 @@ } }, { - "documentation": 
"get bucket with endpoint_url and dualstack is not supported@us-west-2", + "documentation": "get bucket with custom endpoint and dualstack is not supported@us-west-2", "expect": { - "error": "Invalid configuration: Outpost buckets do not support dual-stack" + "error": "Invalid Configuration: DualStack and custom endpoint are not supported" }, "operationInputs": [ { "builtInParams": { "AWS::Region": "us-west-2", "AWS::UseDualStack": true, - "SDK::Endpoint": "https://beta.example.com" + "SDK::Endpoint": "https://s3-outposts.us-west-2.api.aws" }, "operationName": "GetBucket", "operationParams": { @@ -3015,7 +3228,7 @@ ], "params": { "Bucket": "arn:aws:s3-outposts:us-west-2:123456789012:outpost:op-01234567890123456:bucket:mybucket", - "Endpoint": "https://beta.example.com", + "Endpoint": "https://s3-outposts.us-west-2.api.aws", "Region": "us-west-2", "RequiresAccountId": true, "UseDualStack": true, diff -Nru awscli-2.15.9/tests/functional/botocore/endpoint-rules/sagemaker/endpoint-tests-1.json awscli-2.15.22/tests/functional/botocore/endpoint-rules/sagemaker/endpoint-tests-1.json --- awscli-2.15.9/tests/functional/botocore/endpoint-rules/sagemaker/endpoint-tests-1.json 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/tests/functional/botocore/endpoint-rules/sagemaker/endpoint-tests-1.json 2024-02-21 17:34:54.000000000 +0000 @@ -433,7 +433,7 @@ "documentation": "For region us-gov-west-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://api-fips.sagemaker.us-gov-west-1.amazonaws.com" + "url": "https://api.sagemaker.us-gov-west-1.amazonaws.com" } }, "params": { @@ -459,7 +459,7 @@ "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://api-fips.sagemaker.us-gov-east-1.amazonaws.com" + "url": "https://api.sagemaker.us-gov-east-1.amazonaws.com" } }, "params": { diff -Nru awscli-2.15.9/tests/functional/botocore/endpoint-rules/supplychain/endpoint-tests-1.json 
awscli-2.15.22/tests/functional/botocore/endpoint-rules/supplychain/endpoint-tests-1.json --- awscli-2.15.9/tests/functional/botocore/endpoint-rules/supplychain/endpoint-tests-1.json 1970-01-01 00:00:00.000000000 +0000 +++ awscli-2.15.22/tests/functional/botocore/endpoint-rules/supplychain/endpoint-tests-1.json 2024-02-21 17:34:54.000000000 +0000 @@ -0,0 +1,314 @@ +{ + "testCases": [ + { + "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://scn-fips.us-east-1.api.aws" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://scn-fips.us-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://scn.us-east-1.api.aws" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://scn.us-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://scn-fips.cn-north-1.api.amazonwebservices.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://scn-fips.cn-north-1.amazonaws.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": true, + "UseDualStack": false + 
} + }, + { + "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://scn.cn-north-1.api.amazonwebservices.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://scn.cn-north-1.amazonaws.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://scn-fips.us-gov-east-1.api.aws" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://scn-fips.us-gov-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://scn.us-gov-east-1.api.aws" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://scn.us-gov-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For 
region us-iso-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://scn-fips.us-iso-east-1.c2s.ic.gov" + } + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://scn.us-iso-east-1.c2s.ic.gov" + } + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://scn-fips.us-isob-east-1.sc2s.sgov.gov" + } + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://scn.us-isob-east-1.sc2s.sgov.gov" + } + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For 
custom endpoint with region set and fips disabled and dualstack disabled", + "expect": { + "endpoint": { + "url": "https://example.com" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with region not set and fips disabled and dualstack disabled", + "expect": { + "endpoint": { + "url": "https://example.com" + } + }, + "params": { + "UseFIPS": false, + "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with fips enabled and dualstack disabled", + "expect": { + "error": "Invalid Configuration: FIPS and custom endpoint are not supported" + }, + "params": { + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with fips disabled and dualstack enabled", + "expect": { + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": true, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } + } + ], + "version": "1.0" +} \ No newline at end of file diff -Nru awscli-2.15.9/tests/functional/botocore/test_s3_control_redirects.py awscli-2.15.22/tests/functional/botocore/test_s3_control_redirects.py --- awscli-2.15.9/tests/functional/botocore/test_s3_control_redirects.py 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/tests/functional/botocore/test_s3_control_redirects.py 2024-02-21 17:34:54.000000000 +0000 @@ -20,7 +20,6 @@ from botocore import exceptions from botocore.exceptions import ( UnsupportedS3ControlArnError, - UnsupportedS3ControlConfigurationError, InvalidHostLabelError, ParamValidationError, ) @@ -128,8 +127,13 @@ 'arn': 
'arn:aws:s3-outposts:us-west-2:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint', 'config': {'s3': {'use_dualstack_endpoint': True}}, 'assertions': { - 'exception': 'UnsupportedS3ControlConfigurationError', - } + 'signing_name': 's3-outposts', + 'netloc': 's3-outposts.us-west-2.api.aws', + 'headers': { + 'x-amz-outpost-id': 'op-01234567890123456', + 'x-amz-account-id': '123456789012', + }, + }, }, { 'arn': 'arn:aws:s3-outposts:us-west-2:123456789012:outpost:op-01234567890123456:accesspoint:myaccesspoint', @@ -255,8 +259,13 @@ 'region': 'us-west-2', 'config': {'s3': {'use_dualstack_endpoint': True}}, 'assertions': { - 'exception': 'UnsupportedS3ControlConfigurationError', - } + 'signing_name': 's3-outposts', + 'netloc': 's3-outposts.us-west-2.api.aws', + 'headers': { + 'x-amz-outpost-id': 'op-01234567890123456', + 'x-amz-account-id': '123456789012', + }, + }, }, { 'arn': 'arn:aws:s3-outposts:us-west-2:123456789012:outpost', @@ -401,8 +410,11 @@ def test_outpost_id_redirection_dualstack(self): config = Config(s3={'use_dualstack_endpoint': True}) self._bootstrap_client(config=config) - with self.assertRaises(UnsupportedS3ControlConfigurationError): + self.stubber.add_response() + with self.stubber: self.client.create_bucket(Bucket='foo', OutpostId='op-123') + _assert_netloc(self.stubber, 's3-outposts.us-west-2.api.aws') + _assert_header(self.stubber, 'x-amz-outpost-id', 'op-123') def test_outpost_id_redirection_create_bucket(self): self.stubber.add_response() diff -Nru awscli-2.15.9/tests/functional/s3/test_sync_command.py awscli-2.15.22/tests/functional/s3/test_sync_command.py --- awscli-2.15.9/tests/functional/s3/test_sync_command.py 2024-01-10 17:33:58.000000000 +0000 +++ awscli-2.15.22/tests/functional/s3/test_sync_command.py 2024-02-21 17:34:54.000000000 +0000 @@ -17,7 +17,7 @@ from awscli.compat import six from awscli.customizations.s3.utils import relative_path -from awscli.testutils import mock +from awscli.testutils import mock, cd from 
tests.functional.s3 import ( BaseS3TransferCommandTest, BaseS3CLIRunnerTest, BaseCRTTransferClientTest ) @@ -467,3 +467,37 @@ self.run_command(cmdline) self.assertEqual(self.get_crt_make_request_calls(), []) self.assert_no_remaining_botocore_responses() + +class TestSyncCommandWithS3Express(BaseS3TransferCommandTest): + prefix = 's3 sync ' + + def test_incompatible_with_sync_upload(self): + cmdline = '%s %s s3://testdirectorybucket--usw2-az1--x-s3/' % (self.prefix, self.files.rootdir) + stderr = self.run_cmd(cmdline, expected_rc=252)[1] + self.assertIn('Cannot use sync command with a directory bucket.', stderr) + + def test_incompatible_with_sync_download(self): + cmdline = '%s s3://testdirectorybucket--usw2-az1--x-s3/ %s' % (self.prefix, self.files.rootdir) + stderr = self.run_cmd(cmdline, expected_rc=252)[1] + self.assertIn('Cannot use sync command with a directory bucket.', stderr) + + def test_incompatible_with_sync_copy(self): + cmdline = '%s s3://bucket/ s3://testdirectorybucket--usw2-az1--x-s3/' % self.prefix + stderr = self.run_cmd(cmdline, expected_rc=252)[1] + self.assertIn('Cannot use sync command with a directory bucket.', stderr) + + def test_incompatible_with_sync_with_delete(self): + cmdline = '%s s3://bucket/ s3://testdirectorybucket--usw2-az1--x-s3/ --delete' % self.prefix + stderr = self.run_cmd(cmdline, expected_rc=252)[1] + self.assertIn('Cannot use sync command with a directory bucket.', stderr) + + def test_compatible_with_sync_with_local_directory_like_directory_bucket(self): + self.parsed_responses = [ + {'Contents': []} + ] + cmdline = '%s s3://bucket/ testdirectorybucket--usw2-az1--x-s3/' % self.prefix + with cd(self.files.rootdir): + _, stderr, _ = self.run_cmd(cmdline) + # Just asserting that command validated and made an API call + self.assertEqual(len(self.operations_called), 1) + self.assertEqual(self.operations_called[0][0].name, 'ListObjectsV2')