From f0844160667078b3df894b09b520875a0692ffd5 Mon Sep 17 00:00:00 2001
From: Daniel Marshall <9268618+danieljmt@users.noreply.github.com>
Date: Wed, 23 Feb 2022 18:25:24 +0000
Subject: [PATCH 1/4] go.mod: update vulnerable net library (#4292)

Fixes #4291 by updating the SDK's `golang.org/x/net` dependency, which also bumps the vulnerable `golang.org/x/text` package to its latest version and addresses the CVE.
---
 go.mod |  2 +-
 go.sum | 14 +++++++-------
 2 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/go.mod b/go.mod
index 9ebf57c2347..b05bb9c0fb8 100644
--- a/go.mod
+++ b/go.mod
@@ -3,7 +3,7 @@ module github.com/aws/aws-sdk-go
 require (
 	github.com/jmespath/go-jmespath v0.4.0
 	github.com/pkg/errors v0.9.1
-	golang.org/x/net v0.0.0-20211216030914-fe4d6282115f
+	golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd
 )
 go 1.11

diff --git a/go.sum b/go.sum
index 14daba77be1..c8a516c57b2 100644
--- a/go.sum
+++ b/go.sum
@@ -9,13 +9,13 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE
 github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
 github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-golang.org/x/net v0.0.0-20211216030914-fe4d6282115f h1:hEYJvxw1lSnWIl8X9ofsYMklzaDs90JI2az5YMd4fPM=
-golang.org/x/net v0.0.0-20211216030914-fe4d6282115f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
-golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M=
-golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd h1:O7DYs+zxREGLKzKoMQrtrEacpb0ZVXA5rIwylE2Xchk=
+golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
+golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
+golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk=
+golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
 golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=

From 021d53ccfdd95072d18c896310f5a02dd7931415 Mon Sep 17 00:00:00 2001
From: aws-sdk-go-automation <43143561+aws-sdk-go-automation@users.noreply.github.com>
Date: Wed, 23 Feb 2022 11:20:47 -0800
Subject: [PATCH 2/4] Release v1.43.5 (2022-02-23) (#4293)

Release v1.43.5 (2022-02-23)
===

### Service Client Updates
* `service/lambda`: Updates service API, documentation, and waiters
  * Lambda releases .NET 6 managed runtime to be available in all commercial regions.
* `service/textract`: Updates service API
* `service/transfer`: Updates service API and documentation
  * The file input selection feature provides the ability to use either the originally uploaded file or the output file from the previous workflow step, enabling customers to make multiple copies of the original file while keeping the source file intact for file archival.
---
 CHANGELOG.md                                 |  10 +
 aws/endpoints/defaults.go                    | 207 +++++++++++++++++++
 aws/version.go                               |   2 +-
 models/apis/lambda/2015-03-31/api-2.json     |   1 +
 models/apis/lambda/2015-03-31/docs-2.json    |  10 +-
 models/apis/lambda/2015-03-31/waiters-2.json |  56 ++++-
 models/apis/textract/2018-06-27/api-2.json   |  11 +-
 models/apis/transfer/2018-11-05/api-2.json   |  21 +-
 models/apis/transfer/2018-11-05/docs-2.json  |  23 ++-
 models/endpoints/endpoints.json              | 136 ++++++++++++
 service/lambda/api.go                        |  28 ++-
 service/lambda/lambdaiface/interface.go      |   6 +
 service/lambda/waiters.go                    | 112 ++++++++++
 service/textract/api.go                      |  20 ++
 service/transfer/api.go                      | 116 +++++++----
 15 files changed, 689 insertions(+), 70 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index b7bfa43bd9b..f9cfa73c4a9 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,3 +1,13 @@
+Release v1.43.5 (2022-02-23)
+===
+
+### Service Client Updates
+* `service/lambda`: Updates service API, documentation, and waiters
+  * Lambda releases .NET 6 managed runtime to be available in all commercial regions.
+* `service/textract`: Updates service API
+* `service/transfer`: Updates service API and documentation
+  * The file input selection feature provides the ability to use either the originally uploaded file or the output file from the previous workflow step, enabling customers to make multiple copies of the original file while keeping the source file intact for file archival.
+ Release v1.43.4 (2022-02-22) === diff --git a/aws/endpoints/defaults.go b/aws/endpoints/defaults.go index 6b99e8db8c2..d81b107f167 100644 --- a/aws/endpoints/defaults.go +++ b/aws/endpoints/defaults.go @@ -1669,6 +1669,147 @@ var awsPartition = partition{ }, }, }, + "api.tunneling.iot": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "api.tunneling.iot-fips.{region}.{dnsSuffix}", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "api.tunneling.iot-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "api.tunneling.iot-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "api.tunneling.iot-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "api.tunneling.iot-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "api.tunneling.iot-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "api.tunneling.iot-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "api.tunneling.iot-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "api.tunneling.iot-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "api.tunneling.iot-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "api.tunneling.iot-fips.us-west-2.amazonaws.com", + }, + }, + }, "apigateway": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -21963,6 +22104,16 @@ var awscnPartition = partition{ }: endpoint{}, }, }, + "api.tunneling.iot": service{ + Endpoints: serviceEndpoints{ + 
endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, "apigateway": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -23471,6 +23622,14 @@ var awsusgovPartition = partition{ }, }, "acm": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "acm.{region}.{dnsSuffix}", + }, + }, Endpoints: serviceEndpoints{ endpointKey{ Region: "us-gov-east-1", @@ -23761,6 +23920,54 @@ var awsusgovPartition = partition{ }, }, }, + "api.tunneling.iot": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "api.tunneling.iot-fips.{region}.{dnsSuffix}", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "api.tunneling.iot-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "api.tunneling.iot-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "api.tunneling.iot-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "api.tunneling.iot-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, "apigateway": service{ Endpoints: serviceEndpoints{ endpointKey{ diff --git a/aws/version.go b/aws/version.go index 7a53b789d3e..ec125eca874 100644 --- a/aws/version.go +++ b/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.43.4" +const SDKVersion = "1.43.5" diff --git a/models/apis/lambda/2015-03-31/api-2.json b/models/apis/lambda/2015-03-31/api-2.json index be8a5f8f5c4..8032cd60005 100644 --- a/models/apis/lambda/2015-03-31/api-2.json +++ b/models/apis/lambda/2015-03-31/api-2.json @@ -3235,6 +3235,7 @@ "dotnetcore2.0", "dotnetcore2.1", "dotnetcore3.1", + "dotnet6", "nodejs4.3-edge", "go1.x", "ruby2.5", diff --git a/models/apis/lambda/2015-03-31/docs-2.json b/models/apis/lambda/2015-03-31/docs-2.json index 7e647a254ea..80f9da66617 100644 --- a/models/apis/lambda/2015-03-31/docs-2.json +++ b/models/apis/lambda/2015-03-31/docs-2.json @@ -57,7 +57,7 @@ "UpdateAlias": "
Updates the configuration of a Lambda function alias.
", "UpdateCodeSigningConfig": "Update the code signing configuration. Changes to the code signing configuration take effect the next time a user tries to deploy a code package to the function.
", "UpdateEventSourceMapping": "Updates an event source mapping. You can change the function that Lambda invokes, or pause invocation and resume later from the same location.
For details about how to configure different event sources, see the following topics.
The following error handling options are only available for stream sources (DynamoDB and Kinesis):
BisectBatchOnFunctionError
- If the function returns an error, split the batch in two and retry.
DestinationConfig
- Send discarded records to an Amazon SQS queue or Amazon SNS topic.
MaximumRecordAgeInSeconds
- Discard records older than the specified age. The default value is infinite (-1). When set to infinite (-1), failed records are retried until the record expires.
MaximumRetryAttempts
- Discard records after the specified number of retries. The default value is infinite (-1). When set to infinite (-1), failed records are retried until the record expires.
ParallelizationFactor
- Process multiple batches from each shard concurrently.
For information about which configuration parameters apply to each event source, see the following topics.
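The stream-source options above map directly onto UpdateEventSourceMappingInput in this SDK. A minimal sketch (the mapping UUID and tuning values are hypothetical, not taken from this changeset):

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/lambda"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := lambda.New(sess)

	out, err := svc.UpdateEventSourceMapping(&lambda.UpdateEventSourceMappingInput{
		UUID:                       aws.String("14e0db71-xxxx-xxxx-xxxx-example"), // hypothetical mapping ID
		BisectBatchOnFunctionError: aws.Bool(true),   // split failing batches in two and retry
		MaximumRecordAgeInSeconds:  aws.Int64(3600),  // discard records older than one hour
		MaximumRetryAttempts:       aws.Int64(2),     // give up after two retries
		ParallelizationFactor:      aws.Int64(2),     // process two batches per shard concurrently
	})
	if err != nil {
		fmt.Println("update failed:", err)
		return
	}
	fmt.Println("mapping state:", aws.StringValue(out.State))
}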
", - "UpdateFunctionCode": "Updates a Lambda function's code. If code signing is enabled for the function, the code package must be signed by a trusted publisher. For more information, see Configuring code signing.
The function's code is locked when you publish a version. You can't modify the code of a published version, only the unpublished version.
For a function defined as a container image, Lambda resolves the image tag to an image digest. In Amazon ECR, if you update the image tag to a new image, Lambda does not automatically update the function.
Updates a Lambda function's code. If code signing is enabled for the function, the code package must be signed by a trusted publisher. For more information, see Configuring code signing.
If the function's package type is Image
, you must specify the code package in ImageUri
as the URI of a container image in the Amazon ECR registry.
If the function's package type is Zip
, you must specify the deployment package as a .zip file archive. Enter the Amazon S3 bucket and key of the code .zip file location. You can also provide the function code inline using the ZipFile
field.
The code in the deployment package must be compatible with the target instruction set architecture of the function (x86-64
or arm64
).
The function's code is locked when you publish a version. You can't modify the code of a published version, only the unpublished version.
For a function defined as a container image, Lambda resolves the image tag to an image digest. In Amazon ECR, if you update the image tag to a new image, Lambda does not automatically update the function.
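A minimal aws-sdk-go sketch of the .zip path described above; the function name, bucket, and key are hypothetical, and an Image-type function would set ImageUri instead of S3Bucket/S3Key:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/lambda"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := lambda.New(sess)

	out, err := svc.UpdateFunctionCode(&lambda.UpdateFunctionCodeInput{
		FunctionName: aws.String("my-function"),        // hypothetical
		S3Bucket:     aws.String("my-deploy-bucket"),   // .zip package location; Zip package type only
		S3Key:        aws.String("builds/func-v2.zip"), // hypothetical key
	})
	if err != nil {
		fmt.Println("UpdateFunctionCode failed:", err)
		return
	}
	fmt.Println("new code sha256:", aws.StringValue(out.CodeSha256))
}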
Modify the version-specific settings of a Lambda function.
When you update a function, Lambda provisions an instance of the function and its supporting resources. If your function connects to a VPC, this process can take a minute. During this time, you can't modify the function, but you can still invoke it. The LastUpdateStatus
, LastUpdateStatusReason
, and LastUpdateStatusReasonCode
fields in the response from GetFunctionConfiguration indicate when the update is complete and the function is processing events with the new configuration. For more information, see Function States.
These settings can vary between versions of a function and are locked when you publish a version. You can't modify the configuration of a published version, only the unpublished version.
To configure function concurrency, use PutFunctionConcurrency. To grant invoke permissions to an account or Amazon Web Services service, use AddPermission.
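Paired with the FunctionUpdatedV2 waiter this release adds to waiters-2.json (1-second delay, up to 300 attempts against GetFunction), a configuration change can be awaited as below — a sketch with hypothetical names, assuming the generated waiter surfaces as WaitUntilFunctionUpdatedV2 in service/lambda/waiters.go:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/lambda"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := lambda.New(sess)

	_, err := svc.UpdateFunctionConfiguration(&lambda.UpdateFunctionConfigurationInput{
		FunctionName: aws.String("my-function"), // hypothetical
		MemorySize:   aws.Int64(512),
		Timeout:      aws.Int64(30),
	})
	if err != nil {
		fmt.Println("update failed:", err)
		return
	}

	// Poll GetFunction until LastUpdateStatus leaves InProgress.
	err = svc.WaitUntilFunctionUpdatedV2(&lambda.GetFunctionInput{
		FunctionName: aws.String("my-function"),
	})
	if err != nil {
		fmt.Println("function never settled:", err)
		return
	}
	fmt.Println("configuration update complete")
}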
", "UpdateFunctionEventInvokeConfig": "Updates the configuration for asynchronous invocation for a function, version, or alias.
To configure options for asynchronous invocation, use PutFunctionEventInvokeConfig.
" }, @@ -204,7 +204,7 @@ "InvocationRequest$Payload": "The JSON that you want to provide to your Lambda function as input.
You can enter the JSON directly. For example, --payload '{ \"key\": \"value\" }'
. You can also specify a file path. For example, --payload file://payload.json
.
The response from the function, or an error object.
", "LayerVersionContentInput$ZipFile": "The base64-encoded contents of the layer archive. Amazon Web Services SDK and Amazon Web Services CLI clients handle the encoding for you.
", - "UpdateFunctionCodeRequest$ZipFile": "The base64-encoded contents of the deployment package. Amazon Web Services SDK and Amazon Web Services CLI clients handle the encoding for you.
" + "UpdateFunctionCodeRequest$ZipFile": "The base64-encoded contents of the deployment package. Amazon Web Services SDK and Amazon Web Services CLI clients handle the encoding for you. Use only with a function defined with a .zip file archive deployment package.
" } }, "BlobStream": { @@ -1606,7 +1606,7 @@ "refs": { "FunctionCode$S3Bucket": "An Amazon S3 bucket in the same Amazon Web Services Region as your function. The bucket can be in a different Amazon Web Services account.
", "LayerVersionContentInput$S3Bucket": "The Amazon S3 bucket of the layer archive.
", - "UpdateFunctionCodeRequest$S3Bucket": "An Amazon S3 bucket in the same Amazon Web Services Region as your function. The bucket can be in a different Amazon Web Services account.
" + "UpdateFunctionCodeRequest$S3Bucket": "An Amazon S3 bucket in the same Amazon Web Services Region as your function. The bucket can be in a different Amazon Web Services account. Use only with a function defined with a .zip file archive deployment package.
" } }, "S3Key": { @@ -1614,7 +1614,7 @@ "refs": { "FunctionCode$S3Key": "The Amazon S3 key of the deployment package.
", "LayerVersionContentInput$S3Key": "The Amazon S3 key of the layer archive.
", - "UpdateFunctionCodeRequest$S3Key": "The Amazon S3 key of the deployment package.
" + "UpdateFunctionCodeRequest$S3Key": "The Amazon S3 key of the deployment package. Use only with a function defined with a .zip file archive deployment package.
" } }, "S3ObjectVersion": { @@ -1850,7 +1850,7 @@ "UnsupportedMediaTypeException$message": null, "UpdateAliasRequest$RevisionId": "Only update the alias if the revision ID matches the ID that's specified. Use this option to avoid modifying an alias that has changed since you last read it.
", "UpdateEventSourceMappingRequest$UUID": "The identifier of the event source mapping.
", - "UpdateFunctionCodeRequest$ImageUri": "URI of a container image in the Amazon ECR registry.
", + "UpdateFunctionCodeRequest$ImageUri": "URI of a container image in the Amazon ECR registry. Do not use for a function defined with a .zip file archive.
", "UpdateFunctionCodeRequest$RevisionId": "Only update the function if the revision ID matches the ID that's specified. Use this option to avoid modifying a function that has changed since you last read it.
", "UpdateFunctionConfigurationRequest$RevisionId": "Only update the function if the revision ID matches the ID that's specified. Use this option to avoid modifying a function that has changed since you last read it.
" } diff --git a/models/apis/lambda/2015-03-31/waiters-2.json b/models/apis/lambda/2015-03-31/waiters-2.json index af39bfc169c..b4c18f64310 100644 --- a/models/apis/lambda/2015-03-31/waiters-2.json +++ b/models/apis/lambda/2015-03-31/waiters-2.json @@ -22,7 +22,7 @@ "delay": 5, "maxAttempts": 60, "operation": "GetFunctionConfiguration", - "description": "Waits for the function's State to be Active.", + "description": "Waits for the function's State to be Active. This waiter uses GetFunctionConfiguration API. This should be used after new function creation.", "acceptors": [ { "state": "success", @@ -48,7 +48,7 @@ "delay": 5, "maxAttempts": 60, "operation": "GetFunctionConfiguration", - "description": "Waits for the function's LastUpdateStatus to be Successful.", + "description": "Waits for the function's LastUpdateStatus to be Successful. This waiter uses GetFunctionConfiguration API. This should be used after function updates.", "acceptors": [ { "state": "success", @@ -69,6 +69,58 @@ "expected": "InProgress" } ] + }, + "FunctionActiveV2": { + "delay": 1, + "maxAttempts": 300, + "operation": "GetFunction", + "description": "Waits for the function's State to be Active. This waiter uses GetFunction API. This should be used after new function creation.", + "acceptors": [ + { + "state": "success", + "matcher": "path", + "argument": "Configuration.State", + "expected": "Active" + }, + { + "state": "failure", + "matcher": "path", + "argument": "Configuration.State", + "expected": "Failed" + }, + { + "state": "retry", + "matcher": "path", + "argument": "Configuration.State", + "expected": "Pending" + } + ] + }, + "FunctionUpdatedV2": { + "delay": 1, + "maxAttempts": 300, + "operation": "GetFunction", + "description": "Waits for the function's LastUpdateStatus to be Successful. This waiter uses GetFunction API. 
This should be used after function updates.", + "acceptors": [ + { + "state": "success", + "matcher": "path", + "argument": "Configuration.LastUpdateStatus", + "expected": "Successful" + }, + { + "state": "failure", + "matcher": "path", + "argument": "Configuration.LastUpdateStatus", + "expected": "Failed" + }, + { + "state": "retry", + "matcher": "path", + "argument": "Configuration.LastUpdateStatus", + "expected": "InProgress" + } + ] } } } diff --git a/models/apis/textract/2018-06-27/api-2.json b/models/apis/textract/2018-06-27/api-2.json index 66e4a519491..ef352b38554 100644 --- a/models/apis/textract/2018-06-27/api-2.json +++ b/models/apis/textract/2018-06-27/api-2.json @@ -324,7 +324,9 @@ "WORD", "TABLE", "CELL", - "SELECTION_ELEMENT" + "SELECTION_ELEMENT", + "MERGED_CELL", + "TITLE" ] }, "BoundingBox":{ @@ -404,7 +406,8 @@ "type":"string", "enum":[ "KEY", - "VALUE" + "VALUE", + "COLUMN_HEADER" ] }, "EntityTypes":{ @@ -800,7 +803,9 @@ "enum":[ "VALUE", "CHILD", - "COMPLEX_FEATURES" + "COMPLEX_FEATURES", + "MERGED_CELL", + "TITLE" ] }, "RoleArn":{ diff --git a/models/apis/transfer/2018-11-05/api-2.json b/models/apis/transfer/2018-11-05/api-2.json index 593fd1303a4..f48ca227604 100644 --- a/models/apis/transfer/2018-11-05/api-2.json +++ b/models/apis/transfer/2018-11-05/api-2.json @@ -554,7 +554,8 @@ "members":{ "Name":{"shape":"WorkflowStepName"}, "DestinationFileLocation":{"shape":"InputFileLocation"}, - "OverwriteExisting":{"shape":"OverwriteExisting"} + "OverwriteExisting":{"shape":"OverwriteExisting"}, + "SourceFileLocation":{"shape":"SourceFileLocation"} } }, "CreateAccessRequest":{ @@ -666,7 +667,8 @@ "members":{ "Name":{"shape":"WorkflowStepName"}, "Target":{"shape":"CustomStepTarget"}, - "TimeoutSeconds":{"shape":"CustomStepTimeoutSeconds"} + "TimeoutSeconds":{"shape":"CustomStepTimeoutSeconds"}, + "SourceFileLocation":{"shape":"SourceFileLocation"} } }, "CustomStepStatus":{ @@ -721,7 +723,8 @@ "DeleteStepDetails":{ "type":"structure", "members":{ - "Name":{"shape":"WorkflowStepName"} + "Name":{"shape":"WorkflowStepName"}, + "SourceFileLocation":{"shape":"SourceFileLocation"} } }, "DeleteUserRequest":{ @@ -967,9 +970,9 @@ }, "EfsPath":{ "type":"string", - "max":100, + "max":65536, "min":1, - "pattern":"^(\\/|(\\/(?!\\.)+[^$#<>;`|&?{}^*/\\n]+){1,4})$" + "pattern":"^[^\\x00]+$" }, "EndpointDetails":{ "type":"structure", @@ -1663,6 +1666,11 @@ "min":3, "pattern":"^[\\w-]*$" }, + "SourceFileLocation":{ + "type":"string", + "max":256, + "pattern":"^\\$\\{(\\w+.)+\\w+\\}$" + }, "SourceIp":{ "type":"string", "max":32, @@ -1769,7 +1777,8 @@ "type":"structure", "members":{ "Name":{"shape":"WorkflowStepName"}, - "Tags":{"shape":"S3Tags"} + "Tags":{"shape":"S3Tags"}, + "SourceFileLocation":{"shape":"SourceFileLocation"} } }, "TagValue":{ diff --git a/models/apis/transfer/2018-11-05/docs-2.json b/models/apis/transfer/2018-11-05/docs-2.json index 34bb50ed206..a763f50325f 100644 --- a/models/apis/transfer/2018-11-05/docs-2.json +++ b/models/apis/transfer/2018-11-05/docs-2.json @@ -439,7 +439,7 @@ } }, "HomeDirectoryMapEntry": { - "base": "Represents an object that contains entries and targets for HomeDirectoryMappings
.
The following is an Entry
and Target
pair example for chroot
.
[ { \"Entry:\": \"/\", \"Target\": \"/bucket_name/home/mydirectory\" } ]
If the target of a logical directory entry does not exist in Amazon S3 or EFS, the entry is ignored. As a workaround, you can use the Amazon S3 API or EFS API to create 0 byte objects as place holders for your directory. If using the CLI, use the s3api
or efsapi
call instead of s3
or efs
so you can use the put-object operation. For example, you use the following: aws s3api put-object --bucket bucketname --key path/to/folder/
. Make sure that the end of the key name ends in a /
for it to be considered a folder.
Represents an object that contains entries and targets for HomeDirectoryMappings
.
The following is an Entry
and Target
pair example for chroot
.
[ { \"Entry:\": \"/\", \"Target\": \"/bucket_name/home/mydirectory\" } ]
Logical directory mappings that specify what Amazon S3 or Amazon EFS paths and keys should be visible to your user and how you want to make them visible. You must specify the Entry
and Target
pair, where Entry
shows how the path is made visible and Target
is the actual Amazon S3 or Amazon EFS path. If you only specify a target, it is displayed as is. You also must ensure that your Amazon Web Services Identity and Access Management (IAM) role provides access to paths in Target
. This value can only be set when HomeDirectoryType
is set to LOGICAL.
The following is an Entry
and Target
pair example.
[ { \"Entry\": \"/directory1\", \"Target\": \"/bucket_name/home/mydirectory\" } ]
In most cases, you can use this value instead of the session policy to lock down your user to the designated home directory (\"chroot
\"). To do this, you can set Entry
to /
and set Target
to the HomeDirectory
parameter value.
The following is an Entry
and Target
pair example for chroot
.
[ { \"Entry:\": \"/\", \"Target\": \"/bucket_name/home/mydirectory\" } ]
If the target of a logical directory entry does not exist in Amazon S3 or EFS, the entry is ignored. As a workaround, you can use the Amazon S3 API or EFS API to create 0 byte objects as place holders for your directory. If using the CLI, use the s3api
or efsapi
call instead of s3
or efs
so you can use the put-object operation. For example, you use the following: aws s3api put-object --bucket bucketname --key path/to/folder/
. Make sure that the end of the key name ends in a /
for it to be considered a folder.
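A sketch of the chroot pattern described above using this SDK's Transfer client; the server ID, role ARN, and paths are hypothetical placeholders:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/transfer"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := transfer.New(sess)

	_, err := svc.CreateUser(&transfer.CreateUserInput{
		ServerId:          aws.String("s-1234567890abcdef0"), // hypothetical
		UserName:          aws.String("mydirectory-user"),    // hypothetical
		Role:              aws.String("arn:aws:iam::111122223333:role/transfer-access"), // must grant access to Target
		HomeDirectoryType: aws.String("LOGICAL"),
		HomeDirectoryMappings: []*transfer.HomeDirectoryMapEntry{
			{
				// Entry "/" locks the user into Target ("chroot").
				Entry:  aws.String("/"),
				Target: aws.String("/bucket_name/home/mydirectory"),
			},
		},
	})
	if err != nil {
		fmt.Println("CreateUser failed:", err)
	}
}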
Logical directory mappings that specify what Amazon S3 or Amazon EFS paths and keys should be visible to your user and how you want to make them visible. You must specify the Entry
and Target
pair, where Entry
shows how the path is made visible and Target
is the actual Amazon S3 or Amazon EFS path. If you only specify a target, it is displayed as is. You also must ensure that your Amazon Web Services Identity and Access Management (IAM) role provides access to paths in Target
. This value can only be set when HomeDirectoryType
is set to LOGICAL.
The following is an Entry
and Target
pair example.
[ { \"Entry\": \"/directory1\", \"Target\": \"/bucket_name/home/mydirectory\" } ]
In most cases, you can use this value instead of the session policy to lock your user down to the designated home directory (\"chroot
\"). To do this, you can set Entry
to /
and set Target
to the HomeDirectory parameter value.
The following is an Entry
and Target
pair example for chroot
.
[ { \"Entry:\": \"/\", \"Target\": \"/bucket_name/home/mydirectory\" } ]
If the target of a logical directory entry does not exist in Amazon S3 or EFS, the entry is ignored. As a workaround, you can use the Amazon S3 API or EFS API to create 0 byte objects as place holders for your directory. If using the CLI, use the s3api
or efsapi
call instead of s3
or efs
so you can use the put-object operation. For example, you use the following: aws s3api put-object --bucket bucketname --key path/to/folder/
. Make sure that the end of the key name ends in a /
for it to be considered a folder.
Logical directory mappings that specify what Amazon S3 or Amazon EFS paths and keys should be visible to your user and how you want to make them visible. You must specify the Entry
and Target
pair, where Entry
shows how the path is made visible and Target
is the actual Amazon S3 or Amazon EFS path. If you only specify a target, it is displayed as is. You also must ensure that your Amazon Web Services Identity and Access Management (IAM) role provides access to paths in Target
. This value can only be set when HomeDirectoryType
is set to LOGICAL.
The following is an Entry
and Target
pair example.
[ { \"Entry\": \"/directory1\", \"Target\": \"/bucket_name/home/mydirectory\" } ]
In most cases, you can use this value instead of the session policy to lock down your user to the designated home directory (\"chroot
\"). To do this, you can set Entry
to /
and set Target
to the HomeDirectory
parameter value.
The following is an Entry
and Target
pair example for chroot
.
[ { \"Entry:\": \"/\", \"Target\": \"/bucket_name/home/mydirectory\" } ]
Logical directory mappings that specify what Amazon S3 or Amazon EFS paths and keys should be visible to your user and how you want to make them visible. You must specify the Entry
and Target
pair, where Entry
shows how the path is made visible and Target
is the actual Amazon S3 or Amazon EFS path. If you only specify a target, it is displayed as is. You also must ensure that your Amazon Web Services Identity and Access Management (IAM) role provides access to paths in Target
. This value can only be set when HomeDirectoryType
is set to LOGICAL.
The following is an Entry
and Target
pair example.
[ { \"Entry\": \"/directory1\", \"Target\": \"/bucket_name/home/mydirectory\" } ]
In most cases, you can use this value instead of the session policy to lock your user down to the designated home directory (\"chroot
\"). To do this, you can set Entry
to /
and set Target
to the HomeDirectory parameter value.
The following is an Entry
and Target
pair example for chroot
.
[ { \"Entry:\": \"/\", \"Target\": \"/bucket_name/home/mydirectory\" } ]
Logical directory mappings that specify what Amazon S3 or Amazon EFS paths and keys should be visible to your user and how you want to make them visible. You must specify the Entry
and Target
pair, where Entry
shows how the path is made visible and Target
is the actual Amazon S3 or Amazon EFS path. If you only specify a target, it is displayed as is. You also must ensure that your Amazon Web Services Identity and Access Management (IAM) role provides access to paths in Target
. This value can only be set when HomeDirectoryType
is set to LOGICAL.
In most cases, you can use this value instead of the session policy to lock down the associated access to the designated home directory (\"chroot
\"). To do this, you can set Entry
to '/' and set Target
to the HomeDirectory
parameter value.
Logical directory mappings that specify what Amazon S3 or Amazon EFS paths and keys should be visible to your user and how you want to make them visible. You must specify the Entry
and Target
pair, where Entry
shows how the path is made visible and Target
is the actual Amazon S3 or Amazon EFS path. If you only specify a target, it is displayed as is. You also must ensure that your Amazon Web Services Identity and Access Management (IAM) role provides access to paths in Target
. This value can only be set when HomeDirectoryType
is set to LOGICAL.
In most cases, you can use this value instead of the session policy to lock your user down to the designated home directory (\"chroot
\"). To do this, you can set Entry
to '/' and set Target
to the HomeDirectory parameter value.
Logical directory mappings that specify what Amazon S3 or Amazon EFS paths and keys should be visible to your user and how you want to make them visible. You must specify the Entry
and Target
pair, where Entry
shows how the path is made visible and Target
is the actual Amazon S3 or Amazon EFS path. If you only specify a target, it is displayed as is. You also must ensure that your Amazon Web Services Identity and Access Management (IAM) role provides access to paths in Target
. This value can only be set when HomeDirectoryType
is set to LOGICAL.
The following is an Entry
and Target
pair example.
[ { \"Entry\": \"/directory1\", \"Target\": \"/bucket_name/home/mydirectory\" } ]
In most cases, you can use this value instead of the session policy to lock down your user to the designated home directory (\"chroot
\"). To do this, you can set Entry
to /
and set Target
to the HomeDirectory
parameter value.
The following is an Entry
and Target
pair example for chroot
.
[ { \"Entry:\": \"/\", \"Target\": \"/bucket_name/home/mydirectory\" } ]
If the target of a logical directory entry does not exist in Amazon S3 or EFS, the entry is ignored. As a workaround, you can use the Amazon S3 API or EFS API to create 0 byte objects as place holders for your directory. If using the CLI, use the s3api
or efsapi
call instead of s3
or efs
so you can use the put-object operation. For example, you use the following: aws s3api put-object --bucket bucketname --key path/to/folder/
. Make sure that the end of the key name ends in a /
for it to be considered a folder.
Logical directory mappings that specify what Amazon S3 or Amazon EFS paths and keys should be visible to your user and how you want to make them visible. You must specify the Entry
and Target
pair, where Entry
shows how the path is made visible and Target
is the actual Amazon S3 or Amazon EFS path. If you only specify a target, it is displayed as is. You also must ensure that your Amazon Web Services Identity and Access Management (IAM) role provides access to paths in Target
. This value can only be set when HomeDirectoryType
is set to LOGICAL.
The following is an Entry
and Target
pair example.
[ { \"Entry\": \"/directory1\", \"Target\": \"/bucket_name/home/mydirectory\" } ]
In most cases, you can use this value instead of the session policy to lock down your user to the designated home directory (\"chroot
\"). To do this, you can set Entry
to '/' and set Target
to the HomeDirectory parameter value.
The following is an Entry
and Target
pair example for chroot
.
[ { \"Entry:\": \"/\", \"Target\": \"/bucket_name/home/mydirectory\" } ]
If the target of a logical directory entry does not exist in Amazon S3 or EFS, the entry is ignored. As a workaround, you can use the Amazon S3 API or EFS API to create 0 byte objects as place holders for your directory. If using the CLI, use the s3api
or efsapi
call instead of s3
or efs
so you can use the put-object operation. For example, you use the following: aws s3api put-object --bucket bucketname --key path/to/folder/
. Make sure that the end of the key name ends in a /
for it to be considered a folder.
Logical directory mappings that specify what Amazon S3 or Amazon EFS paths and keys should be visible to your user and how you want to make them visible. You must specify the Entry
and Target
pair, where Entry
shows how the path is made visible and Target
is the actual Amazon S3 or Amazon EFS path. If you only specify a target, it is displayed as is. You also must ensure that your Amazon Web Services Identity and Access Management (IAM) role provides access to paths in Target
. This value can only be set when HomeDirectoryType
is set to LOGICAL.
The following is an Entry
and Target
pair example.
[ { \"Entry\": \"/directory1\", \"Target\": \"/bucket_name/home/mydirectory\" } ]
In most cases, you can use this value instead of the session policy to lock down your user to the designated home directory (\"chroot
\"). To do this, you can set Entry
to /
and set Target
to the HomeDirectory
parameter value.
The following is an Entry
and Target
pair example for chroot
.
[ { \"Entry:\": \"/\", \"Target\": \"/bucket_name/home/mydirectory\" } ]
Logical directory mappings that specify what Amazon S3 or Amazon EFS paths and keys should be visible to your user and how you want to make them visible. You must specify the Entry
and Target
pair, where Entry
shows how the path is made visible and Target
is the actual Amazon S3 or Amazon EFS path. If you only specify a target, it is displayed as is. You also must ensure that your Amazon Web Services Identity and Access Management (IAM) role provides access to paths in Target
. This value can only be set when HomeDirectoryType
is set to LOGICAL.
The following is an Entry
and Target
pair example.
[ { \"Entry\": \"/directory1\", \"Target\": \"/bucket_name/home/mydirectory\" } ]
In most cases, you can use this value instead of the session policy to lock down your user to the designated home directory (\"chroot
\"). To do this, you can set Entry
to '/' and set Target
to the HomeDirectory parameter value.
The following is an Entry
and Target
pair example for chroot
.
[ { \"Entry:\": \"/\", \"Target\": \"/bucket_name/home/mydirectory\" } ]
Specifies the location for the file being copied. Only applicable for the Copy type of workflow steps.
", "refs": { - "CopyStepDetails$DestinationFileLocation": null + "CopyStepDetails$DestinationFileLocation": "Specifies the location for the file being copied. Only applicable for Copy type workflow steps. Use ${Transfer:username}
in this field to parametrize the destination prefix by username.
A message that indicates whether the test was successful or not.
" + "TestIdentityProviderResponse$Message": "A message that indicates whether the test was successful or not.
If an empty string is returned, the most likely cause is that the authentication failed due to an incorrect username or password.
The system-assigned unique identifier for a session that corresponds to the workflow.
" } }, + "SourceFileLocation": { + "base": null, + "refs": { + "CopyStepDetails$SourceFileLocation": "Specifies which file to use as input to the workflow step: either the output from the previous step, or the originally uploaded file for the workflow.
Enter ${previous.file}
to use the previous file as the input. In this case, this workflow step uses the output file from the previous workflow step as input. This is the default value.
Enter ${original.file}
to use the originally-uploaded file location as input for this step.
Specifies which file to use as input to the workflow step: either the output from the previous step, or the originally uploaded file for the workflow.
Enter ${previous.file}
to use the previous file as the input. In this case, this workflow step uses the output file from the previous workflow step as input. This is the default value.
Enter ${original.file}
to use the originally-uploaded file location as input for this step.
Specifies which file to use as input to the workflow step: either the output from the previous step, or the originally uploaded file for the workflow.
Enter ${previous.file}
to use the previous file as the input. In this case, this workflow step uses the output file from the previous workflow step as input. This is the default value.
Enter ${original.file}
to use the originally-uploaded file location as input for this step.
Specifies which file to use as input to the workflow step: either the output from the previous step, or the originally uploaded file for the workflow.
Enter ${previous.file}
to use the previous file as the input. In this case, this workflow step uses the output file from the previous workflow step as input. This is the default value.
Enter ${original.file}
to use the originally-uploaded file location as input for this step.
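A sketch of a Copy workflow step wired to the originally uploaded file via the new SourceFileLocation member; the bucket and key are hypothetical:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/transfer"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := transfer.New(sess)

	out, err := svc.CreateWorkflow(&transfer.CreateWorkflowInput{
		Description: aws.String("archive a copy of each upload"),
		Steps: []*transfer.WorkflowStep{
			{
				Type: aws.String("COPY"),
				CopyStepDetails: &transfer.CopyStepDetails{
					Name:               aws.String("archiveOriginal"),
					SourceFileLocation: aws.String("${original.file}"), // keep the source file intact
					OverwriteExisting:  aws.String("FALSE"),
					DestinationFileLocation: &transfer.InputFileLocation{
						S3FileLocation: &transfer.S3InputFileLocation{
							Bucket: aws.String("my-archive-bucket"), // hypothetical
							Key:    aws.String("archive/"),
						},
					},
				},
			},
		},
	})
	if err != nil {
		fmt.Println("CreateWorkflow failed:", err)
		return
	}
	fmt.Println("workflow:", aws.StringValue(out.WorkflowId))
}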
Deletes one or more scheduled actions for the specified Auto Scaling group.
", "BatchPutScheduledUpdateGroupAction": "Creates or updates one or more scheduled scaling actions for an Auto Scaling group.
", "CancelInstanceRefresh": "Cancels an instance refresh operation in progress. Cancellation does not roll back any replacements that have already been completed, but it prevents new replacements from being started.
This operation is part of the instance refresh feature in Amazon EC2 Auto Scaling, which helps you update instances in your Auto Scaling group after you make configuration changes.
", - "CompleteLifecycleAction": "Completes the lifecycle action for the specified token or instance with the specified result.
This step is a part of the procedure for adding a lifecycle hook to an Auto Scaling group:
(Optional) Create a Lambda function and a rule that allows Amazon EventBridge to invoke your Lambda function when Amazon EC2 Auto Scaling launches or terminates instances.
(Optional) Create a notification target and an IAM role. The target can be either an Amazon SQS queue or an Amazon SNS topic. The role allows Amazon EC2 Auto Scaling to publish lifecycle notifications to the target.
Create the lifecycle hook. Specify whether the hook is used when the instances launch or terminate.
If you need more time, record the lifecycle action heartbeat to keep the instance in a pending state.
If you finish before the timeout period ends, send a callback by using the CompleteLifecycleAction API call.
For more information, see Amazon EC2 Auto Scaling lifecycle hooks in the Amazon EC2 Auto Scaling User Guide.
", + "CompleteLifecycleAction": "Completes the lifecycle action for the specified token or instance with the specified result.
This step is a part of the procedure for adding a lifecycle hook to an Auto Scaling group:
(Optional) Create a launch template or launch configuration with a user data script that runs while an instance is in a wait state due to a lifecycle hook.
(Optional) Create a Lambda function and a rule that allows Amazon EventBridge to invoke your Lambda function when an instance is put into a wait state due to a lifecycle hook.
(Optional) Create a notification target and an IAM role. The target can be either an Amazon SQS queue or an Amazon SNS topic. The role allows Amazon EC2 Auto Scaling to publish lifecycle notifications to the target.
Create the lifecycle hook. Specify whether the hook is used when the instances launch or terminate.
If you need more time, record the lifecycle action heartbeat to keep the instance in a wait state.
If you finish before the timeout period ends, send a callback by using the CompleteLifecycleAction API call.
For more information, see Amazon EC2 Auto Scaling lifecycle hooks in the Amazon EC2 Auto Scaling User Guide.
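The final callback in the procedure above looks like this in this SDK; the group, hook, and instance identifiers are hypothetical:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/autoscaling"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := autoscaling.New(sess)

	_, err := svc.CompleteLifecycleAction(&autoscaling.CompleteLifecycleActionInput{
		AutoScalingGroupName:  aws.String("my-asg"),             // hypothetical
		LifecycleHookName:     aws.String("launch-hook"),        // hypothetical
		InstanceId:            aws.String("i-0123456789abcdef0"),
		LifecycleActionResult: aws.String("CONTINUE"),           // or "ABANDON"
	})
	if err != nil {
		fmt.Println("CompleteLifecycleAction failed:", err)
	}
}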
", "CreateAutoScalingGroup": "We strongly recommend using a launch template when calling this operation to ensure full functionality for Amazon EC2 Auto Scaling and Amazon EC2.
Creates an Auto Scaling group with the specified name and attributes.
If you exceed your maximum limit of Auto Scaling groups, the call fails. To query this limit, call the DescribeAccountLimits API. For information about updating this limit, see Amazon EC2 Auto Scaling service quotas in the Amazon EC2 Auto Scaling User Guide.
For introductory exercises for creating an Auto Scaling group, see Getting started with Amazon EC2 Auto Scaling and Tutorial: Set up a scaled and load-balanced application in the Amazon EC2 Auto Scaling User Guide. For more information, see Auto Scaling groups in the Amazon EC2 Auto Scaling User Guide.
Every Auto Scaling group has three size parameters (DesiredCapacity
, MaxSize
, and MinSize
). Usually, you set these sizes based on a specific number of instances. However, if you configure a mixed instances policy that defines weights for the instance types, you must specify these sizes with the same units that you use for weighting instances.
Creates a launch configuration.
If you exceed your maximum limit of launch configurations, the call fails. To query this limit, call the DescribeAccountLimits API. For information about updating this limit, see Amazon EC2 Auto Scaling service quotas in the Amazon EC2 Auto Scaling User Guide.
For more information, see Launch configurations in the Amazon EC2 Auto Scaling User Guide.
", "CreateOrUpdateTags": "Creates or updates tags for the specified Auto Scaling group.
When you specify a tag with a key that already exists, the operation overwrites the previous tag definition, and you do not get an error message.
For more information, see Tagging Auto Scaling groups and instances in the Amazon EC2 Auto Scaling User Guide.
", @@ -49,12 +49,12 @@ "ExecutePolicy": "Executes the specified policy. This can be useful for testing the design of your scaling policy.
", "ExitStandby": "Moves the specified instances out of the standby state.
After you put the instances back in service, the desired capacity is incremented.
For more information, see Temporarily removing instances from your Auto Scaling group in the Amazon EC2 Auto Scaling User Guide.
", "GetPredictiveScalingForecast": "Retrieves the forecast data for a predictive scaling policy.
Load forecasts are predictions of the hourly load values using historical load data from CloudWatch and an analysis of historical trends. Capacity forecasts are represented as predicted values for the minimum capacity that is needed on an hourly basis, based on the hourly load forecast.
A minimum of 24 hours of data is required to create the initial forecasts. However, having a full 14 days of historical data results in more accurate forecasts.
For more information, see Predictive scaling for Amazon EC2 Auto Scaling in the Amazon EC2 Auto Scaling User Guide.
", - "PutLifecycleHook": "Creates or updates a lifecycle hook for the specified Auto Scaling group.
A lifecycle hook enables an Auto Scaling group to be aware of events in the Auto Scaling instance lifecycle, and then perform a custom action when the corresponding lifecycle event occurs.
This step is a part of the procedure for adding a lifecycle hook to an Auto Scaling group:
(Optional) Create a Lambda function and a rule that allows Amazon EventBridge to invoke your Lambda function when Amazon EC2 Auto Scaling launches or terminates instances.
(Optional) Create a notification target and an IAM role. The target can be either an Amazon SQS queue or an Amazon SNS topic. The role allows Amazon EC2 Auto Scaling to publish lifecycle notifications to the target.
Create the lifecycle hook. Specify whether the hook is used when the instances launch or terminate.
If you need more time, record the lifecycle action heartbeat to keep the instance in a pending state using the RecordLifecycleActionHeartbeat API call.
If you finish before the timeout period ends, send a callback by using the CompleteLifecycleAction API call.
For more information, see Amazon EC2 Auto Scaling lifecycle hooks in the Amazon EC2 Auto Scaling User Guide.
If you exceed your maximum limit of lifecycle hooks, which by default is 50 per Auto Scaling group, the call fails.
You can view the lifecycle hooks for an Auto Scaling group using the DescribeLifecycleHooks API call. If you are no longer using a lifecycle hook, you can delete it by calling the DeleteLifecycleHook API.
", + "PutLifecycleHook": "Creates or updates a lifecycle hook for the specified Auto Scaling group.
Lifecycle hooks let you create solutions that are aware of events in the Auto Scaling instance lifecycle, and then perform a custom action on instances when the corresponding lifecycle event occurs.
This step is a part of the procedure for adding a lifecycle hook to an Auto Scaling group:
(Optional) Create a launch template or launch configuration with a user data script that runs while an instance is in a wait state due to a lifecycle hook.
(Optional) Create a Lambda function and a rule that allows Amazon EventBridge to invoke your Lambda function when an instance is put into a wait state due to a lifecycle hook.
(Optional) Create a notification target and an IAM role. The target can be either an Amazon SQS queue or an Amazon SNS topic. The role allows Amazon EC2 Auto Scaling to publish lifecycle notifications to the target.
Create the lifecycle hook. Specify whether the hook is used when the instances launch or terminate.
If you need more time, record the lifecycle action heartbeat to keep the instance in a wait state using the RecordLifecycleActionHeartbeat API call.
If you finish before the timeout period ends, send a callback by using the CompleteLifecycleAction API call.
For more information, see Amazon EC2 Auto Scaling lifecycle hooks in the Amazon EC2 Auto Scaling User Guide.
If you exceed your maximum limit of lifecycle hooks, which by default is 50 per Auto Scaling group, the call fails.
You can view the lifecycle hooks for an Auto Scaling group using the DescribeLifecycleHooks API call. If you are no longer using a lifecycle hook, you can delete it by calling the DeleteLifecycleHook API.
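Creating the hook itself is a single PutLifecycleHook call; the names and timeout here are hypothetical:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/autoscaling"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := autoscaling.New(sess)

	_, err := svc.PutLifecycleHook(&autoscaling.PutLifecycleHookInput{
		AutoScalingGroupName: aws.String("my-asg"),      // hypothetical
		LifecycleHookName:    aws.String("launch-hook"), // hypothetical
		LifecycleTransition:  aws.String("autoscaling:EC2_INSTANCE_LAUNCHING"),
		HeartbeatTimeout:     aws.Int64(300),            // extendable via RecordLifecycleActionHeartbeat
		DefaultResult:        aws.String("ABANDON"),     // action taken if the timeout elapses
	})
	if err != nil {
		fmt.Println("PutLifecycleHook failed:", err)
	}
}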
", "PutNotificationConfiguration": "Configures an Auto Scaling group to send notifications when specified events take place. Subscribers to the specified topic can have messages delivered to an endpoint such as a web server or an email address.
This configuration overwrites any existing configuration.
For more information, see Getting Amazon SNS notifications when your Auto Scaling group scales in the Amazon EC2 Auto Scaling User Guide.
If you exceed your maximum limit of SNS topics, which is 10 per Auto Scaling group, the call fails.
", "PutScalingPolicy": "Creates or updates a scaling policy for an Auto Scaling group. Scaling policies are used to scale an Auto Scaling group based on configurable metrics. If no policies are defined, the dynamic scaling and predictive scaling features are not used.
For more information about using dynamic scaling, see Target tracking scaling policies and Step and simple scaling policies in the Amazon EC2 Auto Scaling User Guide.
For more information about using predictive scaling, see Predictive scaling for Amazon EC2 Auto Scaling in the Amazon EC2 Auto Scaling User Guide.
You can view the scaling policies for an Auto Scaling group using the DescribePolicies API call. If you are no longer using a scaling policy, you can delete it by calling the DeletePolicy API.
", "PutScheduledUpdateGroupAction": "Creates or updates a scheduled scaling action for an Auto Scaling group.
For more information, see Scheduled scaling in the Amazon EC2 Auto Scaling User Guide.
You can view the scheduled actions for an Auto Scaling group using the DescribeScheduledActions API call. If you are no longer using a scheduled action, you can delete it by calling the DeleteScheduledAction API.
", "PutWarmPool": "Creates or updates a warm pool for the specified Auto Scaling group. A warm pool is a pool of pre-initialized EC2 instances that sits alongside the Auto Scaling group. Whenever your application needs to scale out, the Auto Scaling group can draw on the warm pool to meet its new desired capacity. For more information and example configurations, see Warm pools for Amazon EC2 Auto Scaling in the Amazon EC2 Auto Scaling User Guide.
This operation must be called from the Region in which the Auto Scaling group was created. This operation cannot be called on an Auto Scaling group that has a mixed instances policy or a launch template or launch configuration that requests Spot Instances.
You can view the instances in the warm pool using the DescribeWarmPool API call. If you are no longer using a warm pool, you can delete it by calling the DeleteWarmPool API.
", - "RecordLifecycleActionHeartbeat": "Records a heartbeat for the lifecycle action associated with the specified token or instance. This extends the timeout by the length of time defined using the PutLifecycleHook API call.
This step is a part of the procedure for adding a lifecycle hook to an Auto Scaling group:
(Optional) Create a Lambda function and a rule that allows Amazon EventBridge to invoke your Lambda function when Amazon EC2 Auto Scaling launches or terminates instances.
(Optional) Create a notification target and an IAM role. The target can be either an Amazon SQS queue or an Amazon SNS topic. The role allows Amazon EC2 Auto Scaling to publish lifecycle notifications to the target.
Create the lifecycle hook. Specify whether the hook is used when the instances launch or terminate.
If you need more time, record the lifecycle action heartbeat to keep the instance in a pending state.
If you finish before the timeout period ends, send a callback by using the CompleteLifecycleAction API call.
For more information, see Amazon EC2 Auto Scaling lifecycle hooks in the Amazon EC2 Auto Scaling User Guide.
", + "RecordLifecycleActionHeartbeat": "Records a heartbeat for the lifecycle action associated with the specified token or instance. This extends the timeout by the length of time defined using the PutLifecycleHook API call.
This step is a part of the procedure for adding a lifecycle hook to an Auto Scaling group:
(Optional) Create a launch template or launch configuration with a user data script that runs while an instance is in a wait state due to a lifecycle hook.
(Optional) Create a Lambda function and a rule that allows Amazon EventBridge to invoke your Lambda function when an instance is put into a wait state due to a lifecycle hook.
(Optional) Create a notification target and an IAM role. The target can be either an Amazon SQS queue or an Amazon SNS topic. The role allows Amazon EC2 Auto Scaling to publish lifecycle notifications to the target.
Create the lifecycle hook. Specify whether the hook is used when the instances launch or terminate.
If you need more time, record the lifecycle action heartbeat to keep the instance in a wait state.
If you finish before the timeout period ends, send a callback by using the CompleteLifecycleAction API call.
For more information, see Amazon EC2 Auto Scaling lifecycle hooks in the Amazon EC2 Auto Scaling User Guide.
", "ResumeProcesses": "Resumes the specified suspended auto scaling processes, or all suspended process, for the specified Auto Scaling group.
For more information, see Suspending and resuming scaling processes in the Amazon EC2 Auto Scaling User Guide.
", "SetDesiredCapacity": "Sets the size of the specified Auto Scaling group.
If a scale-in activity occurs as a result of a new DesiredCapacity
value that is lower than the current size of the group, the Auto Scaling group uses its termination policy to determine which instances to terminate.
For more information, see Manual scaling in the Amazon EC2 Auto Scaling User Guide.
", "SetInstanceHealth": "Sets the health status of the specified instance.
For more information, see Health checks for Auto Scaling instances in the Amazon EC2 Auto Scaling User Guide.
", @@ -858,8 +858,8 @@ "base": null, "refs": { "AutoScalingGroup$HealthCheckGracePeriod": "The amount of time, in seconds, that Amazon EC2 Auto Scaling waits before checking the health status of an EC2 instance that has come into service and marking it unhealthy due to a failed health check.
", - "CreateAutoScalingGroupType$HealthCheckGracePeriod": "The amount of time, in seconds, that Amazon EC2 Auto Scaling waits before checking the health status of an EC2 instance that has come into service and marking it unhealthy due to a failed health check. The default value is 0
. For more information, see Health check grace period in the Amazon EC2 Auto Scaling User Guide.
Conditional: Required if you are adding an ELB
health check.
The amount of time, in seconds, that Amazon EC2 Auto Scaling waits before checking the health status of an EC2 instance that has come into service and marking it unhealthy due to a failed health check. The default value is 0
. For more information, see Health check grace period in the Amazon EC2 Auto Scaling User Guide.
Conditional: Required if you are adding an ELB
health check.
The amount of time, in seconds, that Amazon EC2 Auto Scaling waits before checking the health status of an EC2 instance that has come into service and marking it unhealthy due to a failed health check. The default value is 0
. For more information, see Health check grace period in the Amazon EC2 Auto Scaling User Guide.
Required if you are adding an ELB
health check.
The amount of time, in seconds, that Amazon EC2 Auto Scaling waits before checking the health status of an EC2 instance that has come into service and marking it unhealthy due to a failed health check. The default value is 0
. For more information, see Health check grace period in the Amazon EC2 Auto Scaling User Guide.
Required if you are adding an ELB
health check.
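In this SDK, the grace period travels with the health-check settings on the create/update calls; a sketch with hypothetical names and ARNs:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/autoscaling"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := autoscaling.New(sess)

	_, err := svc.CreateAutoScalingGroup(&autoscaling.CreateAutoScalingGroupInput{
		AutoScalingGroupName: aws.String("my-asg"), // hypothetical
		MinSize:              aws.Int64(1),
		MaxSize:              aws.Int64(4),
		LaunchTemplate: &autoscaling.LaunchTemplateSpecification{
			LaunchTemplateName: aws.String("my-template"), // hypothetical
			Version:            aws.String("$Latest"),
		},
		TargetGroupARNs: []*string{
			aws.String("arn:aws:elasticloadbalancing:us-east-1:111122223333:targetgroup/my-tg/0123456789abcdef"), // hypothetical
		},
		HealthCheckType:        aws.String("ELB"),
		HealthCheckGracePeriod: aws.Int64(300), // required when adding an ELB health check
		VPCZoneIdentifier:      aws.String("subnet-0123456789abcdef0"), // hypothetical
	})
	if err != nil {
		fmt.Println("CreateAutoScalingGroup failed:", err)
	}
}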
The instance requirements. When you specify instance requirements, Amazon EC2 Auto Scaling finds instance types that satisfy your requirements, and then uses your On-Demand and Spot allocation strategies to launch instances from these instance types, in the same way as when you specify a list of specific instance types.
" } }, + "InstanceReusePolicy": { + "base": "Describes an instance reuse policy for a warm pool.
For more information, see Warm pools for Amazon EC2 Auto Scaling in the Amazon EC2 Auto Scaling User Guide.
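A sketch of opting a warm pool into instance reuse on scale-in; the group name and sizes are hypothetical, and the boolean member is assumed to be ReuseOnScaleIn as generated for this model update:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/autoscaling"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := autoscaling.New(sess)

	_, err := svc.PutWarmPool(&autoscaling.PutWarmPoolInput{
		AutoScalingGroupName: aws.String("my-asg"), // hypothetical
		MinSize:              aws.Int64(2),
		PoolState:            aws.String("Stopped"),
		InstanceReusePolicy: &autoscaling.InstanceReusePolicy{
			ReuseOnScaleIn: aws.Bool(true), // return instances to the pool instead of terminating
		},
	})
	if err != nil {
		fmt.Println("PutWarmPool failed:", err)
	}
}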
", + "refs": { + "PutWarmPoolType$InstanceReusePolicy": "Indicates whether instances in the Auto Scaling group can be returned to the warm pool on scale in. The default is to terminate instances in the Auto Scaling group when the group scales in.
", + "WarmPoolConfiguration$InstanceReusePolicy": "The instance reuse policy.
" + } + }, "Instances": { "base": null, "refs": { @@ -1102,7 +1109,7 @@ "DesiredConfiguration$LaunchTemplate": null, "Instance$LaunchTemplate": "The launch template for the instance.
", "LaunchTemplate$LaunchTemplateSpecification": "The launch template to use.
", - "LaunchTemplateOverrides$LaunchTemplateSpecification": "Provides the launch template to be used when launching the instance type specified in InstanceType
. For example, some instance types might require a launch template with a different AMI. If not provided, Amazon EC2 Auto Scaling uses the launch template that's defined for your mixed instances policy. For more information, see Specifying a different launch template for an instance type in the Amazon EC2 Auto Scaling User Guide.
Provides a launch template for the specified instance type or instance requirements. For example, some instance types might require a launch template with a different AMI. If not provided, Amazon EC2 Auto Scaling uses the launch template that's defined for your mixed instances policy. For more information, see Specifying a different launch template for an instance type in the Amazon EC2 Auto Scaling User Guide.
", "UpdateAutoScalingGroupType$LaunchTemplate": "The launch template and version to use to specify the updates. If you specify LaunchTemplate
in your update request, you can't specify LaunchConfigurationName
or MixedInstancesPolicy
.
Describes a lifecycle hook, which enables an Auto Scaling group to be aware of events in the Auto Scaling instance lifecycle, and then perform a custom action when the corresponding lifecycle event occurs.
", + "base": "Describes a lifecycle hook. A lifecycle hook lets you create solutions that are aware of events in the Auto Scaling instance lifecycle, and then perform a custom action on instances when the corresponding lifecycle event occurs.
", "refs": { "LifecycleHooks$member": null } @@ -1382,7 +1389,7 @@ "PredictiveScalingMetricSpecification$TargetValue": "Specifies the target utilization.
Some metrics are based on a count instead of a percentage, such as the request count for an Application Load Balancer or the number of messages in an SQS queue. If the scaling policy specifies one of these metrics, specify the target utilization as the optimal average request or message count per instance during any one-minute interval.
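The per-override launch template described above can be sketched in Go roughly as follows; the template and instance-type names are hypothetical, and this is only an illustrative fragment for a mixed instances policy:

	policy := &autoscaling.MixedInstancesPolicy{
		LaunchTemplate: &autoscaling.LaunchTemplate{
			LaunchTemplateSpecification: &autoscaling.LaunchTemplateSpecification{
				LaunchTemplateName: aws.String("x86-template"),
				Version:            aws.String("$Default"),
			},
			Overrides: []*autoscaling.LaunchTemplateOverrides{
				{InstanceType: aws.String("c5.large")},
				{
					// Graviton instance types need a different AMI, so this
					// override points at its own launch template.
					InstanceType: aws.String("c6g.large"),
					LaunchTemplateSpecification: &autoscaling.LaunchTemplateSpecification{
						LaunchTemplateName: aws.String("arm-template"),
						Version:            aws.String("$Default"),
					},
				},
			},
		},
	}
	_ = policy // passed via CreateAutoScalingGroupInput.MixedInstancesPolicy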
        "StepAdjustment$MetricIntervalLowerBound": "The lower bound for the difference between the alarm threshold and the CloudWatch metric. If the metric value is above the breach threshold, the lower bound is inclusive (the metric must be greater than or equal to the threshold plus the lower bound). Otherwise, it is exclusive (the metric must be greater than the threshold plus the lower bound). A null value indicates negative infinity.",
        "StepAdjustment$MetricIntervalUpperBound": "The upper bound for the difference between the alarm threshold and the CloudWatch metric. If the metric value is above the breach threshold, the upper bound is exclusive (the metric must be less than the threshold plus the upper bound). Otherwise, it is inclusive (the metric must be less than or equal to the threshold plus the upper bound). A null value indicates positive infinity. The upper bound must be greater than the lower bound.",
-       "TargetTrackingConfiguration$TargetValue": "The target value for the metric.",
+       "TargetTrackingConfiguration$TargetValue": "The target value for the metric. Some metrics are based on a count instead of a percentage, such as the request count for an Application Load Balancer or the number of messages in an SQS queue. If the scaling policy specifies one of these metrics, specify the target utilization as the optimal average request or message count per instance during any one-minute interval.",
        "The metric type. The following predefined metrics are available: ASGAverageCPUUtilization - Average CPU utilization of the Auto Scaling group. ASGAverageNetworkIn - Average number of bytes received on all network interfaces by the Auto Scaling group. ASGAverageNetworkOut - Average number of bytes sent out on all network interfaces by the Auto Scaling group. ALBRequestCountPerTarget - Number of requests completed per target in an Application Load Balancer target group.",
        "The metric type. The following predefined metrics are available: ASGAverageCPUUtilization - Average CPU utilization of the Auto Scaling group. ASGAverageNetworkIn - Average number of bytes received (per instance per minute) for the Auto Scaling group. ASGAverageNetworkOut - Average number of bytes sent out (per instance per minute) for the Auto Scaling group. ALBRequestCountPerTarget - Average Application Load Balancer request count (per target per minute) for your Auto Scaling group.",
        "The memory maximum in MiB.",
", "BaselineEbsBandwidthMbpsRequest$Min": "The minimum value in Mbps.
", "BaselineEbsBandwidthMbpsRequest$Max": "The maximum value in Mbps.
", - "InstanceRequirements$SpotMaxPricePercentageOverLowestPrice": "The price protection threshold for Spot Instances. This is the maximum you’ll pay for a Spot Instance, expressed as a percentage higher than the cheapest M, C, or R instance type with your specified attributes. When Amazon EC2 Auto Scaling selects instance types with your attributes, we will exclude instance types whose price is higher than your threshold. The parameter accepts an integer, which Amazon EC2 Auto Scaling interprets as a percentage. To turn off price protection, specify a high value, such as 999999
.
Default: 100
The price protection threshold for On-Demand Instances. This is the maximum you’ll pay for an On-Demand Instance, expressed as a percentage higher than the cheapest M, C, or R instance type with your specified attributes. When Amazon EC2 Auto Scaling selects instance types with your attributes, we will exclude instance types whose price is higher than your threshold. The parameter accepts an integer, which Amazon EC2 Auto Scaling interprets as a percentage. To turn off price protection, specify a high value, such as 999999
.
Default: 20
The price protection threshold for Spot Instances. This is the maximum you’ll pay for a Spot Instance, expressed as a percentage higher than the cheapest M, C, or R instance type with your specified attributes. When Amazon EC2 Auto Scaling selects instance types with your attributes, we will exclude instance types whose price is higher than your threshold. The parameter accepts an integer, which Amazon EC2 Auto Scaling interprets as a percentage. To turn off price protection, specify a high value, such as 999999
.
If you set DesiredCapacityType
to vcpu
or memory-mib
, the price protection threshold is applied based on the per vCPU or per memory price instead of the per instance price.
Default: 100
The price protection threshold for On-Demand Instances. This is the maximum you’ll pay for an On-Demand Instance, expressed as a percentage higher than the cheapest M, C, or R instance type with your specified attributes. When Amazon EC2 Auto Scaling selects instance types with your attributes, we will exclude instance types whose price is higher than your threshold. The parameter accepts an integer, which Amazon EC2 Auto Scaling interprets as a percentage. To turn off price protection, specify a high value, such as 999999
.
If you set DesiredCapacityType
to vcpu
or memory-mib
, the price protection threshold is applied based on the per vCPU or per memory price instead of the per instance price.
Default: 20
The memory minimum in MiB.
", "MemoryMiBRequest$Max": "The memory maximum in MiB.
", "NetworkInterfaceCountRequest$Min": "The minimum number of network interfaces.
", @@ -1844,6 +1851,12 @@ "MetricDataQuery$ReturnData": "Indicates whether to return the timestamps and raw data values of this metric.
If you use any math expressions, specify true
for this value for only the final math expression that the metric specification is based on. You must specify false
for ReturnData
for all the other metrics and expressions used in the metric specification.
If you are only retrieving metrics and not performing any math expressions, do not specify anything for ReturnData
. This sets it to its default (true
).
Specifies whether instances in the Auto Scaling group can be returned to the warm pool on scale in.
" + } + }, "ScalingActivityInProgressFault": { "base": "The operation can't be performed because there are scaling activities in progress.
", "refs": { @@ -2354,8 +2367,8 @@ "LaunchTemplateSpecification$LaunchTemplateId": "The ID of the launch template. To get the template ID, use the Amazon EC2 DescribeLaunchTemplates API operation. New launch templates can be created using the Amazon EC2 CreateLaunchTemplate API.
Conditional: You must specify either a LaunchTemplateId
or a LaunchTemplateName
.
The version number, $Latest
, or $Default
. To get the version number, use the Amazon EC2 DescribeLaunchTemplateVersions API operation. New launch template versions can be created using the Amazon EC2 CreateLaunchTemplateVersion API. If the value is $Latest
, Amazon EC2 Auto Scaling selects the latest version of the launch template when launching instances. If the value is $Default
, Amazon EC2 Auto Scaling selects the default version of the launch template when launching instances. The default value is $Default
.
The name of the Auto Scaling group for the lifecycle hook.
", - "LifecycleHook$RoleARN": "The ARN of the IAM role that allows the Auto Scaling group to publish to the specified notification target.
", - "LifecycleHookSpecification$RoleARN": "The ARN of the IAM role that allows the Auto Scaling group to publish to the specified notification target, for example, an Amazon SNS topic or an Amazon SQS queue.
", + "LifecycleHook$RoleARN": "The ARN of the IAM role that allows the Auto Scaling group to publish to the specified notification target (an Amazon SNS topic or an Amazon SQS queue).
", + "LifecycleHookSpecification$RoleARN": "The ARN of the IAM role that allows the Auto Scaling group to publish to the specified notification target.
Valid only if the notification target is an Amazon SNS topic or an Amazon SQS queue. Required for new lifecycle hooks, but optional when updating existing hooks.
", "LimitExceededFault$message": "", "LoadBalancerNames$member": null, "LoadBalancerState$LoadBalancerName": "The name of the load balancer.
", @@ -2371,7 +2384,7 @@ "ProcessNames$member": null, "ProcessType$ProcessName": "One of the following processes:
Launch
Terminate
AddToLoadBalancer
AlarmNotification
AZRebalance
HealthCheck
InstanceRefresh
ReplaceUnhealthy
ScheduledActions
The name of the Auto Scaling group.
", - "PutLifecycleHookType$RoleARN": "The ARN of the IAM role that allows the Auto Scaling group to publish to the specified notification target, for example, an Amazon SNS topic or an Amazon SQS queue.
Required for new lifecycle hooks, but optional when updating existing hooks.
", + "PutLifecycleHookType$RoleARN": "The ARN of the IAM role that allows the Auto Scaling group to publish to the specified notification target.
Valid only if the notification target is an Amazon SNS topic or an Amazon SQS queue. Required for new lifecycle hooks, but optional when updating existing hooks.
", "PutNotificationConfigurationType$AutoScalingGroupName": "The name of the Auto Scaling group.
", "PutNotificationConfigurationType$TopicARN": "The Amazon Resource Name (ARN) of the Amazon SNS topic.
", "PutScalingPolicyType$AutoScalingGroupName": "The name of the Auto Scaling group.
", @@ -2420,7 +2433,7 @@ "CreateAutoScalingGroupType$HealthCheckType": "The service to use for the health checks. The valid values are EC2
        "Instance$HealthStatus": "The last reported health status of the instance. \"Healthy\" means that the instance is healthy and should remain in service. \"Unhealthy\" means that the instance is unhealthy and that Amazon EC2 Auto Scaling should terminate and replace it.",
        "Instance$WeightedCapacity": "The number of capacity units contributed by the instance based on its instance type. Valid Range: Minimum value of 1. Maximum value of 999.",
-       "LaunchTemplateOverrides$WeightedCapacity": "The number of capacity units provided by the instance type specified in InstanceType in terms of virtual CPUs, memory, storage, throughput, or other relative performance characteristic. When a Spot or On-Demand Instance is launched, the capacity units count toward the desired capacity. Amazon EC2 Auto Scaling launches instances until the desired capacity is totally fulfilled, even if this results in an overage. For example, if there are two units remaining to fulfill capacity, and Amazon EC2 Auto Scaling can only launch an instance with a WeightedCapacity of five units, the instance is launched, and the desired capacity is exceeded by three units. For more information, see Instance weighting for Amazon EC2 Auto Scaling in the Amazon EC2 Auto Scaling User Guide. Value must be in the range of 1–999.",
+       "LaunchTemplateOverrides$WeightedCapacity": "The number of capacity units provided by the instance type specified in InstanceType in terms of virtual CPUs, memory, storage, throughput, or other relative performance characteristic. When a Spot or On-Demand Instance is launched, the capacity units count toward the desired capacity. Amazon EC2 Auto Scaling launches instances until the desired capacity is totally fulfilled, even if this results in an overage. For example, if there are two units remaining to fulfill capacity, and Amazon EC2 Auto Scaling can only launch an instance with a WeightedCapacity of five units, the instance is launched, and the desired capacity is exceeded by three units. For more information, see Configuring instance weighting for Amazon EC2 Auto Scaling in the Amazon EC2 Auto Scaling User Guide. Value must be in the range of 1–999.",
        "The aggregation type for the CloudWatch metrics. The valid values are Minimum, Maximum, and Average. If the aggregation type is null, the value is treated as Average. Valid only if the policy type is StepScaling.",
        "The aggregation type for the CloudWatch metrics. The valid values are Minimum, Maximum, and Average.",
        "The health status of the instance. Set to Healthy to have the instance remain in service. Set to Unhealthy to have the instance be out of service. Amazon EC2 Auto Scaling terminates and replaces the unhealthy instance.
The number of Amazon S3 files to select.
" } }, + "MaxOutputFiles": { + "base": null, + "refs": { + "Output$MaxOutputFiles": "Maximum number of files to be generated by the job and written to the output folder. For output partitioned by column(s), the MaxOutputFiles value is the maximum number of files per partition.
" + } + }, "MaxResults100": { "base": null, "refs": { diff --git a/models/apis/fms/2018-01-01/api-2.json b/models/apis/fms/2018-01-01/api-2.json index f25fcd457c5..775b0a4cc47 100644 --- a/models/apis/fms/2018-01-01/api-2.json +++ b/models/apis/fms/2018-01-01/api-2.json @@ -509,9 +509,15 @@ "members":{ "ResourceId":{"shape":"ResourceId"}, "ViolationReason":{"shape":"ViolationReason"}, - "ResourceType":{"shape":"ResourceType"} + "ResourceType":{"shape":"ResourceType"}, + "Metadata":{"shape":"ComplianceViolatorMetadata"} } }, + "ComplianceViolatorMetadata":{ + "type":"map", + "key":{"shape":"LengthBoundedString"}, + "value":{"shape":"LengthBoundedString"} + }, "ComplianceViolators":{ "type":"list", "member":{"shape":"ComplianceViolator"} @@ -733,6 +739,27 @@ "type":"list", "member":{"shape":"ExpectedRoute"} }, + "FMSPolicyUpdateFirewallCreationConfigAction":{ + "type":"structure", + "members":{ + "Description":{"shape":"LengthBoundedString"}, + "FirewallCreationConfig":{"shape":"ManagedServiceData"} + } + }, + "FirewallDeploymentModel":{ + "type":"string", + "enum":["CENTRALIZED"] + }, + "FirewallSubnetIsOutOfScopeViolation":{ + "type":"structure", + "members":{ + "FirewallSubnetId":{"shape":"ResourceId"}, + "VpcId":{"shape":"ResourceId"}, + "SubnetAvailabilityZone":{"shape":"LengthBoundedString"}, + "SubnetAvailabilityZoneId":{"shape":"LengthBoundedString"}, + "VpcEndpointId":{"shape":"ResourceId"} + } + }, "GetAdminAccountRequest":{ "type":"structure", "members":{ @@ -1013,7 +1040,7 @@ "type":"string", "max":8192, "min":1, - "pattern":".*" + "pattern":"^((?!\\\\[nr]).)+" }, "MemberAccounts":{ "type":"list", @@ -1115,6 +1142,12 @@ "TargetViolationReason":{"shape":"TargetViolationReason"} } }, + "NetworkFirewallPolicy":{ + "type":"structure", + "members":{ + "FirewallDeploymentModel":{"shape":"FirewallDeploymentModel"} + } + }, "NetworkFirewallPolicyDescription":{ "type":"structure", "members":{ @@ -1249,6 +1282,12 @@ "min":36, "pattern":"^[a-z0-9A-Z-]{36}$" }, + "PolicyOption":{ + "type":"structure", + "members":{ + "NetworkFirewallPolicy":{"shape":"NetworkFirewallPolicy"} + } + }, "PolicySummary":{ "type":"structure", "members":{ @@ -1414,7 +1453,8 @@ "EC2CopyRouteTableAction":{"shape":"EC2CopyRouteTableAction"}, "EC2ReplaceRouteTableAssociationAction":{"shape":"EC2ReplaceRouteTableAssociationAction"}, "EC2AssociateRouteTableAction":{"shape":"EC2AssociateRouteTableAction"}, - "EC2CreateRouteTableAction":{"shape":"EC2CreateRouteTableAction"} + "EC2CreateRouteTableAction":{"shape":"EC2CreateRouteTableAction"}, + "FMSPolicyUpdateFirewallCreationConfigAction":{"shape":"FMSPolicyUpdateFirewallCreationConfigAction"} } }, "RemediationActionDescription":{ @@ -1524,7 +1564,9 @@ "DnsRuleGroupPriorityConflictViolation":{"shape":"DnsRuleGroupPriorityConflictViolation"}, "DnsDuplicateRuleGroupViolation":{"shape":"DnsDuplicateRuleGroupViolation"}, "DnsRuleGroupLimitExceededViolation":{"shape":"DnsRuleGroupLimitExceededViolation"}, - "PossibleRemediationActions":{"shape":"PossibleRemediationActions"} + "PossibleRemediationActions":{"shape":"PossibleRemediationActions"}, + "FirewallSubnetIsOutOfScopeViolation":{"shape":"FirewallSubnetIsOutOfScopeViolation"}, + "RouteHasOutOfScopeEndpointViolation":{"shape":"RouteHasOutOfScopeEndpointViolation"} } }, "ResourceViolations":{ @@ -1540,6 +1582,23 @@ "Target":{"shape":"LengthBoundedString"} } }, + "RouteHasOutOfScopeEndpointViolation":{ + "type":"structure", + "members":{ + "SubnetId":{"shape":"ResourceId"}, + "VpcId":{"shape":"ResourceId"}, + 
"RouteTableId":{"shape":"ResourceId"}, + "ViolatingRoutes":{"shape":"Routes"}, + "SubnetAvailabilityZone":{"shape":"LengthBoundedString"}, + "SubnetAvailabilityZoneId":{"shape":"LengthBoundedString"}, + "CurrentFirewallSubnetRouteTable":{"shape":"ResourceId"}, + "FirewallSubnetId":{"shape":"ResourceId"}, + "FirewallSubnetRoutes":{"shape":"Routes"}, + "InternetGatewayId":{"shape":"ResourceId"}, + "CurrentInternetGatewayRouteTable":{"shape":"ResourceId"}, + "InternetGatewayRoutes":{"shape":"Routes"} + } + }, "Routes":{ "type":"list", "member":{"shape":"Route"} @@ -1573,7 +1632,8 @@ "required":["Type"], "members":{ "Type":{"shape":"SecurityServiceType"}, - "ManagedServiceData":{"shape":"ManagedServiceData"} + "ManagedServiceData":{"shape":"ManagedServiceData"}, + "PolicyOption":{"shape":"PolicyOption"} } }, "SecurityServiceType":{ @@ -1762,7 +1822,9 @@ "INTERNET_TRAFFIC_NOT_INSPECTED", "BLACK_HOLE_ROUTE_DETECTED", "BLACK_HOLE_ROUTE_DETECTED_IN_FIREWALL_SUBNET", - "RESOURCE_MISSING_DNS_FIREWALL" + "RESOURCE_MISSING_DNS_FIREWALL", + "FIREWALL_SUBNET_IS_OUT_OF_SCOPE", + "ROUTE_HAS_OUT_OF_SCOPE_ENDPOINT" ] }, "ViolationTarget":{ diff --git a/models/apis/fms/2018-01-01/docs-2.json b/models/apis/fms/2018-01-01/docs-2.json index e795eb44ffb..410057e9db0 100644 --- a/models/apis/fms/2018-01-01/docs-2.json +++ b/models/apis/fms/2018-01-01/docs-2.json @@ -154,7 +154,7 @@ "ListProtocolsListsRequest$DefaultLists": "Specifies whether the lists to retrieve are default lists owned by Firewall Manager.
", "NetworkFirewallInternetTrafficNotInspectedViolation$IsRouteTableUsedInDifferentAZ": "Information about whether the route table is used in another Availability Zone.
", "NetworkFirewallInvalidRouteConfigurationViolation$IsRouteTableUsedInDifferentAZ": "Information about whether the route table is used in another Availability Zone.
", - "Policy$ExcludeResourceTags": "If set to True
, resources with the tags that are specified in the ResourceTag
array are not in scope of the policy. If set to False
, and the ResourceTag
array is not null, only resources with the specified tags are in scope of the policy.
If set to True
, resources with the tags that are specified in the ResourceTag
array are not in scope of the policy. If set to False
, and the ResourceTag
array is not null, only resources with the specified tags are in scope of the policy.
This option isn't available for the centralized deployment model when creating policies to configure Network Firewall.
", "Policy$RemediationEnabled": "Indicates if the policy should be automatically applied to new resources.
", "Policy$DeleteUnusedFMManagedResources": "Indicates whether Firewall Manager should automatically remove protections from resources that leave the policy scope and clean up resources that Firewall Manager is managing for accounts when those accounts leave policy scope. For example, Firewall Manager will disassociate a Firewall Manager managed web ACL from a protected customer resource when the customer resource leaves policy scope.
By default, Firewall Manager doesn't remove protections or delete Firewall Manager managed resources.
This option is not available for Shield Advanced or WAF Classic policies.
", "PolicyComplianceDetail$EvaluationLimitExceeded": "Indicates if over 100 resources are noncompliant with the Firewall Manager policy.
", @@ -186,6 +186,12 @@ "ComplianceViolators$member": null } }, + "ComplianceViolatorMetadata": { + "base": null, + "refs": { + "ComplianceViolator$Metadata": "Metadata about the resource that doesn't comply with the policy scope.
" + } + }, "ComplianceViolators": { "base": null, "refs": { @@ -213,8 +219,8 @@ "CustomerPolicyScopeMap": { "base": null, "refs": { - "Policy$IncludeMap": "Specifies the Amazon Web Services account IDs and Organizations organizational units (OUs) to include in the policy. Specifying an OU is the equivalent of specifying all accounts in the OU and in any of its child OUs, including any child OUs and accounts that are added at a later time.
You can specify inclusions or exclusions, but not both. If you specify an IncludeMap
, Firewall Manager applies the policy to all accounts specified by the IncludeMap
, and does not evaluate any ExcludeMap
specifications. If you do not specify an IncludeMap
, then Firewall Manager applies the policy to all accounts except for those specified by the ExcludeMap
.
You can specify account IDs, OUs, or a combination:
Specify account IDs by setting the key to ACCOUNT
. For example, the following is a valid map: {“ACCOUNT” : [“accountID1”, “accountID2”]}
.
Specify OUs by setting the key to ORG_UNIT
. For example, the following is a valid map: {“ORG_UNIT” : [“ouid111”, “ouid112”]}
.
Specify accounts and OUs together in a single map, separated with a comma. For example, the following is a valid map: {“ACCOUNT” : [“accountID1”, “accountID2”], “ORG_UNIT” : [“ouid111”, “ouid112”]}
.
Specifies the Amazon Web Services account IDs and Organizations organizational units (OUs) to exclude from the policy. Specifying an OU is the equivalent of specifying all accounts in the OU and in any of its child OUs, including any child OUs and accounts that are added at a later time.
You can specify inclusions or exclusions, but not both. If you specify an IncludeMap
, Firewall Manager applies the policy to all accounts specified by the IncludeMap
, and does not evaluate any ExcludeMap
specifications. If you do not specify an IncludeMap
, then Firewall Manager applies the policy to all accounts except for those specified by the ExcludeMap
.
You can specify account IDs, OUs, or a combination:
Specify account IDs by setting the key to ACCOUNT
. For example, the following is a valid map: {“ACCOUNT” : [“accountID1”, “accountID2”]}
.
Specify OUs by setting the key to ORG_UNIT
. For example, the following is a valid map: {“ORG_UNIT” : [“ouid111”, “ouid112”]}
.
Specify accounts and OUs together in a single map, separated with a comma. For example, the following is a valid map: {“ACCOUNT” : [“accountID1”, “accountID2”], “ORG_UNIT” : [“ouid111”, “ouid112”]}
.
Specifies the Amazon Web Services account IDs and Organizations organizational units (OUs) to include in the policy. Specifying an OU is the equivalent of specifying all accounts in the OU and in any of its child OUs, including any child OUs and accounts that are added at a later time.
You can specify inclusions or exclusions, but not both. If you specify an IncludeMap
, Firewall Manager applies the policy to all accounts specified by the IncludeMap
, and does not evaluate any ExcludeMap
specifications. If you do not specify an IncludeMap
, then Firewall Manager applies the policy to all accounts except for those specified by the ExcludeMap
.
You can specify account IDs, OUs, or a combination:
Specify account IDs by setting the key to ACCOUNT
. For example, the following is a valid map: {“ACCOUNT” : [“accountID1”, “accountID2”]}
.
Specify OUs by setting the key to ORG_UNIT
. For example, the following is a valid map: {“ORG_UNIT” : [“ouid111”, “ouid112”]}
.
Specify accounts and OUs together in a single map, separated with a comma. For example, the following is a valid map: {“ACCOUNT” : [“accountID1”, “accountID2”], “ORG_UNIT” : [“ouid111”, “ouid112”]}
.
This option isn't available for the centralized deployment model when creating policies to configure Network Firewall.
", + "Policy$ExcludeMap": "Specifies the Amazon Web Services account IDs and Organizations organizational units (OUs) to exclude from the policy. Specifying an OU is the equivalent of specifying all accounts in the OU and in any of its child OUs, including any child OUs and accounts that are added at a later time.
You can specify inclusions or exclusions, but not both. If you specify an IncludeMap
, Firewall Manager applies the policy to all accounts specified by the IncludeMap
, and does not evaluate any ExcludeMap
specifications. If you do not specify an IncludeMap
, then Firewall Manager applies the policy to all accounts except for those specified by the ExcludeMap
.
You can specify account IDs, OUs, or a combination:
Specify account IDs by setting the key to ACCOUNT
. For example, the following is a valid map: {“ACCOUNT” : [“accountID1”, “accountID2”]}
.
Specify OUs by setting the key to ORG_UNIT
. For example, the following is a valid map: {“ORG_UNIT” : [“ouid111”, “ouid112”]}
.
Specify accounts and OUs together in a single map, separated with a comma. For example, the following is a valid map: {“ACCOUNT” : [“accountID1”, “accountID2”], “ORG_UNIT” : [“ouid111”, “ouid112”]}
.
This option isn't available for the centralized deployment model when creating policies to configure Network Firewall.
" } }, "DeleteAppsListRequest": { @@ -372,6 +378,24 @@ "NetworkFirewallMissingExpectedRoutesViolation$ExpectedRoutes": "The expected routes.
" } }, + "FMSPolicyUpdateFirewallCreationConfigAction": { + "base": "Contains information about the actions that you can take to remediate scope violations caused by your policy's FirewallCreationConfig
. FirewallCreationConfig
is an optional configuration that you can use to choose which Availability Zones Firewall Manager creates Network Firewall endpoints in.
The remedial action to take when updating a firewall configuration.
" + } + }, + "FirewallDeploymentModel": { + "base": null, + "refs": { + "NetworkFirewallPolicy$FirewallDeploymentModel": "Defines the deployment model to use for the firewall policy. To use a distributed model, set PolicyOption to NULL
.
Contains details about the firewall subnet that violates the policy scope.
", + "refs": { + "ResourceViolation$FirewallSubnetIsOutOfScopeViolation": "Contains details about the firewall subnet that violates the policy scope.
" + } + }, "GetAdminAccountRequest": { "base": null, "refs": { @@ -492,6 +516,8 @@ "refs": { "ActionTarget$Description": "A description of the remediation action target.
", "AwsVPCSecurityGroupViolation$ViolationTargetDescription": "A description of the security group that violates the policy.
", + "ComplianceViolatorMetadata$key": null, + "ComplianceViolatorMetadata$value": null, "DnsDuplicateRuleGroupViolation$ViolationTargetDescription": "A description of the violation that specifies the rule group and VPC.
", "DnsRuleGroupLimitExceededViolation$ViolationTargetDescription": "A description of the violation that specifies the rule group and VPC.
", "DnsRuleGroupPriorityConflictViolation$ViolationTargetDescription": "A description of the violation that specifies the VPC and the rule group that's already associated with it.
", @@ -502,6 +528,9 @@ "EC2DeleteRouteAction$Description": "A description of the DeleteRoute action.
", "EC2ReplaceRouteAction$Description": "A description of the ReplaceRoute action in Amazon EC2.
", "EC2ReplaceRouteTableAssociationAction$Description": "A description of the ReplaceRouteTableAssociation action in Amazon EC2.
", + "FMSPolicyUpdateFirewallCreationConfigAction$Description": "Describes the remedial action.
", + "FirewallSubnetIsOutOfScopeViolation$SubnetAvailabilityZone": "The Availability Zone of the firewall subnet that violates the policy scope.
", + "FirewallSubnetIsOutOfScopeViolation$SubnetAvailabilityZoneId": "The Availability Zone ID of the firewall subnet that violates the policy scope.
", "LengthBoundedStringList$member": null, "NetworkFirewallInternetTrafficNotInspectedViolation$SubnetAvailabilityZone": "The subnet Availability Zone.
", "NetworkFirewallMissingExpectedRTViolation$AvailabilityZone": "The Availability Zone of a violating subnet.
", @@ -512,6 +541,8 @@ "RemediationAction$Description": "A description of a remediation action.
", "Route$Destination": "The destination of the route.
", "Route$Target": "The route's target.
", + "RouteHasOutOfScopeEndpointViolation$SubnetAvailabilityZone": "The subnet's Availability Zone.
", + "RouteHasOutOfScopeEndpointViolation$SubnetAvailabilityZoneId": "The ID of the subnet's Availability Zone.
", "SecurityGroupRuleDescription$Protocol": "The IP protocol name (tcp
, udp
, icmp
, icmpv6
) or number.
Brief description for the requested resource.
" } @@ -603,7 +634,8 @@ "ManagedServiceData": { "base": null, "refs": { - "SecurityServicePolicyData$ManagedServiceData": "Details about the service that are specific to the service type, in JSON format.
Example: DNS_FIREWALL
\"{\\\"type\\\":\\\"DNS_FIREWALL\\\",\\\"preProcessRuleGroups\\\":[{\\\"ruleGroupId\\\":\\\"rslvr-frg-1\\\",\\\"priority\\\":10}],\\\"postProcessRuleGroups\\\":[{\\\"ruleGroupId\\\":\\\"rslvr-frg-2\\\",\\\"priority\\\":9911}]}\"
Valid values for preProcessRuleGroups
are between 1 and 99. Valid values for postProcessRuleGroups
are between 9901 and 10000.
Example: NETWORK_FIREWALL
\"{\\\"type\\\":\\\"NETWORK_FIREWALL\\\",\\\"networkFirewallStatelessRuleGroupReferences\\\":[{\\\"resourceARN\\\":\\\"arn:aws:network-firewall:us-west-1:1234567891011:stateless-rulegroup/rulegroup2\\\",\\\"priority\\\":10}],\\\"networkFirewallStatelessDefaultActions\\\":[\\\"aws:pass\\\",\\\"custom1\\\"],\\\"networkFirewallStatelessFragmentDefaultActions\\\":[\\\"custom2\\\",\\\"aws:pass\\\"],\\\"networkFirewallStatelessCustomActions\\\":[{\\\"actionName\\\":\\\"custom1\\\",\\\"actionDefinition\\\":{\\\"publishMetricAction\\\":{\\\"dimensions\\\":[{\\\"value\\\":\\\"dimension1\\\"}]}}},{\\\"actionName\\\":\\\"custom2\\\",\\\"actionDefinition\\\":{\\\"publishMetricAction\\\":{\\\"dimensions\\\":[{\\\"value\\\":\\\"dimension2\\\"}]}}}],\\\"networkFirewallStatefulRuleGroupReferences\\\":[{\\\"resourceARN\\\":\\\"arn:aws:network-firewall:us-west-1:1234567891011:stateful-rulegroup/rulegroup1\\\"}],\\\"networkFirewallOrchestrationConfig\\\":{\\\"singleFirewallEndpointPerVPC\\\":true,\\\"allowedIPV4CidrList\\\":[\\\"10.24.34.0/28\\\"]} }\"
Specification for SHIELD_ADVANCED
for Amazon CloudFront distributions
\"{\\\"type\\\":\\\"SHIELD_ADVANCED\\\",\\\"automaticResponseConfiguration\\\": {\\\"automaticResponseStatus\\\":\\\"ENABLED|IGNORED|DISABLED\\\", \\\"automaticResponseAction\\\":\\\"BLOCK|COUNT\\\"}, \\\"overrideCustomerWebaclClassic\\\":true|false}\"
For example: \"{\\\"type\\\":\\\"SHIELD_ADVANCED\\\",\\\"automaticResponseConfiguration\\\": {\\\"automaticResponseStatus\\\":\\\"ENABLED\\\", \\\"automaticResponseAction\\\":\\\"COUNT\\\"}}\"
The default value for automaticResponseStatus
is IGNORED
. The value for automaticResponseAction
is only required when automaticResponseStatus
is set to ENABLED
. The default value for overrideCustomerWebaclClassic
is false
.
For other resource types that you can protect with a Shield Advanced policy, this ManagedServiceData
configuration is an empty string.
Example: WAFV2
\"{\\\"type\\\":\\\"WAFV2\\\",\\\"preProcessRuleGroups\\\":[{\\\"ruleGroupArn\\\":null,\\\"overrideAction\\\":{\\\"type\\\":\\\"NONE\\\"},\\\"managedRuleGroupIdentifier\\\":{\\\"version\\\":null,\\\"vendorName\\\":\\\"AWS\\\",\\\"managedRuleGroupName\\\":\\\"AWSManagedRulesAmazonIpReputationList\\\"},\\\"ruleGroupType\\\":\\\"ManagedRuleGroup\\\",\\\"excludeRules\\\":[{\\\"name\\\":\\\"NoUserAgent_HEADER\\\"}]}],\\\"postProcessRuleGroups\\\":[],\\\"defaultAction\\\":{\\\"type\\\":\\\"ALLOW\\\"},\\\"overrideCustomerWebACLAssociation\\\":false,\\\"loggingConfiguration\\\":{\\\"logDestinationConfigs\\\":[\\\"arn:aws:firehose:us-west-2:12345678912:deliverystream/aws-waf-logs-fms-admin-destination\\\"],\\\"redactedFields\\\":[{\\\"redactedFieldType\\\":\\\"SingleHeader\\\",\\\"redactedFieldValue\\\":\\\"Cookies\\\"},{\\\"redactedFieldType\\\":\\\"Method\\\"}]}}\"
In the loggingConfiguration
, you can specify one logDestinationConfigs
, you can optionally provide up to 20 redactedFields
, and the RedactedFieldType
must be one of URI
, QUERY_STRING
, HEADER
, or METHOD
.
Example: WAF Classic
\"{\\\"type\\\": \\\"WAF\\\", \\\"ruleGroups\\\": [{\\\"id\\\":\\\"12345678-1bcd-9012-efga-0987654321ab\\\", \\\"overrideAction\\\" : {\\\"type\\\": \\\"COUNT\\\"}}], \\\"defaultAction\\\": {\\\"type\\\": \\\"BLOCK\\\"}}\"
Example: SECURITY_GROUPS_COMMON
\"{\\\"type\\\":\\\"SECURITY_GROUPS_COMMON\\\",\\\"revertManualSecurityGroupChanges\\\":false,\\\"exclusiveResourceSecurityGroupManagement\\\":false, \\\"applyToAllEC2InstanceENIs\\\":false,\\\"securityGroups\\\":[{\\\"id\\\":\\\" sg-000e55995d61a06bd\\\"}]}\"
Example: Shared VPCs. Apply the preceding policy to resources in shared VPCs as well as to those in VPCs that the account owns
\"{\\\"type\\\":\\\"SECURITY_GROUPS_COMMON\\\",\\\"revertManualSecurityGroupChanges\\\":false,\\\"exclusiveResourceSecurityGroupManagement\\\":false, \\\"applyToAllEC2InstanceENIs\\\":false,\\\"includeSharedVPC\\\":true,\\\"securityGroups\\\":[{\\\"id\\\":\\\" sg-000e55995d61a06bd\\\"}]}\"
Example: SECURITY_GROUPS_CONTENT_AUDIT
\"{\\\"type\\\":\\\"SECURITY_GROUPS_CONTENT_AUDIT\\\",\\\"securityGroups\\\":[{\\\"id\\\":\\\"sg-000e55995d61a06bd\\\"}],\\\"securityGroupAction\\\":{\\\"type\\\":\\\"ALLOW\\\"}}\"
The security group action for content audit can be ALLOW
or DENY
. For ALLOW
, all in-scope security group rules must be within the allowed range of the policy's security group rules. For DENY
, all in-scope security group rules must not contain a value or a range that matches a rule value or range in the policy security group.
Example: SECURITY_GROUPS_USAGE_AUDIT
\"{\\\"type\\\":\\\"SECURITY_GROUPS_USAGE_AUDIT\\\",\\\"deleteUnusedSecurityGroups\\\":true,\\\"coalesceRedundantSecurityGroups\\\":true}\"
A FirewallCreationConfig
that you can copy into your current policy's SecurityServiceData in order to remedy scope violations.
Details about the service that are specific to the service type, in JSON format.
Example: DNS_FIREWALL
\"{\\\"type\\\":\\\"DNS_FIREWALL\\\",\\\"preProcessRuleGroups\\\":[{\\\"ruleGroupId\\\":\\\"rslvr-frg-1\\\",\\\"priority\\\":10}],\\\"postProcessRuleGroups\\\":[{\\\"ruleGroupId\\\":\\\"rslvr-frg-2\\\",\\\"priority\\\":9911}]}\"
Valid values for preProcessRuleGroups
are between 1 and 99. Valid values for postProcessRuleGroups
are between 9901 and 10000.
Example: NETWORK_FIREWALL
- Centralized deployment model.
\"{\\\"type\\\":\\\"NETWORK_FIREWALL\\\",\\\"awsNetworkFirewallConfig\\\":{\\\"networkFirewallStatelessRuleGroupReferences\\\":[{\\\"resourceARN\\\":\\\"arn:aws:network-firewall:us-east-1:123456789011:stateless-rulegroup/test\\\",\\\"priority\\\":1}],\\\"networkFirewallStatelessDefaultActions\\\":[\\\"aws:forward_to_sfe\\\",\\\"customActionName\\\"],\\\"networkFirewallStatelessFragmentDefaultActions\\\":[\\\"aws:forward_to_sfe\\\",\\\"customActionName\\\"],\\\"networkFirewallStatelessCustomActions\\\":[{\\\"actionName\\\":\\\"customActionName\\\",\\\"actionDefinition\\\":{\\\"publishMetricAction\\\":{\\\"dimensions\\\":[{\\\"value\\\":\\\"metricdimensionvalue\\\"}]}}}],\\\"networkFirewallStatefulRuleGroupReferences\\\":[{\\\"resourceARN\\\":\\\"arn:aws:network-firewall:us-east-1:123456789011:stateful-rulegroup/test\\\"}],\\\"networkFirewallLoggingConfiguration\\\":{\\\"logDestinationConfigs\\\":[{\\\"logDestinationType\\\":\\\"S3\\\",\\\"logType\\\":\\\"ALERT\\\",\\\"logDestination\\\":{\\\"bucketName\\\":\\\"s3-bucket-name\\\"}},{\\\"logDestinationType\\\":\\\"S3\\\",\\\"logType\\\":\\\"FLOW\\\",\\\"logDestination\\\":{\\\"bucketName\\\":\\\"s3-bucket-name\\\"}}],\\\"overrideExistingConfig\\\":true}},\\\"firewallDeploymentModel\\\":{\\\"centralizedFirewallDeploymentModel\\\":{\\\"centralizedFirewallOrchestrationConfig\\\":{\\\"inspectionVpcIds\\\":[{\\\"resourceId\\\":\\\"vpc-1234\\\",\\\"accountId\\\":\\\"123456789011\\\"}],\\\"firewallCreationConfig\\\":{\\\"endpointLocation\\\":{\\\"availabilityZoneConfigList\\\":[{\\\"availabilityZoneId\\\":null,\\\"availabilityZoneName\\\":\\\"us-east-1a\\\",\\\"allowedIPV4CidrList\\\":[\\\"10.0.0.0/28\\\"]}]}},\\\"allowedIPV4CidrList\\\":[]}}}}\"
To use the centralized deployment model, you must set PolicyOption to CENTRALIZED
.
Example: NETWORK_FIREWALL
- Distributed deployment model with automatic Availability Zone configuration. With automatic Availbility Zone configuration, Firewall Manager chooses which Availability Zones to create the endpoints in.
\"{ \\\"type\\\": \\\"NETWORK_FIREWALL\\\", \\\"networkFirewallStatelessRuleGroupReferences\\\": [ { \\\"resourceARN\\\": \\\"arn:aws:network-firewall:us-east-1:123456789011:stateless-rulegroup/test\\\", \\\"priority\\\": 1 } ], \\\"networkFirewallStatelessDefaultActions\\\": [ \\\"aws:forward_to_sfe\\\", \\\"customActionName\\\" ], \\\"networkFirewallStatelessFragmentDefaultActions\\\": [ \\\"aws:forward_to_sfe\\\", \\\"customActionName\\\" ], \\\"networkFirewallStatelessCustomActions\\\": [ { \\\"actionName\\\": \\\"customActionName\\\", \\\"actionDefinition\\\": { \\\"publishMetricAction\\\": { \\\"dimensions\\\": [ { \\\"value\\\": \\\"metricdimensionvalue\\\" } ] } } } ], \\\"networkFirewallStatefulRuleGroupReferences\\\": [ { \\\"resourceARN\\\": \\\"arn:aws:network-firewall:us-east-1:123456789011:stateful-rulegroup/test\\\" } ], \\\"networkFirewallOrchestrationConfig\\\": { \\\"singleFirewallEndpointPerVPC\\\": false, \\\"allowedIPV4CidrList\\\": [ \\\"10.0.0.0/28\\\", \\\"192.168.0.0/28\\\" ], \\\"routeManagementAction\\\": \\\"OFF\\\" }, \\\"networkFirewallLoggingConfiguration\\\": { \\\"logDestinationConfigs\\\": [ { \\\"logDestinationType\\\": \\\"S3\\\", \\\"logType\\\": \\\"ALERT\\\", \\\"logDestination\\\": { \\\"bucketName\\\": \\\"s3-bucket-name\\\" } }, { \\\"logDestinationType\\\": \\\"S3\\\", \\\"logType\\\": \\\"FLOW\\\", \\\"logDestination\\\": { \\\"bucketName\\\": \\\"s3-bucket-name\\\" } } ], \\\"overrideExistingConfig\\\": true } }\"
To use the distributed deployment model, you must set PolicyOption to NULL
.
Example: NETWORK_FIREWALL
- Distributed deployment model with automatic Availability Zone configuration, and route management.
\"{ \\\"type\\\": \\\"NETWORK_FIREWALL\\\", \\\"networkFirewallStatelessRuleGroupReferences\\\": [ { \\\"resourceARN\\\": \\\"arn:aws:network-firewall:us-east-1:123456789011:stateless-rulegroup/test\\\", \\\"priority\\\": 1 } ], \\\"networkFirewallStatelessDefaultActions\\\": [ \\\"aws:forward_to_sfe\\\", \\\"customActionName\\\" ], \\\"networkFirewallStatelessFragmentDefaultActions\\\": [ \\\"aws:forward_to_sfe\\\", \\\"customActionName\\\" ], \\\"networkFirewallStatelessCustomActions\\\": [ { \\\"actionName\\\": \\\"customActionName\\\", \\\"actionDefinition\\\": { \\\"publishMetricAction\\\": { \\\"dimensions\\\": [ { \\\"value\\\": \\\"metricdimensionvalue\\\" } ] } } } ], \\\"networkFirewallStatefulRuleGroupReferences\\\": [ { \\\"resourceARN\\\": \\\"arn:aws:network-firewall:us-east-1:123456789011:stateful-rulegroup/test\\\" } ], \\\"networkFirewallOrchestrationConfig\\\": { \\\"singleFirewallEndpointPerVPC\\\": false, \\\"allowedIPV4CidrList\\\": [ \\\"10.0.0.0/28\\\", \\\"192.168.0.0/28\\\" ], \\\"routeManagementAction\\\": \\\"MONITOR\\\", \\\"routeManagementTargetTypes\\\": [ \\\"InternetGateway\\\" ] }, \\\"networkFirewallLoggingConfiguration\\\": { \\\"logDestinationConfigs\\\": [ { \\\"logDestinationType\\\": \\\"S3\\\", \\\"logType\\\": \\\"ALERT\\\", \\\"logDestination\\\": { \\\"bucketName\\\": \\\"s3-bucket-name\\\" } }, { \\\"logDestinationType\\\": \\\"S3\\\", \\\"logType\\\": \\\"FLOW\\\", \\\"logDestination\\\": { \\\"bucketName\\\": \\\"s3-bucket-name\\\" } } ], \\\"overrideExistingConfig\\\": true } }\"
Example: NETWORK_FIREWALL
- Distributed deployment model with custom Availability Zone configuration. With custom Availability Zone configuration, you define which specific Availability Zones to create endpoints in by configuring firewallCreationConfig
.
\"{ \\\"type\\\":\\\"NETWORK_FIREWALL\\\",\\\"networkFirewallStatelessRuleGroupReferences\\\":[{\\\"resourceARN\\\":\\\"arn:aws:network-firewall:us-east-1:123456789011:stateless-rulegroup/test\\\",\\\"priority\\\":1}], \\\"networkFirewallStatelessDefaultActions\\\":[ \\\"aws:forward_to_sfe\\\", \\\"customActionName\\\" ], \\\"networkFirewallStatelessFragmentDefaultActions\\\":[ \\\"aws:forward_to_sfe\\\", \\\"fragmentcustomactionname\\\" ], \\\"networkFirewallStatelessCustomActions\\\":[ { \\\"actionName\\\":\\\"customActionName\\\", \\\"actionDefinition\\\":{ \\\"publishMetricAction\\\":{ \\\"dimensions\\\":[ { \\\"value\\\":\\\"metricdimensionvalue\\\" } ] } } }, { \\\"actionName\\\":\\\"fragmentcustomactionname\\\", \\\"actionDefinition\\\":{ \\\"publishMetricAction\\\":{ \\\"dimensions\\\":[ { \\\"value\\\":\\\"fragmentmetricdimensionvalue\\\" } ] } } } ], \\\"networkFirewallStatefulRuleGroupReferences\\\":[ { \\\"resourceARN\\\":\\\"arn:aws:network-firewall:us-east-1:123456789011:stateful-rulegroup/test\\\" } ], \\\"networkFirewallOrchestrationConfig\\\":{ \\\"firewallCreationConfig\\\":{ \\\"endpointLocation\\\":{ \\\"availabilityZoneConfigList\\\":[ { \\\"availabilityZoneId\\\":null, \\\"availabilityZoneName\\\":\\\"us-east-1a\\\", \\\"allowedIPV4CidrList\\\":[ \\\"10.0.0.0/28\\\" ] }, { ¯\\\"availabilityZoneId\\\":null, \\\"availabilityZoneName\\\":\\\"us-east-1b\\\", \\\"allowedIPV4CidrList\\\":[ \\\"10.0.0.0/28\\\" ] } ] } }, \\\"singleFirewallEndpointPerVPC\\\":false, \\\"allowedIPV4CidrList\\\":null, \\\"routeManagementAction\\\":\\\"OFF\\\", \\\"networkFirewallLoggingConfiguration\\\":{ \\\"logDestinationConfigs\\\":[ { \\\"logDestinationType\\\":\\\"S3\\\", \\\"logType\\\":\\\"ALERT\\\", \\\"logDestination\\\":{ \\\"bucketName\\\":\\\"s3-bucket-name\\\" } }, { \\\"logDestinationType\\\":\\\"S3\\\", \\\"logType\\\":\\\"FLOW\\\", \\\"logDestination\\\":{ \\\"bucketName\\\":\\\"s3-bucket-name\\\" } } ], \\\"overrideExistingConfig\\\":boolean } }\"
Example: NETWORK_FIREWALL
- Distributed deployment model with custom Availability Zone configuration, and route management.
\"{ \\\"type\\\":\\\"NETWORK_FIREWALL\\\",\\\"networkFirewallStatelessRuleGroupReferences\\\":[{\\\"resourceARN\\\":\\\"arn:aws:network-firewall:us-east-1:123456789011:stateless-rulegroup/test\\\",\\\"priority\\\":1}], \\\"networkFirewallStatelessDefaultActions\\\":[ \\\"aws:forward_to_sfe\\\", \\\"customActionName\\\" ], \\\"networkFirewallStatelessFragmentDefaultActions\\\":[ \\\"aws:forward_to_sfe\\\", \\\"fragmentcustomactionname\\\" ], \\\"networkFirewallStatelessCustomActions\\\":[ { \\\"actionName\\\":\\\"customActionName\\\", \\\"actionDefinition\\\":{ \\\"publishMetricAction\\\":{ \\\"dimensions\\\":[ { \\\"value\\\":\\\"metricdimensionvalue\\\" } ] } } }, { \\\"actionName\\\":\\\"fragmentcustomactionname\\\", \\\"actionDefinition\\\":{ \\\"publishMetricAction\\\":{ \\\"dimensions\\\":[ { \\\"value\\\":\\\"fragmentmetricdimensionvalue\\\" } ] } } } ], \\\"networkFirewallStatefulRuleGroupReferences\\\":[ { \\\"resourceARN\\\":\\\"arn:aws:network-firewall:us-east-1:123456789011:stateful-rulegroup/test\\\" } ], \\\"networkFirewallOrchestrationConfig\\\":{ \\\"firewallCreationConfig\\\":{ \\\"endpointLocation\\\":{ \\\"availabilityZoneConfigList\\\":[ { \\\"availabilityZoneId\\\":null, \\\"availabilityZoneName\\\":\\\"us-east-1a\\\", \\\"allowedIPV4CidrList\\\":[ \\\"10.0.0.0/28\\\" ] }, { ¯\\\"availabilityZoneId\\\":null, \\\"availabilityZoneName\\\":\\\"us-east-1b\\\", \\\"allowedIPV4CidrList\\\":[ \\\"10.0.0.0/28\\\" ] } ] } }, \\\"singleFirewallEndpointPerVPC\\\":false, \\\"allowedIPV4CidrList\\\":null, \\\"routeManagementAction\\\":\\\"MONITOR\\\", \\\"routeManagementTargetTypes\\\":[ \\\"InternetGateway\\\" ], \\\"routeManagementConfig\\\":{ \\\"allowCrossAZTrafficIfNoEndpoint\\\":true } }, \\\"networkFirewallLoggingConfiguration\\\":{ \\\"logDestinationConfigs\\\":[ { \\\"logDestinationType\\\":\\\"S3\\\", \\\"logType\\\":\\\"ALERT\\\", \\\"logDestination\\\":{ \\\"bucketName\\\":\\\"s3-bucket-name\\\" } }, { \\\"logDestinationType\\\":\\\"S3\\\", \\\"logType\\\":\\\"FLOW\\\", \\\"logDestination\\\":{ \\\"bucketName\\\":\\\"s3-bucket-name\\\" } } ], \\\"overrideExistingConfig\\\":boolean } }\"
Specification for SHIELD_ADVANCED
for Amazon CloudFront distributions
\"{\\\"type\\\":\\\"SHIELD_ADVANCED\\\",\\\"automaticResponseConfiguration\\\": {\\\"automaticResponseStatus\\\":\\\"ENABLED|IGNORED|DISABLED\\\", \\\"automaticResponseAction\\\":\\\"BLOCK|COUNT\\\"}, \\\"overrideCustomerWebaclClassic\\\":true|false}\"
For example: \"{\\\"type\\\":\\\"SHIELD_ADVANCED\\\",\\\"automaticResponseConfiguration\\\": {\\\"automaticResponseStatus\\\":\\\"ENABLED\\\", \\\"automaticResponseAction\\\":\\\"COUNT\\\"}}\"
The default value for automaticResponseStatus
is IGNORED
. The value for automaticResponseAction
is only required when automaticResponseStatus
is set to ENABLED
. The default value for overrideCustomerWebaclClassic
is false
.
For other resource types that you can protect with a Shield Advanced policy, this ManagedServiceData
configuration is an empty string.
Example: WAFV2
\"{\\\"type\\\":\\\"WAFV2\\\",\\\"preProcessRuleGroups\\\":[{\\\"ruleGroupArn\\\":null,\\\"overrideAction\\\":{\\\"type\\\":\\\"NONE\\\"},\\\"managedRuleGroupIdentifier\\\":{\\\"version\\\":null,\\\"vendorName\\\":\\\"AWS\\\",\\\"managedRuleGroupName\\\":\\\"AWSManagedRulesAmazonIpReputationList\\\"},\\\"ruleGroupType\\\":\\\"ManagedRuleGroup\\\",\\\"excludeRules\\\":[{\\\"name\\\":\\\"NoUserAgent_HEADER\\\"}]}],\\\"postProcessRuleGroups\\\":[],\\\"defaultAction\\\":{\\\"type\\\":\\\"ALLOW\\\"},\\\"overrideCustomerWebACLAssociation\\\":false,\\\"loggingConfiguration\\\":{\\\"logDestinationConfigs\\\":[\\\"arn:aws:firehose:us-west-2:12345678912:deliverystream/aws-waf-logs-fms-admin-destination\\\"],\\\"redactedFields\\\":[{\\\"redactedFieldType\\\":\\\"SingleHeader\\\",\\\"redactedFieldValue\\\":\\\"Cookies\\\"},{\\\"redactedFieldType\\\":\\\"Method\\\"}]}}\"
In the loggingConfiguration
, you can specify one logDestinationConfigs
, you can optionally provide up to 20 redactedFields
, and the RedactedFieldType
must be one of URI
, QUERY_STRING
, HEADER
, or METHOD
.
Example: WAF Classic
\"{\\\"type\\\": \\\"WAF\\\", \\\"ruleGroups\\\": [{\\\"id\\\":\\\"12345678-1bcd-9012-efga-0987654321ab\\\", \\\"overrideAction\\\" : {\\\"type\\\": \\\"COUNT\\\"}}], \\\"defaultAction\\\": {\\\"type\\\": \\\"BLOCK\\\"}}\"
Example: SECURITY_GROUPS_COMMON
\"{\\\"type\\\":\\\"SECURITY_GROUPS_COMMON\\\",\\\"revertManualSecurityGroupChanges\\\":false,\\\"exclusiveResourceSecurityGroupManagement\\\":false, \\\"applyToAllEC2InstanceENIs\\\":false,\\\"securityGroups\\\":[{\\\"id\\\":\\\" sg-000e55995d61a06bd\\\"}]}\"
Example: Shared VPCs. Apply the preceding policy to resources in shared VPCs as well as to those in VPCs that the account owns
\"{\\\"type\\\":\\\"SECURITY_GROUPS_COMMON\\\",\\\"revertManualSecurityGroupChanges\\\":false,\\\"exclusiveResourceSecurityGroupManagement\\\":false, \\\"applyToAllEC2InstanceENIs\\\":false,\\\"includeSharedVPC\\\":true,\\\"securityGroups\\\":[{\\\"id\\\":\\\" sg-000e55995d61a06bd\\\"}]}\"
Example: SECURITY_GROUPS_CONTENT_AUDIT
\"{\\\"type\\\":\\\"SECURITY_GROUPS_CONTENT_AUDIT\\\",\\\"securityGroups\\\":[{\\\"id\\\":\\\"sg-000e55995d61a06bd\\\"}],\\\"securityGroupAction\\\":{\\\"type\\\":\\\"ALLOW\\\"}}\"
The security group action for content audit can be ALLOW
or DENY
. For ALLOW
, all in-scope security group rules must be within the allowed range of the policy's security group rules. For DENY
, all in-scope security group rules must not contain a value or a range that matches a rule value or range in the policy security group.
Example: SECURITY_GROUPS_USAGE_AUDIT
\"{\\\"type\\\":\\\"SECURITY_GROUPS_USAGE_AUDIT\\\",\\\"deleteUnusedSecurityGroups\\\":true,\\\"coalesceRedundantSecurityGroups\\\":true}\"
Violation detail for an Network Firewall policy that indicates that an Availability Zone is missing the expected Firewall Manager managed subnet.
" } }, + "NetworkFirewallPolicy": { + "base": "Configures the firewall policy deployment model of Network Firewall. For information about Network Firewall deployment models, see Network Firewall example architectures with routing in the Network Firewall Developer Guide.
", + "refs": { + "PolicyOption$NetworkFirewallPolicy": "Defines the deployment model to use for the firewall policy.
" + } + }, "NetworkFirewallPolicyDescription": { "base": "The definition of the Network Firewall firewall policy.
", "refs": { @@ -795,6 +833,12 @@ "ViolationDetail$PolicyId": "The ID of the Firewall Manager policy that the violation details were requested for.
" } }, + "PolicyOption": { + "base": "Contains the Network Firewall firewall policy options to configure a centralized deployment model.
", + "refs": { + "SecurityServicePolicyData$PolicyOption": "Contains the Network Firewall firewall policy options to configure a centralized deployment model.
" + } + }, "PolicySummary": { "base": "Details of the Firewall Manager policy.
", "refs": { @@ -992,6 +1036,9 @@ "EC2DeleteRouteAction$DestinationPrefixListId": "Information about the ID of the prefix list for the route.
", "EC2ReplaceRouteAction$DestinationPrefixListId": "Information about the ID of the prefix list for the route.
", "ExpectedRoute$RouteTableId": "Information about the route table ID.
", + "FirewallSubnetIsOutOfScopeViolation$FirewallSubnetId": "The ID of the firewall subnet that violates the policy scope.
", + "FirewallSubnetIsOutOfScopeViolation$VpcId": "The VPC ID of the firewall subnet that violates the policy scope.
", + "FirewallSubnetIsOutOfScopeViolation$VpcEndpointId": "The VPC endpoint ID of the firewall subnet that violates the policy scope.
", "GetViolationDetailsRequest$ResourceId": "The ID of the resource that has violations.
", "NetworkFirewallBlackHoleRouteDetectedViolation$RouteTableId": "Information about the route table ID.
", "NetworkFirewallBlackHoleRouteDetectedViolation$VpcId": "Information about the VPC ID.
", @@ -1026,6 +1073,13 @@ "NetworkFirewallUnexpectedGatewayRoutesViolation$RouteTableId": "Information about the route table.
", "NetworkFirewallUnexpectedGatewayRoutesViolation$VpcId": "Information about the VPC ID.
", "ResourceIdList$member": null, + "RouteHasOutOfScopeEndpointViolation$SubnetId": "The ID of the subnet associated with the route that violates the policy scope.
", + "RouteHasOutOfScopeEndpointViolation$VpcId": "The VPC ID of the route that violates the policy scope.
", + "RouteHasOutOfScopeEndpointViolation$RouteTableId": "The ID of the route table.
", + "RouteHasOutOfScopeEndpointViolation$CurrentFirewallSubnetRouteTable": "The route table associated with the current firewall subnet.
", + "RouteHasOutOfScopeEndpointViolation$FirewallSubnetId": "The ID of the firewall subnet.
", + "RouteHasOutOfScopeEndpointViolation$InternetGatewayId": "The ID of the Internet Gateway.
", + "RouteHasOutOfScopeEndpointViolation$CurrentInternetGatewayRouteTable": "The current route table associated with the Internet Gateway.
", "SecurityGroupRuleDescription$PrefixListId": "The ID of the prefix list for the security group rule.
", "StatefulRuleGroup$ResourceId": "The resource ID of the rule group.
", "StatelessRuleGroup$ResourceId": "The resource ID of the rule group.
", @@ -1118,6 +1172,12 @@ "Routes$member": null } }, + "RouteHasOutOfScopeEndpointViolation": { + "base": "Contains details about the route endpoint that violates the policy scope.
", + "refs": { + "ResourceViolation$RouteHasOutOfScopeEndpointViolation": "Contains details about the route endpoint that violates the policy scope.
" + } + }, "Routes": { "base": null, "refs": { @@ -1128,7 +1188,10 @@ "NetworkFirewallInvalidRouteConfigurationViolation$ActualFirewallSubnetRoutes": "The actual firewall subnet routes that are expected.
", "NetworkFirewallInvalidRouteConfigurationViolation$ActualInternetGatewayRoutes": "The actual internet gateway routes.
", "NetworkFirewallUnexpectedFirewallRoutesViolation$ViolatingRoutes": "The routes that are in violation.
", - "NetworkFirewallUnexpectedGatewayRoutesViolation$ViolatingRoutes": "The routes that are in violation.
" + "NetworkFirewallUnexpectedGatewayRoutesViolation$ViolatingRoutes": "The routes that are in violation.
", + "RouteHasOutOfScopeEndpointViolation$ViolatingRoutes": "The list of routes that violate the route table.
", + "RouteHasOutOfScopeEndpointViolation$FirewallSubnetRoutes": "The list of firewall subnet routes.
", + "RouteHasOutOfScopeEndpointViolation$InternetGatewayRoutes": "The routes in the route table associated with the Internet Gateway.
" } }, "SecurityGroupRemediationAction": { @@ -1220,7 +1283,7 @@ "PutPolicyRequest$TagList": "The tags to add to the Amazon Web Services resource.
", "PutProtocolsListRequest$TagList": "The tags associated with the resource.
", "TagResourceRequest$TagList": "The tags to add to the resource.
", - "ViolationDetail$ResourceTags": "The ResourceTag
objects associated with the resource.
The ResourceTag
objects associated with the resource.
This option isn't available for the centralized deployment model when creating policies to configure Network Firewall.
" } }, "TagResourceRequest": { diff --git a/models/apis/lightsail/2016-11-28/api-2.json b/models/apis/lightsail/2016-11-28/api-2.json index 625d2e95416..004956c5efe 100644 --- a/models/apis/lightsail/2016-11-28/api-2.json +++ b/models/apis/lightsail/2016-11-28/api-2.json @@ -4239,7 +4239,8 @@ "type":"structure", "required":["keyPairName"], "members":{ - "keyPairName":{"shape":"ResourceName"} + "keyPairName":{"shape":"ResourceName"}, + "expectedFingerprint":{"shape":"string"} } }, "DeleteKeyPairResult":{ @@ -4601,7 +4602,8 @@ "type":"structure", "members":{ "publicKeyBase64":{"shape":"Base64"}, - "privateKeyBase64":{"shape":"Base64"} + "privateKeyBase64":{"shape":"Base64"}, + "createdAt":{"shape":"IsoDate"} } }, "EligibleToRenew":{"type":"string"}, @@ -5270,7 +5272,8 @@ "GetKeyPairsRequest":{ "type":"structure", "members":{ - "pageToken":{"shape":"string"} + "pageToken":{"shape":"string"}, + "includeDefaultKeyPair":{"shape":"boolean"} } }, "GetKeyPairsResult":{ diff --git a/models/apis/lightsail/2016-11-28/docs-2.json b/models/apis/lightsail/2016-11-28/docs-2.json index 2c552c9b068..4f1241e6880 100644 --- a/models/apis/lightsail/2016-11-28/docs-2.json +++ b/models/apis/lightsail/2016-11-28/docs-2.json @@ -11,7 +11,7 @@ "CloseInstancePublicPorts": "Closes ports for a specific Amazon Lightsail instance.
The CloseInstancePublicPorts
action supports tag-based access control via resource tags applied to the resource identified by instanceName
. For more information, see the Amazon Lightsail Developer Guide.
Copies a manual snapshot of an instance or disk as another manual snapshot, or copies an automatic snapshot of an instance or disk as a manual snapshot. This operation can also be used to copy a manual or automatic snapshot of an instance or a disk from one AWS Region to another in Amazon Lightsail.
When copying a manual snapshot, be sure to define the source region
, source snapshot name
, and target snapshot name
parameters.
When copying an automatic snapshot, be sure to define the source region
, source resource name
, target snapshot name
, and either the restore date
or the use latest restorable auto snapshot
parameters.
Creates an Amazon Lightsail bucket.
A bucket is a cloud storage resource available in the Lightsail object storage service. Use buckets to store objects such as data and its descriptive metadata. For more information about buckets, see Buckets in Amazon Lightsail in the Amazon Lightsail Developer Guide.
", - "CreateBucketAccessKey": "Creates a new access key for the specified Amazon Lightsail bucket. Access keys consist of an access key ID and corresponding secret access key.
Access keys grant full programmatic access to the specified bucket and its objects. You can have a maximum of two access keys per bucket. Use the GetBucketAccessKeys action to get a list of current access keys for a specific bucket. For more information about access keys, see Creating access keys for a bucket in Amazon Lightsail in the Amazon Lightsail Developer Guide.
The secretAccessKey
value is returned only in response to the CreateBucketAccessKey
action. You can get a secret access key only when you first create an access key; you cannot get the secret access key later. If you lose the secret access key, you must create a new access key.
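Because the secret is returned only by this one call, callers should persist it immediately. A minimal aws-sdk-go sketch, with a placeholder bucket name:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/lightsail"
)

func main() {
	svc := lightsail.New(session.Must(session.NewSession()))

	out, err := svc.CreateBucketAccessKey(&lightsail.CreateBucketAccessKeyInput{
		BucketName: aws.String("example-bucket"), // placeholder
	})
	if err != nil {
		log.Fatal(err)
	}
	// The secret access key appears only in this response; store it now.
	fmt.Println("access key ID:", aws.StringValue(out.AccessKey.AccessKeyId))
	fmt.Println("secret access key:", aws.StringValue(out.AccessKey.SecretAccessKey))
}
```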
Creates an SSL/TLS certificate for an Amazon Lightsail content delivery network (CDN) distribution and a container service.
After the certificate is valid, use the AttachCertificateToDistribution
action to use the certificate and its domains with your distribution. Or use the UpdateContainerService
action to use the certificate and its domains with your container service.
Only certificates created in the us-east-1
AWS Region can be attached to Lightsail distributions. Lightsail distributions are global resources that can reference an origin in any AWS Region, and distribute its content globally. However, all distributions are located in the us-east-1
Region.
Creates an AWS CloudFormation stack, which creates a new Amazon EC2 instance from an exported Amazon Lightsail snapshot. This operation results in a CloudFormation stack record that can be used to track the AWS CloudFormation stack created. Use the get cloud formation stack records
operation to get a list of the CloudFormation stacks created.
Wait until after your new Amazon EC2 instance is created before running the create cloud formation stack
operation again with the same export snapshot record.
Creates an email or SMS text message contact method.
A contact method is used to send you notifications about your Amazon Lightsail resources. You can add one email address and one mobile phone number contact method in each AWS Region. However, SMS text messaging is not supported in some AWS Regions, and SMS text messages cannot be sent to some countries/regions. For more information, see Notifications in Amazon Lightsail.
", @@ -27,7 +27,7 @@ "CreateInstanceSnapshot": "Creates a snapshot of a specific virtual private server, or instance. You can use a snapshot to create a new instance that is based on that snapshot.
The create instance snapshot
operation supports tag-based access control via request tags. For more information, see the Amazon Lightsail Developer Guide.
Creates one or more Amazon Lightsail instances.
The create instances
operation supports tag-based access control via request tags. For more information, see the Lightsail Developer Guide.
Creates one or more new instances from a manual or automatic snapshot of an instance.
The create instances from snapshot
operation supports tag-based access control via request tags and resource tags applied to the resource identified by instance snapshot name
. For more information, see the Amazon Lightsail Developer Guide.
Creates an SSH key pair.
The create key pair
operation supports tag-based access control via request tags. For more information, see the Amazon Lightsail Developer Guide.
Creates a custom SSH key pair that you can use with an Amazon Lightsail instance.
Use the DownloadDefaultKeyPair action to create a Lightsail default key pair in an Amazon Web Services Region where a default key pair does not currently exist.
The create key pair
operation supports tag-based access control via request tags. For more information, see the Amazon Lightsail Developer Guide.
Creates a Lightsail load balancer. To learn more about deciding whether to load balance your application, see Configure your Lightsail instances for load balancing. You can create up to 5 load balancers per AWS Region in your account.
When you create a load balancer, you can specify a unique name and port settings. To change additional load balancer settings, use the UpdateLoadBalancerAttribute
operation.
The create load balancer
operation supports tag-based access control via request tags. For more information, see the Amazon Lightsail Developer Guide.
Creates an SSL/TLS certificate for an Amazon Lightsail load balancer.
TLS is just an updated, more secure version of Secure Socket Layer (SSL).
The CreateLoadBalancerTlsCertificate
operation supports tag-based access control via resource tags applied to the resource identified by load balancer name
. For more information, see the Amazon Lightsail Developer Guide.
Creates a new database in Amazon Lightsail.
The create relational database
operation supports tag-based access control via request tags. For more information, see the Amazon Lightsail Developer Guide.
Deletes a specific domain entry.
The delete domain entry
operation supports tag-based access control via resource tags applied to the resource identified by domain name
. For more information, see the Amazon Lightsail Developer Guide.
Deletes an Amazon Lightsail instance.
The delete instance
operation supports tag-based access control via resource tags applied to the resource identified by instance name
. For more information, see the Amazon Lightsail Developer Guide.
Deletes a specific snapshot of a virtual private server (or instance).
The delete instance snapshot
operation supports tag-based access control via resource tags applied to the resource identified by instance snapshot name
. For more information, see the Amazon Lightsail Developer Guide.
Deletes a specific SSH key pair.
The delete key pair
operation supports tag-based access control via resource tags applied to the resource identified by key pair name
. For more information, see the Amazon Lightsail Developer Guide.
Deletes the specified key pair by removing the public key from Amazon Lightsail.
You can delete key pairs that were created using the ImportKeyPair and CreateKeyPair actions, as well as the Lightsail default key pair. A new default key pair will not be created unless you launch an instance without specifying a custom key pair, or you call the DownloadDefaultKeyPair API.
The delete key pair
operation supports tag-based access control via resource tags applied to the resource identified by key pair name
. For more information, see the Amazon Lightsail Developer Guide.
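The expectedFingerprint member added in the api-2.json hunk above is what this doc change refers to: it must match when deleting the regional default key pair. A hedged sketch, assuming the default key pair is addressed by name and using a placeholder fingerprint:

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/lightsail"
)

func main() {
	svc := lightsail.New(session.Must(session.NewSession()))

	// ExpectedFingerprint is required only when deleting the Lightsail
	// default key pair; both values below are placeholders.
	_, err := svc.DeleteKeyPair(&lightsail.DeleteKeyPairInput{
		KeyPairName:         aws.String("default"),
		ExpectedFingerprint: aws.String("SHA256:EXAMPLEFINGERPRINT"),
	})
	if err != nil {
		log.Fatal(err)
	}
}
```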
Deletes the known host key or certificate used by the Amazon Lightsail browser-based SSH or RDP clients to authenticate an instance. This operation enables the Lightsail browser-based SSH or RDP clients to connect to the instance after a host key mismatch.
Perform this operation only if you were expecting the host key or certificate mismatch or if you are familiar with the new host key or certificate on the instance. For more information, see Troubleshooting connection issues when using the Amazon Lightsail browser-based SSH or RDP client.
Deletes a Lightsail load balancer and all its associated SSL/TLS certificates. Once the load balancer is deleted, you will need to create a new load balancer, create a new certificate, and verify domain ownership again.
The delete load balancer
operation supports tag-based access control via resource tags applied to the resource identified by load balancer name
. For more information, see the Amazon Lightsail Developer Guide.
Deletes an SSL/TLS certificate associated with a Lightsail load balancer.
The DeleteLoadBalancerTlsCertificate
operation supports tag-based access control via resource tags applied to the resource identified by load balancer name
. For more information, see the Amazon Lightsail Developer Guide.
Detaches the specified instances from a Lightsail load balancer.
This operation waits until the instances are no longer needed before they are detached from the load balancer.
The detach instances from load balancer
operation supports tag-based access control via resource tags applied to the resource identified by load balancer name
. For more information, see the Amazon Lightsail Developer Guide.
Detaches a static IP from the Amazon Lightsail instance to which it is attached.
", "DisableAddOn": "Disables an add-on for an Amazon Lightsail resource. For more information, see the Amazon Lightsail Developer Guide.
", - "DownloadDefaultKeyPair": "Downloads the default SSH key pair from the user's account.
", + "DownloadDefaultKeyPair": "Downloads the regional Amazon Lightsail default key pair.
This action also creates a Lightsail default key pair if a default key pair does not currently exist in the Amazon Web Services Region.
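A short sketch of the updated behavior: calling DownloadDefaultKeyPair now creates the regional default key pair when one does not exist, and the result carries the new createdAt timestamp added in the api-2.json hunk above.

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/lightsail"
)

func main() {
	svc := lightsail.New(session.Must(session.NewSession()))

	out, err := svc.DownloadDefaultKeyPair(&lightsail.DownloadDefaultKeyPairInput{})
	if err != nil {
		log.Fatal(err)
	}
	// CreatedAt is the new result member; it may be the current time when
	// the call just created the default key pair.
	fmt.Println("created at:", out.CreatedAt)
	fmt.Println("public key:", aws.StringValue(out.PublicKeyBase64))
}
```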
", "EnableAddOn": "Enables or modifies an add-on for an Amazon Lightsail resource. For more information, see the Amazon Lightsail Developer Guide.
", "ExportSnapshot": "Exports an Amazon Lightsail instance or block storage disk snapshot to Amazon Elastic Compute Cloud (Amazon EC2). This operation results in an export snapshot record that can be used with the create cloud formation stack
operation to create new Amazon EC2 instances.
Exported instance snapshots appear in Amazon EC2 as Amazon Machine Images (AMIs), and the instance system disk appears as an Amazon Elastic Block Store (Amazon EBS) volume. Exported disk snapshots appear in Amazon EC2 as Amazon EBS volumes. Snapshots are exported to the same Amazon Web Services Region in Amazon EC2 as the source Lightsail snapshot.
The export snapshot
operation supports tag-based access control via resource tags applied to the resource identified by source snapshot name
. For more information, see the Amazon Lightsail Developer Guide.
Use the get instance snapshots
or get disk snapshots
operations to get a list of snapshots that you can export to Amazon EC2.
Returns the names of all active (not deleted) resources.
", "GetAlarms": "Returns information about the configured alarms. Specify an alarm name in your request to return information about a specific alarm, or specify a monitored resource name to return information about all alarms for a specific resource.
An alarm is used to monitor a single metric for one of your resources. When a metric condition is met, the alarm can notify you by email, SMS text message, and a banner displayed on the Amazon Lightsail console. For more information, see Alarms in Amazon Lightsail.
", "GetAutoSnapshots": "Returns the available automatic snapshots for an instance or disk. For more information, see the Amazon Lightsail Developer Guide.
", "GetBlueprints": "Returns the list of available instance images, or blueprints. You can use a blueprint to create a new instance already running a specific operating system, as well as a preinstalled app or development stack. The software each instance is running depends on the blueprint image you choose.
Use active blueprints when creating new instances. Inactive blueprints are listed to support customers with existing instances and are not necessarily available to create new instances. Blueprints are marked inactive when they become outdated due to operating system updates or new application releases.
Returns the existing access key IDs for the specified Amazon Lightsail bucket.
This action does not return the secret access key value of an access key. You can get a secret access key only when you create it from the response of the CreateBucketAccessKey action. If you lose the secret access key, you must create a new access key.
Returns the bundles that you can apply to an Amazon Lightsail bucket.
The bucket bundle specifies the monthly cost, storage quota, and data transfer quota for a bucket.
Use the UpdateBucketBundle action to update the bundle for a bucket.
", + "GetBucketAccessKeys": "Returns the existing access key IDs for the specified Amazon Lightsail bucket.
This action does not return the secret access key value of an access key. You can get a secret access key only when you create it from the response of the CreateBucketAccessKey action. If you lose the secret access key, you must create a new access key.
Returns the bundles that you can apply to an Amazon Lightsail bucket.
The bucket bundle specifies the monthly cost, storage quota, and data transfer quota for a bucket.
Use the UpdateBucketBundle action to update the bundle for a bucket.
", "GetBucketMetricData": "Returns the data points of a specific metric for an Amazon Lightsail bucket.
Metrics report the utilization of a bucket. View and collect metric data regularly to monitor the number of objects stored in a bucket (including object versions) and the storage space used by those objects.
", "GetBuckets": "Returns information about one or more Amazon Lightsail buckets.
For more information about buckets, see Buckets in Amazon Lightsail in the Amazon Lightsail Developer Guide.
", "GetBundles": "Returns the list of bundles that are available for purchase. A bundle describes the specs for your virtual private server (or instance).
", @@ -91,7 +91,7 @@ "GetDistributions": "Returns information about one or more of your Amazon Lightsail content delivery network (CDN) distributions.
", "GetDomain": "Returns information about a specific domain recordset.
", "GetDomains": "Returns a list of all domains in the user's account.
", - "GetExportSnapshotRecords": "Returns all export snapshot records created as a result of the export snapshot
operation.
An export snapshot record can be used to create a new Amazon EC2 instance and its related resources with the CreateCloudFormationStack action.
", + "GetExportSnapshotRecords": "Returns all export snapshot records created as a result of the export snapshot
operation.
An export snapshot record can be used to create a new Amazon EC2 instance and its related resources with the CreateCloudFormationStack action.
", "GetInstance": "Returns information about a specific Amazon Lightsail instance, which is a virtual private server.
", "GetInstanceAccessDetails": "Returns temporary SSH keys you can use to connect to a specific virtual private server, or instance.
The get instance access details
operation supports tag-based access control via resource tags applied to the resource identified by instance name
. For more information, see the Amazon Lightsail Developer Guide.
Returns the data points for the specified Amazon Lightsail instance metric, given an instance name.
Metrics report the utilization of your resources, and the error counts generated by them. Monitor and collect metric data regularly to maintain the reliability, availability, and performance of your resources.
", @@ -147,7 +147,7 @@ "UnpeerVpc": "Unpeers the Lightsail VPC from the user's default VPC.
", "UntagResource": "Deletes the specified set of tag keys and their values from the specified Amazon Lightsail resource.
The untag resource
operation supports tag-based access control via request tags and resource tags applied to the resource identified by resource name
. For more information, see the Amazon Lightsail Developer Guide.
Updates an existing Amazon Lightsail bucket.
Use this action to update the configuration of an existing bucket, such as versioning, public accessibility, and the AWS accounts that can access the bucket.
", - "UpdateBucketBundle": "Updates the bundle, or storage plan, of an existing Amazon Lightsail bucket.
A bucket bundle specifies the monthly cost, storage space, and data transfer quota for a bucket. You can update a bucket's bundle only one time within a monthly AWS billing cycle. To determine if you can update a bucket's bundle, use the GetBuckets action. The ableToUpdateBundle
parameter in the response will indicate whether you can currently update a bucket's bundle.
Update a bucket's bundle if it's consistently going over its storage space or data transfer quota, or if a bucket's usage is consistently in the lower range of its storage space or data transfer quota. Due to the unpredictable usage fluctuations that a bucket might experience, we strongly recommend that you update a bucket's bundle only as a long-term strategy, instead of as a short-term, monthly cost-cutting measure. Choose a bucket bundle that will provide the bucket with ample storage space and data transfer for a long time to come.
", + "UpdateBucketBundle": "Updates the bundle, or storage plan, of an existing Amazon Lightsail bucket.
A bucket bundle specifies the monthly cost, storage space, and data transfer quota for a bucket. You can update a bucket's bundle only one time within a monthly AWS billing cycle. To determine if you can update a bucket's bundle, use the GetBuckets action. The ableToUpdateBundle
parameter in the response will indicate whether you can currently update a bucket's bundle.
Update a bucket's bundle if it's consistently going over its storage space or data transfer quota, or if a bucket's usage is consistently in the lower range of its storage space or data transfer quota. Due to the unpredictable usage fluctuations that a bucket might experience, we strongly recommend that you update a bucket's bundle only as a long-term strategy, instead of as a short-term, monthly cost-cutting measure. Choose a bucket bundle that will provide the bucket with ample storage space and data transfer for a long time to come.
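Since the bundle can change only once per billing cycle, a reasonable pattern is to check ableToUpdateBundle first. A sketch with placeholder bucket and bundle identifiers:

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/lightsail"
)

func main() {
	svc := lightsail.New(session.Must(session.NewSession()))

	gb, err := svc.GetBuckets(&lightsail.GetBucketsInput{
		BucketName: aws.String("example-bucket"), // placeholder
	})
	if err != nil || len(gb.Buckets) == 0 {
		log.Fatal("bucket lookup failed: ", err)
	}
	// Only attempt the update when the billing cycle allows it.
	if aws.BoolValue(gb.Buckets[0].AbleToUpdateBundle) {
		_, err = svc.UpdateBucketBundle(&lightsail.UpdateBucketBundleInput{
			BucketName: aws.String("example-bucket"),
			BundleId:   aws.String("medium_1_0"), // placeholder bundle ID
		})
		if err != nil {
			log.Fatal(err)
		}
	}
}
```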
", "UpdateContainerService": "Updates the configuration of your Amazon Lightsail container service, such as its power, scale, and public domain names.
", "UpdateDistribution": "Updates an existing Amazon Lightsail content delivery network (CDN) distribution.
Use this action to update the configuration of your existing distribution.
", "UpdateDistributionBundle": "Updates the bundle of your Amazon Lightsail content delivery network (CDN) distribution.
A distribution bundle specifies the monthly network transfer quota and monthly cost of your dsitribution.
Update your distribution's bundle if your distribution is going over its monthly network transfer quota and is incurring an overage fee.
You can update your distribution's bundle only one time within your monthly AWS billing cycle. To determine if you can update your distribution's bundle, use the GetDistributions
action. The ableToUpdateBundle
parameter in the result will indicate whether you can currently update your distribution's bundle.
Describes an access key for an Amazon Lightsail bucket.
Access keys grant full programmatic access to the specified bucket and its objects. You can have a maximum of two access keys per bucket. Use the CreateBucketAccessKey action to create an access key for a specific bucket. For more information about access keys, see Creating access keys for a bucket in Amazon Lightsail in the Amazon Lightsail Developer Guide.
The secretAccessKey
value is returned only in response to the CreateBucketAccessKey
action. You can get a secret access key only when you first create an access key; you cannot get the secret access key later. If you lose the secret access key, you must create a new access key.
Describes an access key for an Amazon Lightsail bucket.
Access keys grant full programmatic access to the specified bucket and its objects. You can have a maximum of two access keys per bucket. Use the CreateBucketAccessKey action to create an access key for a specific bucket. For more information about access keys, see Creating access keys for a bucket in Amazon Lightsail in the Amazon Lightsail Developer Guide.
The secretAccessKey
value is returned only in response to the CreateBucketAccessKey
action. You can get a secret access key only when you first create an access key; you cannot get the secret access key later. If you lose the secret access key, you must create a new access key.
An object that describes the access key that is created.
" } }, "AccessKeyLastUsed": { - "base": "Describes the last time an access key was used.
This object does not include data in the response of a CreateBucketAccessKey action.
Describes the last time an access key was used.
This object does not include data in the response of a CreateBucketAccessKey action.
An object that describes the last time the access key was used.
This object does not include data in the response of a CreateBucketAccessKey action. If the access key has not been used, the region
and serviceName
values are N/A
, and the lastUsedDate
value is null.
An object that describes the last time the access key was used.
This object does not include data in the response of a CreateBucketAccessKey action. If the access key has not been used, the region
and serviceName
values are N/A
, and the lastUsedDate
value is null.
An array of objects that describe Lightsail instances that have access to the bucket.
Use the SetResourceAccessForBucket action to update the instances that have access to a bucket.
" + "Bucket$resourcesReceivingAccess": "An array of objects that describe Lightsail instances that have access to the bucket.
Use the SetResourceAccessForBucket action to update the instances that have access to a bucket.
" } }, "AccessRules": { @@ -478,7 +478,7 @@ "CreateBucketAccessKeyRequest$bucketName": "The name of the bucket that the new access key will belong to, and grant access to.
", "CreateBucketRequest$bucketName": "The name for the bucket.
For more information about bucket names, see Bucket naming rules in Amazon Lightsail in the Amazon Lightsail Developer Guide.
", "DeleteBucketAccessKeyRequest$bucketName": "The name of the bucket that the access key belongs to.
", - "DeleteBucketRequest$bucketName": "The name of the bucket to delete.
Use the GetBuckets action to get a list of bucket names that you can specify.
", + "DeleteBucketRequest$bucketName": "The name of the bucket to delete.
Use the GetBuckets action to get a list of bucket names that you can specify.
", "GetBucketAccessKeysRequest$bucketName": "The name of the bucket for which to return access keys.
", "GetBucketMetricDataRequest$bucketName": "The name of the bucket for which to get metric data.
", "GetBucketsRequest$bucketName": "The name of the bucket for which to return information.
When omitted, the response includes all of your buckets in the AWS Region where the request is made.
", @@ -522,7 +522,7 @@ } }, "CacheBehaviorPerPath": { - "base": "Describes the per-path cache behavior of an Amazon Lightsail content delivery network (CDN) distribution.
A per-path cache behavior is used to override, or add an exception to, the default cache behavior of a distribution. For example, if the cacheBehavior
is set to cache
, then a per-path cache behavior can be used to specify a directory, file, or file type that your distribution will cache. Alternately, if the distribution's cacheBehavior
is dont-cache
, then a per-path cache behavior can be used to specify a directory, file, or file type that your distribution will not cache.
", + "base": "Describes the per-path cache behavior of an Amazon Lightsail content delivery network (CDN) distribution.
A per-path cache behavior is used to override, or add an exception to, the default cache behavior of a distribution. For example, if the cacheBehavior
is set to cache
, then a per-path cache behavior can be used to specify a directory, file, or file type that your distribution will cache. Alternately, if the distribution's cacheBehavior
is dont-cache
, then a per-path cache behavior can be used to specify a directory, file, or file type that your distribution will not cache.
Describes the origin resource of an Amazon Lightsail content delivery network (CDN) distribution.
An origin can be a Lightsail instance or load balancer. A distribution pulls content from an origin, caches it, and serves it to viewers via a worldwide network of edge servers.
", + "base": "Describes the origin resource of an Amazon Lightsail content delivery network (CDN) distribution.
An origin can be a Lightsail instance, bucket, or load balancer. A distribution pulls content from an origin, caches it, and serves it to viewers via a worldwide network of edge servers.
", "refs": { - "CreateDistributionRequest$origin": "An object that describes the origin resource for the distribution, such as a Lightsail instance or load balancer.
The distribution pulls, caches, and serves content from the origin.
", - "UpdateDistributionRequest$origin": "An object that describes the origin resource for the distribution, such as a Lightsail instance or load balancer.
The distribution pulls, caches, and serves content from the origin.
" + "CreateDistributionRequest$origin": "An object that describes the origin resource for the distribution, such as a Lightsail instance, bucket, or load balancer.
The distribution pulls, caches, and serves content from the origin.
", + "UpdateDistributionRequest$origin": "An object that describes the origin resource for the distribution, such as a Lightsail instance, bucket, or load balancer.
The distribution pulls, caches, and serves content from the origin.
" } }, "Instance": { @@ -2563,6 +2563,7 @@ "Disk$createdAt": "The date when the disk was created.
", "DiskSnapshot$createdAt": "The date when the disk snapshot was created.
", "Domain$createdAt": "The date when the domain recordset was created.
", + "DownloadDefaultKeyPairResult$createdAt": "The timestamp when the default key pair was created.
", "ExportSnapshotRecord$createdAt": "The date when the export snapshot record was created.
", "ExportSnapshotRecordSourceInfo$createdAt": "The date when the source instance or disk snapshot was created.
", "GetBucketMetricDataRequest$startTime": "The timestamp indicating the earliest data to be returned.
", @@ -2877,7 +2878,7 @@ "Blueprint$group": "The group name of the blueprint (e.g., amazon-linux
).
The Lightsail resource type of the bucket (for example, Bucket
).
The Amazon Resource Name (ARN) of the bucket.
", - "Bucket$bundleId": "The ID of the bundle currently applied to the bucket.
A bucket bundle specifies the monthly cost, storage space, and data transfer quota for a bucket.
Use the UpdateBucketBundle action to change the bundle of a bucket.
", + "Bucket$bundleId": "The ID of the bundle currently applied to the bucket.
A bucket bundle specifies the monthly cost, storage space, and data transfer quota for a bucket.
Use the UpdateBucketBundle action to change the bundle of a bucket.
", "Bucket$url": "The URL of the bucket.
", "Bucket$supportCode": "The support code for a bucket. Include this code in your email to support when you have questions about a Lightsail bucket. This code enables our support team to look up your Lightsail information more easily.
", "Bucket$objectVersioning": "Indicates whether object versioning is enabled for the bucket.
The following options can be configured:
Enabled
- Object versioning is enabled.
Suspended
- Object versioning was previously enabled but is currently suspended. Existing object versions are retained.
NeverEnabled
- Object versioning has never been enabled.
The destination of the contact method, such as an email address or a mobile phone number.
", "ContactMethod$arn": "The Amazon Resource Name (ARN) of the contact method.
", "ContainerService$arn": "The Amazon Resource Name (ARN) of the container service.
", - "CreateBucketRequest$bundleId": "The ID of the bundle to use for the bucket.
A bucket bundle specifies the monthly cost, storage space, and data transfer quota for a bucket.
Use the GetBucketBundles action to get a list of bundle IDs that you can specify.
Use the UpdateBucketBundle action to change the bundle after the bucket is created.
", + "CreateBucketRequest$bundleId": "The ID of the bundle to use for the bucket.
A bucket bundle specifies the monthly cost, storage space, and data transfer quota for a bucket.
Use the GetBucketBundles action to get a list of bundle IDs that you can specify.
Use the UpdateBucketBundle action to change the bundle after the bucket is created.
", "CreateDiskFromSnapshotRequest$availabilityZone": "The Availability Zone where you want to create the disk (e.g., us-east-2a
). Choose the same Availability Zone as the Lightsail instance where you want to create the disk.
Use the GetRegions operation to list the Availability Zones where Lightsail is currently available.
", "CreateDiskRequest$availabilityZone": "The Availability Zone where you want to create the disk (e.g., us-east-2a
). Use the same Availability Zone as the Lightsail instance to which you want to attach the disk.
Use the get regions
operation to list the Availability Zones where Lightsail is currently available.
The bundle of specification information for your virtual private server (or instance), including the pricing plan (e.g., micro_1_0
).
The ID for a virtual private server image (e.g., app_wordpress_4_4
or app_lamp_7_0
). Use the get blueprints
operation to return a list of available images (or blueprints).
Use active blueprints when creating new instances. Inactive blueprints are listed to support customers with existing instances and are not necessarily available to create new instances. Blueprints are marked inactive when they become outdated due to operating system updates or new application releases.
The bundle of specification information for your virtual private server (or instance), including the pricing plan (e.g., micro_1_0
).
The ID of the access key to delete.
Use the GetBucketAccessKeys action to get a list of access key IDs that you can specify.
", + "DeleteBucketAccessKeyRequest$accessKeyId": "The ID of the access key to delete.
Use the GetBucketAccessKeys action to get a list of access key IDs that you can specify.
", "DestinationInfo$id": "The ID of the resource created at the destination.
", "DestinationInfo$service": "The destination service of the record.
", "Disk$arn": "The Amazon Resource Name (ARN) of the disk.
", @@ -2966,7 +2967,7 @@ "ResourceReceivingAccess$name": "The name of the Lightsail instance.
", "ResourceReceivingAccess$resourceType": "The Lightsail resource type (for example, Instance
).
The Amazon Resource Name (ARN) of the static IP (e.g., arn:aws:lightsail:us-east-2:123456789101:StaticIp/9cbb4a9e-f8e3-4dfe-b57e-12345EXAMPLE
).
The ID of the new bundle to apply to the bucket.
Use the GetBucketBundles action to get a list of bundle IDs that you can specify.
", + "UpdateBucketBundleRequest$bundleId": "The ID of the new bundle to apply to the bucket.
Use the GetBucketBundles action to get a list of bundle IDs that you can specify.
", "UpdateBucketRequest$versioning": "Specifies whether to enable or suspend versioning of objects in the bucket.
The following options can be specified:
Enabled
- Enables versioning of objects in the specified bucket.
Suspended
- Suspends versioning of objects in the specified bucket. Existing object versions are retained.
Describes the origin resource of an Amazon Lightsail content delivery network (CDN) distribution.
An origin can be a Lightsail instance or load balancer. A distribution pulls content from an origin, caches it, and serves it to viewers via a worldwide network of edge servers.
", + "base": "Describes the origin resource of an Amazon Lightsail content delivery network (CDN) distribution.
An origin can be a Lightsail instance, bucket, or load balancer. A distribution pulls content from an origin, caches it, and serves it to viewers via a worldwide network of edge servers.
", "refs": { - "LightsailDistribution$origin": "An object that describes the origin resource of the distribution, such as a Lightsail instance or load balancer.
The distribution pulls, caches, and serves content from the origin.
" + "LightsailDistribution$origin": "An object that describes the origin resource of the distribution, such as a Lightsail instance, bucket, or load balancer.
The distribution pulls, caches, and serves content from the origin.
" } }, "OriginProtocolPolicyEnum": { @@ -3868,7 +3869,7 @@ "Certificate$tags": "The tag keys and optional values for the resource. For more information about tags in Lightsail, see the Amazon Lightsail Developer Guide.
", "CertificateSummary$tags": "The tag keys and optional values for the resource. For more information about tags in Lightsail, see the Amazon Lightsail Developer Guide.
", "ContainerService$tags": "The tag keys and optional values for the resource. For more information about tags in Lightsail, see the Amazon Lightsail Developer Guide.
", - "CreateBucketRequest$tags": "The tag keys and optional values to add to the bucket during creation.
Use the TagResource action to tag the bucket after it's created.
", + "CreateBucketRequest$tags": "The tag keys and optional values to add to the bucket during creation.
Use the TagResource action to tag the bucket after it's created.
", "CreateCertificateRequest$tags": "The tag keys and optional values to add to the certificate during create.
Use the TagResource
action to tag a resource after it's created.
The tag keys and optional values to add to the container service during create.
Use the TagResource
action to tag a resource after it's created.
For more information about tags in Lightsail, see the Amazon Lightsail Developer Guide.
", "CreateDiskFromSnapshotRequest$tags": "The tag keys and optional values to add to the resource during create.
Use the TagResource
action to tag a resource after it's created.
A Boolean value that indicates whether the access control list (ACL) permissions that are applied to individual objects override the getObject
option that is currently specified.
When this is true, you can use the PutObjectAcl Amazon S3 API action to set individual objects to public (read-only) using the public-read
ACL, or to private using the private
ACL.
Indicates whether the alarm is enabled.
", "Blueprint$isActive": "A Boolean value indicating whether the blueprint is active. Inactive blueprints are listed to support customers with existing instances but are not necessarily available for launch of new instances. Blueprints are marked inactive when they become outdated due to operating system updates or new application releases.
", - "Bucket$ableToUpdateBundle": "Indicates whether the bundle that is currently applied to a bucket can be changed to another bundle.
You can update a bucket's bundle only one time within a monthly AWS billing cycle.
Use the UpdateBucketBundle action to change a bucket's bundle.
", + "Bucket$ableToUpdateBundle": "Indicates whether the bundle that is currently applied to a bucket can be changed to another bundle.
You can update a bucket's bundle only one time within a monthly AWS billing cycle.
Use the UpdateBucketBundle action to change a bucket's bundle.
", "BucketAccessLogConfig$enabled": "A Boolean value that indicates whether bucket access logging is enabled for the bucket.
", "BucketBundle$isActive": "Indicates whether the bundle is active. Use for a new or existing bucket.
", "Bundle$isActive": "A Boolean value indicating whether the bundle is active.
", @@ -4074,7 +4075,7 @@ "CreateRelationalDatabaseFromSnapshotRequest$publiclyAccessible": "Specifies the accessibility options for your new database. A value of true
specifies a database that is available to resources outside of your Lightsail account. A value of false
specifies a database that is available only to your Lightsail resources in the same region as your database.
Specifies whether your database is restored from the latest backup time. A value of true
restores from the latest backup time.
Default: false
Constraints: Cannot be specified if the restore time
parameter is provided.
Specifies the accessibility options for your new database. A value of true
specifies a database that is available to resources outside of your Lightsail account. A value of false
specifies a database that is available only to your Lightsail resources in the same region as your database.
A Boolean value that indicates whether to force delete the bucket.
You must force delete the bucket if it has one of the following conditions:
The bucket is the origin of a distribution.
The bucket has instances that were granted access to it using the SetResourceAccessForBucket action.
The bucket has objects.
The bucket has access keys.
Force deleting a bucket might impact other resources that rely on the bucket, such as instances, distributions, or software that use the issued access keys.
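As a sketch of the force-delete path described above (the bucket name is a placeholder; note that force deleting invalidates any issued access keys):

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/lightsail"
)

func main() {
	svc := lightsail.New(session.Must(session.NewSession()))

	// ForceDelete covers buckets that still have objects, access keys,
	// attached instances, or serve as a distribution origin.
	_, err := svc.DeleteBucket(&lightsail.DeleteBucketInput{
		BucketName:  aws.String("example-bucket"), // placeholder
		ForceDelete: aws.Bool(true),
	})
	if err != nil {
		log.Fatal(err)
	}
}
```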
A Boolean value to indicate whether to delete the enabled add-ons for the disk.
", "DeleteInstanceRequest$forceDeleteAddOns": "A Boolean value to indicate whether to delete the enabled add-ons for the disk.
", "DeleteLoadBalancerTlsCertificateRequest$force": "When true
, forces the deletion of an SSL/TLS certificate.
There can be two certificates associated with a Lightsail load balancer: the primary and the backup. The force
parameter is required when the primary SSL/TLS certificate is in use by an instance attached to the load balancer.
When true
, specifies whether the domain entry is an alias used by the Lightsail load balancer. You can include an alias (A type) record in your request, which points to a load balancer DNS name and routes traffic to your load balancer.
A Boolean value indicating whether to include inactive results in your request.
", "GetBucketBundlesRequest$includeInactive": "A Boolean value that indicates whether to include inactive (unavailable) bundles in the response.
", - "GetBucketsRequest$includeConnectedResources": "A Boolean value that indicates whether to include Lightsail instances that were given access to the bucket using the SetResourceAccessForBucket action.
", + "GetBucketsRequest$includeConnectedResources": "A Boolean value that indicates whether to include Lightsail instances that were given access to the bucket using the SetResourceAccessForBucket action.
", "GetBundlesRequest$includeInactive": "A Boolean value that indicates whether to include inactive bundle results in your request.
", + "GetKeyPairsRequest$includeDefaultKeyPair": "A Boolean value that indicates whether to include the default key pair in the response of your request.
", "GetRegionsRequest$includeAvailabilityZones": "A Boolean value indicating whether to also include Availability Zones in your get regions request. Availability Zones are indicated with a letter: e.g., us-east-2a
.
A Boolean value indicating whether to also include Availability Zones for databases in your get regions request. Availability Zones are indicated with a letter (e.g., us-east-2a
).
Parameter to specify if the log should start from head or tail. If true
is specified, the log event starts from the head of the log. If false
is specified, the log event starts from the tail of the log.
For PostgreSQL, the default value of false
is the only option available.
The daily time range during which automated backups are created for your new database if automated backups are enabled.
The default is a 30-minute window selected at random from an 8-hour block of time for each AWS Region. For more information about the preferred backup window time blocks for each region, see the Working With Backups guide in the Amazon Relational Database Service (Amazon RDS) documentation.
Constraints:
Must be in the hh24:mi-hh24:mi
format.
Example: 16:00-16:30
Specified in Coordinated Universal Time (UTC).
Must not conflict with the preferred maintenance window.
Must be at least 30 minutes.
The weekly time range during which system maintenance can occur on your new database.
The default is a 30-minute window selected at random from an 8-hour block of time for each AWS Region, occurring on a random day of the week.
Constraints:
Must be in the ddd:hh24:mi-ddd:hh24:mi
format.
Valid days: Mon, Tue, Wed, Thu, Fri, Sat, Sun.
Must be at least 30 minutes.
Specified in Coordinated Universal Time (UTC).
Example: Tue:17:00-Tue:17:30
The name of the container image to delete from the container service.
Use the GetContainerImages
action to get the name of the container images that are registered to a container service.
Container images sourced from your Lightsail container service, that are registered and stored on your service, start with a colon (:
). For example, :container-service-1.mystaticwebsite.1
. Container images sourced from a public registry like Docker Hub don't start with a colon. For example, nginx:latest
or nginx
.
The RSA fingerprint of the Lightsail default key pair to delete.
The expectedFingerprint
parameter is required only when specifying to delete a Lightsail default key pair.
The support code. Include this code in your email to support when you have questions about an instance or another resource in Lightsail. This code enables our support team to look up your Lightsail information more easily.
", "Disk$path": "The disk path.
", "Disk$attachmentState": "(Deprecated) The attachment state of the disk.
In releases prior to November 14, 2017, this parameter returned attached
for system disks in the API response. It is now deprecated, but still included in the response. Use isAttached
instead.
Amazon Route 53 is a highly available and scalable Domain Name System (DNS) web service.
", "operations": { "ActivateKeySigningKey": "Activates a key-signing key (KSK) so that it can be used for signing by DNSSEC. This operation changes the KSK status to ACTIVE
.
Associates an Amazon VPC with a private hosted zone.
To perform the association, the VPC and the private hosted zone must already exist. You can't convert a public hosted zone into a private hosted zone.
If you want to associate a VPC that was created by using one Amazon Web Services account with a private hosted zone that was created by using a different account, the Amazon Web Services account that created the private hosted zone must first submit a CreateVPCAssociationAuthorization
request. Then the account that created the VPC must submit an AssociateVPCWithHostedZone
request.
Creates, changes, or deletes a resource record set, which contains authoritative DNS information for a specified domain name or subdomain name. For example, you can use ChangeResourceRecordSets
to create a resource record set that routes traffic for test.example.com to a web server that has an IP address of 192.0.2.44.
Deleting Resource Record Sets
To delete a resource record set, you must specify all the same values that you specified when you created it.
Change Batches and Transactional Changes
The request body must include a document with a ChangeResourceRecordSetsRequest
element. The request body contains a list of change items, known as a change batch. Change batches are considered transactional changes. Route 53 validates the changes in the request and then either makes all or none of the changes in the change batch request. This ensures that DNS routing isn't adversely affected by partial changes to the resource record sets in a hosted zone.
For example, suppose a change batch request contains two changes: it deletes the CNAME
resource record set for www.example.com and creates an alias resource record set for www.example.com. If validation for both records succeeds, Route 53 deletes the first resource record set and creates the second resource record set in a single operation. If validation for either the DELETE
or the CREATE
action fails, then the request is canceled, and the original CNAME
record continues to exist.
If you try to delete the same resource record set more than once in a single change batch, Route 53 returns an InvalidChangeBatch
error.
Traffic Flow
To create resource record sets for complex routing configurations, use either the traffic flow visual editor in the Route 53 console or the API actions for traffic policies and traffic policy instances. Save the configuration as a traffic policy, then associate the traffic policy with one or more domain names (such as example.com) or subdomain names (such as www.example.com), in the same hosted zone or in multiple hosted zones. You can roll back the updates if the new configuration isn't performing as expected. For more information, see Using Traffic Flow to Route DNS Traffic in the Amazon Route 53 Developer Guide.
Create, Delete, and Upsert
Use ChangeResourceRecordsSetsRequest
to perform the following actions:
CREATE
: Creates a resource record set that has the specified values.
DELETE
: Deletes an existing resource record set that has the specified values.
UPSERT
: If a resource record set does not already exist, Amazon Web Services creates it. If a resource set does exist, Route 53 updates it with the values in the request.
Syntaxes for Creating, Updating, and Deleting Resource Record Sets
The syntax for a request depends on the type of resource record set that you want to create, delete, or update, such as weighted, alias, or failover. The XML elements in your request must appear in the order listed in the syntax.
For an example for each type of resource record set, see \"Examples.\"
Don't refer to the syntax in the \"Parameter Syntax\" section, which includes all of the elements for every kind of resource record set that you can create, delete, or update by using ChangeResourceRecordSets
.
Change Propagation to Route 53 DNS Servers
When you submit a ChangeResourceRecordSets
request, Route 53 propagates your changes to all of the Route 53 authoritative DNS servers. While your changes are propagating, GetChange
returns a status of PENDING
. When propagation is complete, GetChange
returns a status of INSYNC
. Changes generally propagate to all Route 53 name servers within 60 seconds. For more information, see GetChange.
Limits on ChangeResourceRecordSets Requests
For information about the limits on a ChangeResourceRecordSets
request, see Limits in the Amazon Route 53 Developer Guide.
Associates an Amazon VPC with a private hosted zone.
To perform the association, the VPC and the private hosted zone must already exist. You can't convert a public hosted zone into a private hosted zone.
If you want to associate a VPC that was created by using one Amazon Web Services account with a private hosted zone that was created by using a different account, the Amazon Web Services account that created the private hosted zone must first submit a CreateVPCAssociationAuthorization
request. Then the account that created the VPC must submit an AssociateVPCWithHostedZone
request.
When granting access, the hosted zone and the Amazon VPC must belong to the same partition. A partition is a group of Amazon Web Services Regions. Each Amazon Web Services account is scoped to one partition.
The following are the supported partitions:
aws
- Amazon Web Services Regions
aws-cn
- China Regions
aws-us-gov
- Amazon Web Services GovCloud (US) Region
For more information, see Access Management in the Amazon Web Services General Reference.
Creates, changes, or deletes a resource record set, which contains authoritative DNS information for a specified domain name or subdomain name. For example, you can use ChangeResourceRecordSets
to create a resource record set that routes traffic for test.example.com to a web server that has an IP address of 192.0.2.44.
Deleting Resource Record Sets
To delete a resource record set, you must specify all the same values that you specified when you created it.
Change Batches and Transactional Changes
The request body must include a document with a ChangeResourceRecordSetsRequest
element. The request body contains a list of change items, known as a change batch. Change batches are considered transactional changes. Route 53 validates the changes in the request and then either makes all or none of the changes in the change batch request. This ensures that DNS routing isn't adversely affected by partial changes to the resource record sets in a hosted zone.
For example, suppose a change batch request contains two changes: it deletes the CNAME
resource record set for www.example.com and creates an alias resource record set for www.example.com. If validation for both records succeeds, Route 53 deletes the first resource record set and creates the second resource record set in a single operation. If validation for either the DELETE
or the CREATE
action fails, then the request is canceled, and the original CNAME
record continues to exist.
If you try to delete the same resource record set more than once in a single change batch, Route 53 returns an InvalidChangeBatch
error.
Traffic Flow
To create resource record sets for complex routing configurations, use either the traffic flow visual editor in the Route 53 console or the API actions for traffic policies and traffic policy instances. Save the configuration as a traffic policy, then associate the traffic policy with one or more domain names (such as example.com) or subdomain names (such as www.example.com), in the same hosted zone or in multiple hosted zones. You can roll back the updates if the new configuration isn't performing as expected. For more information, see Using Traffic Flow to Route DNS Traffic in the Amazon Route 53 Developer Guide.
Create, Delete, and Upsert
Use ChangeResourceRecordsSetsRequest
to perform the following actions:
CREATE
: Creates a resource record set that has the specified values.
DELETE
: Deletes an existing resource record set that has the specified values.
UPSERT
: If a resource record set exists, Route 53 updates it with the values in the request.
Syntaxes for Creating, Updating, and Deleting Resource Record Sets
The syntax for a request depends on the type of resource record set that you want to create, delete, or update, such as weighted, alias, or failover. The XML elements in your request must appear in the order listed in the syntax.
For an example for each type of resource record set, see \"Examples.\"
Don't refer to the syntax in the \"Parameter Syntax\" section, which includes all of the elements for every kind of resource record set that you can create, delete, or update by using ChangeResourceRecordSets
.
Change Propagation to Route 53 DNS Servers
When you submit a ChangeResourceRecordSets
request, Route 53 propagates your changes to all of the Route 53 authoritative DNS servers. While your changes are propagating, GetChange
returns a status of PENDING
. When propagation is complete, GetChange
returns a status of INSYNC
. Changes generally propagate to all Route 53 name servers within 60 seconds. For more information, see GetChange.
Limits on ChangeResourceRecordSets Requests
For information about the limits on a ChangeResourceRecordSets
request, see Limits in the Amazon Route 53 Developer Guide.
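To make the UPSERT semantics concrete, here is a hedged aws-sdk-go sketch; the hosted zone ID, record name, and address are placeholders. Pairing the call with WaitUntilResourceRecordSetsChanged on the returned change ID is how a caller observes the PENDING to INSYNC transition described above.

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/route53"
)

func main() {
	svc := route53.New(session.Must(session.NewSession()))

	// UPSERT creates the record set if absent, otherwise updates it in place.
	out, err := svc.ChangeResourceRecordSets(&route53.ChangeResourceRecordSetsInput{
		HostedZoneId: aws.String("Z0123456789EXAMPLE"), // placeholder
		ChangeBatch: &route53.ChangeBatch{
			Changes: []*route53.Change{{
				Action: aws.String(route53.ChangeActionUpsert),
				ResourceRecordSet: &route53.ResourceRecordSet{
					Name: aws.String("test.example.com"),
					Type: aws.String(route53.RRTypeA),
					TTL:  aws.Int64(300),
					ResourceRecords: []*route53.ResourceRecord{
						{Value: aws.String("192.0.2.44")},
					},
				},
			}},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	// Block until the change propagates (status INSYNC).
	err = svc.WaitUntilResourceRecordSetsChanged(&route53.GetChangeInput{Id: out.ChangeInfo.Id})
	if err != nil {
		log.Fatal(err)
	}
}
```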
Adds, edits, or deletes tags for a health check or a hosted zone.
For information about using tags for cost allocation, see Using Cost Allocation Tags in the Billing and Cost Management User Guide.
", "CreateHealthCheck": "Creates a new health check.
For information about adding health checks to resource record sets, see HealthCheckId in ChangeResourceRecordSets.
ELB Load Balancers
If you're registering EC2 instances with an Elastic Load Balancing (ELB) load balancer, do not create Amazon Route 53 health checks for the EC2 instances. When you register an EC2 instance with a load balancer, you configure settings for an ELB health check, which performs a similar function to a Route 53 health check.
Private Hosted Zones
You can associate health checks with failover resource record sets in a private hosted zone. Note the following:
Route 53 health checkers are outside the VPC. To check the health of an endpoint within a VPC by IP address, you must assign a public IP address to the instance in the VPC.
You can configure a health checker to check the health of an external resource that the instance relies on, such as a database server.
You can create a CloudWatch metric, associate an alarm with the metric, and then create a health check that is based on the state of the alarm. For example, you might create a CloudWatch metric that checks the status of the Amazon EC2 StatusCheckFailed
metric, add an alarm to the metric, and then create a health check that is based on the state of the alarm. For information about creating CloudWatch metrics and alarms by using the CloudWatch console, see the Amazon CloudWatch User Guide.
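For the common non-alarm case, a plain endpoint health check is a one-call setup. A hedged sketch; the domain, path, and thresholds are placeholders, and CallerReference just needs to be unique per request:

```go
package main

import (
	"log"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/route53"
)

func main() {
	svc := route53.New(session.Must(session.NewSession()))

	_, err := svc.CreateHealthCheck(&route53.CreateHealthCheckInput{
		// A unique caller reference guards against accidental duplicates.
		CallerReference: aws.String(time.Now().Format(time.RFC3339Nano)),
		HealthCheckConfig: &route53.HealthCheckConfig{
			Type:                     aws.String(route53.HealthCheckTypeHttps),
			FullyQualifiedDomainName: aws.String("example.com"), // placeholder
			Port:                     aws.Int64(443),
			ResourcePath:             aws.String("/health"),
			FailureThreshold:         aws.Int64(3),
			RequestInterval:          aws.Int64(30),
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}
```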
Creates a new public or private hosted zone. You create records in a public hosted zone to define how you want to route traffic on the internet for a domain, such as example.com, and its subdomains (apex.example.com, acme.example.com). You create records in a private hosted zone to define how you want to route traffic for a domain and its subdomains within one or more Amazon Virtual Private Clouds (Amazon VPCs).
You can't convert a public hosted zone to a private hosted zone or vice versa. Instead, you must create a new hosted zone with the same name and create new resource record sets.
For more information about charges for hosted zones, see Amazon Route 53 Pricing.
Note the following:
You can't create a hosted zone for a top-level domain (TLD) such as .com.
For public hosted zones, Route 53 automatically creates a default SOA record and four NS records for the zone. For more information about SOA and NS records, see NS and SOA Records that Route 53 Creates for a Hosted Zone in the Amazon Route 53 Developer Guide.
If you want to use the same name servers for multiple public hosted zones, you can optionally associate a reusable delegation set with the hosted zone. See the DelegationSetId
element.
If your domain is registered with a registrar other than Route 53, you must update the name servers with your registrar to make Route 53 the DNS service for the domain. For more information, see Migrating DNS Service for an Existing Domain to Amazon Route 53 in the Amazon Route 53 Developer Guide.
When you submit a CreateHostedZone
request, the initial status of the hosted zone is PENDING
. For public hosted zones, this means that the NS and SOA records are not yet available on all Route 53 DNS servers. When the NS and SOA records are available, the status of the zone changes to INSYNC
.
The CreateHostedZone
request requires the caller to have an ec2:DescribeVpcs
permission.
Creates a new public or private hosted zone. You create records in a public hosted zone to define how you want to route traffic on the internet for a domain, such as example.com, and its subdomains (apex.example.com, acme.example.com). You create records in a private hosted zone to define how you want to route traffic for a domain and its subdomains within one or more Amazon Virtual Private Clouds (Amazon VPCs).
You can't convert a public hosted zone to a private hosted zone or vice versa. Instead, you must create a new hosted zone with the same name and create new resource record sets.
For more information about charges for hosted zones, see Amazon Route 53 Pricing.
Note the following:
You can't create a hosted zone for a top-level domain (TLD) such as .com.
For public hosted zones, Route 53 automatically creates a default SOA record and four NS records for the zone. For more information about SOA and NS records, see NS and SOA Records that Route 53 Creates for a Hosted Zone in the Amazon Route 53 Developer Guide.
If you want to use the same name servers for multiple public hosted zones, you can optionally associate a reusable delegation set with the hosted zone. See the DelegationSetId element.
If your domain is registered with a registrar other than Route 53, you must update the name servers with your registrar to make Route 53 the DNS service for the domain. For more information, see Migrating DNS Service for an Existing Domain to Amazon Route 53 in the Amazon Route 53 Developer Guide.
When you submit a CreateHostedZone request, the initial status of the hosted zone is PENDING. For public hosted zones, this means that the NS and SOA records are not yet available on all Route 53 DNS servers. When the NS and SOA records are available, the status of the zone changes to INSYNC.
The CreateHostedZone request requires the caller to have an ec2:DescribeVpcs permission.
When creating private hosted zones, the Amazon VPC must belong to the same partition where the hosted zone is created. A partition is a group of Amazon Web Services Regions. Each Amazon Web Services account is scoped to one partition.
The following are the supported partitions:
aws - Amazon Web Services Regions
aws-cn - China Regions
aws-us-gov - Amazon Web Services GovCloud (US) Region
For more information, see Access Management in the Amazon Web Services General Reference.
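A minimal aws-sdk-go sketch of CreateHostedZone for a private hosted zone follows; the domain name and VPC ID are placeholder assumptions.

package main

import (
	"fmt"
	"log"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/route53"
)

func main() {
	svc := route53.New(session.Must(session.NewSession()))

	// Create a private hosted zone associated with a VPC in the same partition.
	out, err := svc.CreateHostedZone(&route53.CreateHostedZoneInput{
		Name:            aws.String("example.com"),
		CallerReference: aws.String(fmt.Sprintf("create-hz-%d", time.Now().UnixNano())),
		HostedZoneConfig: &route53.HostedZoneConfig{
			Comment:     aws.String("internal zone"),
			PrivateZone: aws.Bool(true),
		},
		VPC: &route53.VPC{
			VPCId:     aws.String("vpc-0123456789abcdef0"), // placeholder VPC ID
			VPCRegion: aws.String(route53.VPCRegionUsEast1),
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	// The zone starts in PENDING status; public zones change to INSYNC
	// once the NS and SOA records are available.
	fmt.Println("zone:", aws.StringValue(out.HostedZone.Id),
		"status:", aws.StringValue(out.ChangeInfo.Status))
}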
Creates a new key-signing key (KSK) associated with a hosted zone. You can only have two KSKs per hosted zone.
", - "CreateQueryLoggingConfig": "Creates a configuration for DNS query logging. After you create a query logging configuration, Amazon Route 53 begins to publish log data to an Amazon CloudWatch Logs log group.
DNS query logs contain information about the queries that Route 53 receives for a specified public hosted zone, such as the following:
Route 53 edge location that responded to the DNS query
Domain or subdomain that was requested
DNS record type, such as A or AAAA
DNS response code, such as NoError
or ServFail
Before you create a query logging configuration, perform the following operations.
If you create a query logging configuration using the Route 53 console, Route 53 performs these operations automatically.
Create a CloudWatch Logs log group, and make note of the ARN, which you specify when you create a query logging configuration. Note the following:
You must create the log group in the us-east-1 region.
You must use the same Amazon Web Services account to create the log group and the hosted zone that you want to configure query logging for.
When you create log groups for query logging, we recommend that you use a consistent prefix, for example:
/aws/route53/hosted zone name
In the next step, you'll create a resource policy, which controls access to one or more log groups and the associated Amazon Web Services resources, such as Route 53 hosted zones. There's a limit on the number of resource policies that you can create, so we recommend that you use a consistent prefix so you can use the same resource policy for all the log groups that you create for query logging.
Create a CloudWatch Logs resource policy, and give it the permissions that Route 53 needs to create log streams and to send query logs to log streams. For the value of Resource
, specify the ARN for the log group that you created in the previous step. To use the same resource policy for all the CloudWatch Logs log groups that you created for query logging configurations, replace the hosted zone name with *
, for example:
arn:aws:logs:us-east-1:123412341234:log-group:/aws/route53/*
You can't use the CloudWatch console to create or edit a resource policy. You must use the CloudWatch API, one of the Amazon Web Services SDKs, or the CLI.
When Route 53 finishes creating the configuration for DNS query logging, it does the following:
Creates a log stream for an edge location the first time that the edge location responds to DNS queries for the specified hosted zone. That log stream is used to log all queries that Route 53 responds to for that edge location.
Begins to send query logs to the applicable log stream.
The name of each log stream is in the following format:
hosted zone ID/edge location code
The edge location code is a three-letter code and an arbitrarily assigned number, for example, DFW3. The three-letter code typically corresponds with the International Air Transport Association airport code for an airport near the edge location. (These abbreviations might change in the future.) For a list of edge locations, see \"The Route 53 Global Network\" on the Route 53 Product Details page.
Query logs contain only the queries that DNS resolvers forward to Route 53. If a DNS resolver has already cached the response to a query (such as the IP address for a load balancer for example.com), the resolver will continue to return the cached response. It doesn't forward another query to Route 53 until the TTL for the corresponding resource record set expires. Depending on how many DNS queries are submitted for a resource record set, and depending on the TTL for that resource record set, query logs might contain information about only one query out of every several thousand queries that are submitted to DNS. For more information about how DNS works, see Routing Internet Traffic to Your Website or Web Application in the Amazon Route 53 Developer Guide.
For a list of the values in each query log and the format of each value, see Logging DNS Queries in the Amazon Route 53 Developer Guide.
For information about charges for query logs, see Amazon CloudWatch Pricing.
If you want Route 53 to stop sending query logs to CloudWatch Logs, delete the query logging configuration. For more information, see DeleteQueryLoggingConfig.
Creates a configuration for DNS query logging. After you create a query logging configuration, Amazon Route 53 begins to publish log data to an Amazon CloudWatch Logs log group.
DNS query logs contain information about the queries that Route 53 receives for a specified public hosted zone, such as the following:
Route 53 edge location that responded to the DNS query
Domain or subdomain that was requested
DNS record type, such as A or AAAA
DNS response code, such as NoError or ServFail
Before you create a query logging configuration, perform the following operations.
If you create a query logging configuration using the Route 53 console, Route 53 performs these operations automatically.
Create a CloudWatch Logs log group, and make note of the ARN, which you specify when you create a query logging configuration. Note the following:
You must create the log group in the us-east-1 region.
You must use the same Amazon Web Services account to create the log group and the hosted zone that you want to configure query logging for.
When you create log groups for query logging, we recommend that you use a consistent prefix, for example:
/aws/route53/hosted zone name
In the next step, you'll create a resource policy, which controls access to one or more log groups and the associated Amazon Web Services resources, such as Route 53 hosted zones. There's a limit on the number of resource policies that you can create, so we recommend that you use a consistent prefix so you can use the same resource policy for all the log groups that you create for query logging.
Create a CloudWatch Logs resource policy, and give it the permissions that Route 53 needs to create log streams and to send query logs to log streams. For the value of Resource, specify the ARN for the log group that you created in the previous step. To use the same resource policy for all the CloudWatch Logs log groups that you created for query logging configurations, replace the hosted zone name with *, for example:
arn:aws:logs:us-east-1:123412341234:log-group:/aws/route53/*
To avoid the confused deputy problem, a security issue where an entity without a permission for an action can coerce a more-privileged entity to perform it, you can optionally limit the permissions that a service has to a resource in a resource-based policy by supplying the following values:
For aws:SourceArn, supply the hosted zone ARN used in creating the query logging configuration. For example, aws:SourceArn: arn:aws:route53:::hostedzone/hosted zone ID.
For aws:SourceAccount, supply the account ID for the account that creates the query logging configuration. For example, aws:SourceAccount:111111111111.
For more information, see The confused deputy problem in the Amazon Web Services IAM User Guide.
You can't use the CloudWatch console to create or edit a resource policy. You must use the CloudWatch API, one of the Amazon Web Services SDKs, or the CLI.
When Route 53 finishes creating the configuration for DNS query logging, it does the following:
Creates a log stream for an edge location the first time that the edge location responds to DNS queries for the specified hosted zone. That log stream is used to log all queries that Route 53 responds to for that edge location.
Begins to send query logs to the applicable log stream.
The name of each log stream is in the following format:
hosted zone ID/edge location code
The edge location code is a three-letter code and an arbitrarily assigned number, for example, DFW3. The three-letter code typically corresponds with the International Air Transport Association airport code for an airport near the edge location. (These abbreviations might change in the future.) For a list of edge locations, see \"The Route 53 Global Network\" on the Route 53 Product Details page.
Query logs contain only the queries that DNS resolvers forward to Route 53. If a DNS resolver has already cached the response to a query (such as the IP address for a load balancer for example.com), the resolver will continue to return the cached response. It doesn't forward another query to Route 53 until the TTL for the corresponding resource record set expires. Depending on how many DNS queries are submitted for a resource record set, and depending on the TTL for that resource record set, query logs might contain information about only one query out of every several thousand queries that are submitted to DNS. For more information about how DNS works, see Routing Internet Traffic to Your Website or Web Application in the Amazon Route 53 Developer Guide.
For a list of the values in each query log and the format of each value, see Logging DNS Queries in the Amazon Route 53 Developer Guide.
For information about charges for query logs, see Amazon CloudWatch Pricing.
If you want Route 53 to stop sending query logs to CloudWatch Logs, delete the query logging configuration. For more information, see DeleteQueryLoggingConfig.
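A sketch of the two setup steps above in aws-sdk-go: first a CloudWatch Logs resource policy granting route53.amazonaws.com the needed permissions, then CreateQueryLoggingConfig. The account ID, hosted zone ID, and log group names are placeholder assumptions.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/cloudwatchlogs"
	"github.com/aws/aws-sdk-go/service/route53"
)

func main() {
	// Query logging log groups must live in us-east-1.
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1")))

	// Step 1: allow Route 53 to create log streams and send query logs.
	policy := `{
	  "Version": "2012-10-17",
	  "Statement": [{
	    "Effect": "Allow",
	    "Principal": {"Service": "route53.amazonaws.com"},
	    "Action": ["logs:CreateLogStream", "logs:PutLogEvents"],
	    "Resource": "arn:aws:logs:us-east-1:123412341234:log-group:/aws/route53/*"
	  }]
	}`
	cwl := cloudwatchlogs.New(sess)
	if _, err := cwl.PutResourcePolicy(&cloudwatchlogs.PutResourcePolicyInput{
		PolicyName:     aws.String("route53-query-logging"),
		PolicyDocument: aws.String(policy),
	}); err != nil {
		log.Fatal(err)
	}

	// Step 2: create the query logging configuration for the zone.
	r53 := route53.New(sess)
	out, err := r53.CreateQueryLoggingConfig(&route53.CreateQueryLoggingConfigInput{
		HostedZoneId:              aws.String("Z1D633PJN98FT9"), // placeholder zone ID
		CloudWatchLogsLogGroupArn: aws.String("arn:aws:logs:us-east-1:123412341234:log-group:/aws/route53/example.com"),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("query logging config:", aws.StringValue(out.QueryLoggingConfig.Id))
}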
Creates a delegation set (a group of four name servers) that can be reused by multiple hosted zones that were created by the same Amazon Web Services account.
You can also create a reusable delegation set that uses the four name servers that are associated with an existing hosted zone. Specify the hosted zone ID in the CreateReusableDelegationSet request.
You can't associate a reusable delegation set with a private hosted zone.
For information about using a reusable delegation set to configure white label name servers, see Configuring White Label Name Servers.
The process for migrating existing hosted zones to use a reusable delegation set is comparable to the process for configuring white label name servers. You need to perform the following steps:
Create a reusable delegation set.
Recreate hosted zones, and reduce the TTL to 60 seconds or less.
Recreate resource record sets in the new hosted zones.
Change the registrar's name servers to use the name servers for the new hosted zones.
Monitor traffic for the website or application.
Change TTLs back to their original values.
If you want to migrate existing hosted zones to use a reusable delegation set, the existing hosted zones can't use any of the name servers that are assigned to the reusable delegation set. If one or more hosted zones do use one or more name servers that are assigned to the reusable delegation set, you can do one of the following:
For small numbers of hosted zones—up to a few hundred—it's relatively easy to create reusable delegation sets until you get one that has four name servers that don't overlap with any of the name servers in your hosted zones.
For larger numbers of hosted zones, the easiest solution is to use more than one reusable delegation set.
For larger numbers of hosted zones, you can also migrate hosted zones that have overlapping name servers to hosted zones that don't have overlapping name servers, then migrate the hosted zones again to use the reusable delegation set.
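A minimal aws-sdk-go sketch of the first migration step, creating a reusable delegation set; the caller reference is a placeholder assumption. Passing HostedZoneId instead would reuse the four name servers of an existing zone.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/route53"
)

func main() {
	svc := route53.New(session.Must(session.NewSession()))

	out, err := svc.CreateReusableDelegationSet(&route53.CreateReusableDelegationSetInput{
		CallerReference: aws.String("reusable-set-001"), // must be unique per request
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("delegation set:", aws.StringValue(out.DelegationSet.Id))
	for _, ns := range out.DelegationSet.NameServers {
		fmt.Println("  name server:", aws.StringValue(ns))
	}
}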
Creates a traffic policy, which you use to create multiple DNS resource record sets for one domain name (such as example.com) or one subdomain name (such as www.example.com).
", "CreateTrafficPolicyInstance": "Creates resource record sets in a specified hosted zone based on the settings in a specified traffic policy version. In addition, CreateTrafficPolicyInstance
associates the resource record sets with a specified domain name (such as example.com) or subdomain name (such as www.example.com). Amazon Route 53 responds to DNS queries for the domain or subdomain name by using the resource record sets that CreateTrafficPolicyInstance
created.
Deletes a traffic policy instance and all of the resource record sets that Amazon Route 53 created when you created the instance.
In the Route 53 console, traffic policy instances are known as policy records.
Removes authorization to submit an AssociateVPCWithHostedZone request to associate a specified VPC with a hosted zone that was created by a different account. You must use the account that created the hosted zone to submit a DeleteVPCAssociationAuthorization request.
Sending this request only prevents the Amazon Web Services account that created the VPC from associating the VPC with the Amazon Route 53 hosted zone in the future. If the VPC is already associated with the hosted zone, DeleteVPCAssociationAuthorization won't disassociate the VPC from the hosted zone. If you want to delete an existing association, use DisassociateVPCFromHostedZone.
Disables DNSSEC signing in a specific hosted zone. This action does not deactivate any key-signing keys (KSKs) that are active in the hosted zone.
", - "DisassociateVPCFromHostedZone": "Disassociates an Amazon Virtual Private Cloud (Amazon VPC) from an Amazon Route 53 private hosted zone. Note the following:
You can't disassociate the last Amazon VPC from a private hosted zone.
You can't convert a private hosted zone into a public hosted zone.
You can submit a DisassociateVPCFromHostedZone
request using either the account that created the hosted zone or the account that created the Amazon VPC.
Some services, such as Cloud Map and Amazon Elastic File System (Amazon EFS) automatically create hosted zones and associate VPCs with the hosted zones. A service can create a hosted zone using your account or using its own account. You can disassociate a VPC from a hosted zone only if the service created the hosted zone using your account.
When you run DisassociateVPCFromHostedZone, if the hosted zone has a value for OwningAccount
, you can use DisassociateVPCFromHostedZone
. If the hosted zone has a value for OwningService
, you can't use DisassociateVPCFromHostedZone
.
Disassociates an Amazon Virtual Private Cloud (Amazon VPC) from an Amazon Route 53 private hosted zone. Note the following:
You can't disassociate the last Amazon VPC from a private hosted zone.
You can't convert a private hosted zone into a public hosted zone.
You can submit a DisassociateVPCFromHostedZone request using either the account that created the hosted zone or the account that created the Amazon VPC.
Some services, such as Cloud Map and Amazon Elastic File System (Amazon EFS), automatically create hosted zones and associate VPCs with the hosted zones. A service can create a hosted zone using your account or using its own account. You can disassociate a VPC from a hosted zone only if the service created the hosted zone using your account.
When you run DisassociateVPCFromHostedZone, if the hosted zone has a value for OwningAccount, you can use DisassociateVPCFromHostedZone. If the hosted zone has a value for OwningService, you can't use DisassociateVPCFromHostedZone.
When revoking access, the hosted zone and the Amazon VPC must belong to the same partition. A partition is a group of Amazon Web Services Regions. Each Amazon Web Services account is scoped to one partition.
The following are the supported partitions:
aws - Amazon Web Services Regions
aws-cn - China Regions
aws-us-gov - Amazon Web Services GovCloud (US) Region
For more information, see Access Management in the Amazon Web Services General Reference.
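A minimal aws-sdk-go sketch of DisassociateVPCFromHostedZone; the hosted zone and VPC IDs are placeholder assumptions, and the zone and VPC must be in the same partition.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/route53"
)

func main() {
	svc := route53.New(session.Must(session.NewSession()))

	out, err := svc.DisassociateVPCFromHostedZone(&route53.DisassociateVPCFromHostedZoneInput{
		HostedZoneId: aws.String("Z1D633PJN98FT9"), // placeholder zone ID
		VPC: &route53.VPC{
			VPCId:     aws.String("vpc-0123456789abcdef0"), // placeholder VPC ID
			VPCRegion: aws.String(route53.VPCRegionUsEast1),
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("status:", aws.StringValue(out.ChangeInfo.Status))
}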
Enables DNSSEC signing in a specific hosted zone.
", "GetAccountLimit": "Gets the specified limit for the current account, for example, the maximum number of health checks that you can create using the account.
For the default limit, see Limits in the Amazon Route 53 Developer Guide. To request a higher limit, open a case.
You can also view account limits in Amazon Web Services Trusted Advisor. Sign in to the Amazon Web Services Management Console and open the Trusted Advisor console at https://console.aws.amazon.com/trustedadvisor/. Then choose Service limits in the navigation pane.
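A minimal aws-sdk-go sketch that reads the health check limit; the limit type shown is one of the generated AccountLimitType constants.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/route53"
)

func main() {
	svc := route53.New(session.Must(session.NewSession()))

	out, err := svc.GetAccountLimit(&route53.GetAccountLimitInput{
		Type: aws.String(route53.AccountLimitTypeMaxHealthChecksByOwner),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("health checks: %d of %d used\n",
		aws.Int64Value(out.Count), aws.Int64Value(out.Limit.Value))
}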
Returns the current status of a change batch request. The status is one of the following values:
PENDING indicates that the changes in this request have not propagated to all Amazon Route 53 DNS servers. This is the initial status of all change batch requests.
INSYNC indicates that the changes have propagated to all Route 53 DNS servers.
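A sketch in aws-sdk-go: the SDK's built-in ResourceRecordSetsChanged waiter polls GetChange until the status reaches INSYNC; the change batch ID is a placeholder assumption.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/route53"
)

func main() {
	svc := route53.New(session.Must(session.NewSession()))
	changeID := aws.String("C2682N5HXP0BZ4") // placeholder change batch ID

	// Poll GetChange until the change batch status becomes INSYNC.
	if err := svc.WaitUntilResourceRecordSetsChanged(&route53.GetChangeInput{Id: changeID}); err != nil {
		log.Fatal(err)
	}

	out, err := svc.GetChange(&route53.GetChangeInput{Id: changeID})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("status:", aws.StringValue(out.ChangeInfo.Status)) // PENDING or INSYNC
}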
Retrieve a list of the health checks that are associated with the current Amazon Web Services account.
", "ListHostedZones": "Retrieves a list of the public and private hosted zones that are associated with the current Amazon Web Services account. The response includes a HostedZones
child element for each hosted zone.
Amazon Route 53 returns a maximum of 100 items in each response. If you have a lot of hosted zones, you can use the maxitems
parameter to list them in groups of up to 100.
Retrieves a list of your hosted zones in lexicographic order. The response includes a HostedZones child element for each hosted zone created by the current Amazon Web Services account.
ListHostedZonesByName sorts hosted zones by name with the labels reversed. For example:
com.example.www.
Note the trailing dot, which can change the sort order in some circumstances.
If the domain name includes escape characters or Punycode, ListHostedZonesByName alphabetizes the domain name using the escaped or Punycoded value, which is the format that Amazon Route 53 saves in its database. For example, to create a hosted zone for exämple.com, you specify ex\\344mple.com for the domain name. ListHostedZonesByName alphabetizes it as:
com.ex\\344mple.
The labels are reversed and alphabetized using the escaped value. For more information about valid domain name formats, including internationalized domain names, see DNS Domain Name Format in the Amazon Route 53 Developer Guide.
Route 53 returns up to 100 items in each response. If you have a lot of hosted zones, use the MaxItems parameter to list them in groups of up to 100. The response includes values that help navigate from one group of MaxItems hosted zones to the next:
The DNSName and HostedZoneId elements in the response contain the values, if any, specified for the dnsname and hostedzoneid parameters in the request that produced the current response.
The MaxItems element in the response contains the value, if any, that you specified for the maxitems parameter in the request that produced the current response.
If the value of IsTruncated in the response is true, there are more hosted zones associated with the current Amazon Web Services account.
If IsTruncated is false, this response includes the last hosted zone that is associated with the current account. The NextDNSName and NextHostedZoneId elements are omitted from the response.
The NextDNSName and NextHostedZoneId elements in the response contain the domain name and the hosted zone ID of the next hosted zone that is associated with the current Amazon Web Services account. If you want to list more hosted zones, make another call to ListHostedZonesByName, and specify the value of NextDNSName and NextHostedZoneId in the dnsname and hostedzoneid parameters, respectively.
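A sketch of this pagination loop in aws-sdk-go, feeding NextDNSName and NextHostedZoneId back into the next request; nothing here is specific to a real zone.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/route53"
)

func main() {
	svc := route53.New(session.Must(session.NewSession()))

	input := &route53.ListHostedZonesByNameInput{MaxItems: aws.String("100")}
	for {
		out, err := svc.ListHostedZonesByName(input)
		if err != nil {
			log.Fatal(err)
		}
		for _, hz := range out.HostedZones {
			fmt.Println(aws.StringValue(hz.Name), aws.StringValue(hz.Id))
		}
		if !aws.BoolValue(out.IsTruncated) {
			break
		}
		// Carry NextDNSName/NextHostedZoneId into the dnsname/hostedzoneid parameters.
		input.DNSName = out.NextDNSName
		input.HostedZoneId = out.NextHostedZoneId
	}
}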
Lists all the private hosted zones that a specified VPC is associated with, regardless of which Amazon Web Services account or Amazon Web Services service owns the hosted zones. The HostedZoneOwner structure in the response contains one of the following values:
An OwningAccount element, which contains the account number of either the current Amazon Web Services account or another Amazon Web Services account. Some services, such as Cloud Map, create hosted zones using the current account.
An OwningService element, which identifies the Amazon Web Services service that created and owns the hosted zone. For example, if a hosted zone was created by Amazon Elastic File System (Amazon EFS), the value of Owner is efs.amazonaws.com.
When listing private hosted zones, the hosted zone and the Amazon VPC must belong to the same partition where the hosted zones were created. A partition is a group of Amazon Web Services Regions. Each Amazon Web Services account is scoped to one partition.
The following are the supported partitions:
aws - Amazon Web Services Regions
aws-cn - China Regions
aws-us-gov - Amazon Web Services GovCloud (US) Region
For more information, see Access Management in the Amazon Web Services General Reference.
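A minimal aws-sdk-go sketch of ListHostedZonesByVPC that prints each zone's owner; the VPC ID is a placeholder assumption.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/route53"
)

func main() {
	svc := route53.New(session.Must(session.NewSession()))

	out, err := svc.ListHostedZonesByVPC(&route53.ListHostedZonesByVPCInput{
		VPCId:     aws.String("vpc-0123456789abcdef0"), // placeholder VPC ID
		VPCRegion: aws.String(route53.VPCRegionUsEast1),
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, hz := range out.HostedZoneSummaries {
		// Owner contains either OwningAccount or OwningService, never both.
		fmt.Println(aws.StringValue(hz.Name),
			"account:", aws.StringValue(hz.Owner.OwningAccount),
			"service:", aws.StringValue(hz.Owner.OwningService))
	}
}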
Lists the configurations for DNS query logging that are associated with the current Amazon Web Services account or the configuration that is associated with a specified hosted zone.
For more information about DNS query logs, see CreateQueryLoggingConfig. Additional information, including the format of DNS query logs, appears in Logging DNS Queries in the Amazon Route 53 Developer Guide.
", "ListResourceRecordSets": "Lists the resource record sets in a specified hosted zone.
ListResourceRecordSets
returns up to 300 resource record sets at a time in ASCII order, beginning at a position specified by the name
and type
elements.
Sort order
ListResourceRecordSets sorts results first by DNS name with the labels reversed, for example:
com.example.www.
Note the trailing dot, which can change the sort order when the record name contains characters that appear before . (decimal 46) in the ASCII table. These characters include the following: ! \" # $ % & ' ( ) * + , -
When multiple records have the same DNS name, ListResourceRecordSets sorts results by the record type.
Specifying where to start listing records
You can use the name and type elements to specify the resource record set that the list begins with:
If you do not specify Name or Type: The results begin with the first resource record set that the hosted zone contains.
If you specify Name but not Type: The results begin with the first resource record set in the list whose name is greater than or equal to Name.
If you specify Type but not Name: Amazon Route 53 returns the InvalidInput error.
If you specify both Name and Type: The results begin with the first resource record set in the list whose name is greater than or equal to Name, and whose type is greater than or equal to Type.
Resource record sets that are PENDING
This action returns the most current version of the records. This includes records that are PENDING, and that are not yet available on all Route 53 DNS servers.
Changing resource record sets
To ensure that you get an accurate listing of the resource record sets for a hosted zone at a point in time, do not submit a ChangeResourceRecordSets request while you're paging through the results of a ListResourceRecordSets request. If you do, some pages may display results without the latest changes while other pages display results with the latest changes.
Displaying the next page of results
If a ListResourceRecordSets command returns more than one page of results, the value of IsTruncated is true. To display the next page of results, get the values of NextRecordName, NextRecordType, and NextRecordIdentifier (if any) from the response. Then submit another ListResourceRecordSets request, and specify those values for StartRecordName, StartRecordType, and StartRecordIdentifier.
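A sketch of this paging pattern in aws-sdk-go; the hosted zone ID is a placeholder assumption.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/route53"
)

func main() {
	svc := route53.New(session.Must(session.NewSession()))

	input := &route53.ListResourceRecordSetsInput{
		HostedZoneId: aws.String("Z1D633PJN98FT9"), // placeholder zone ID
	}
	for {
		out, err := svc.ListResourceRecordSets(input)
		if err != nil {
			log.Fatal(err)
		}
		for _, rrs := range out.ResourceRecordSets {
			fmt.Println(aws.StringValue(rrs.Name), aws.StringValue(rrs.Type))
		}
		if !aws.BoolValue(out.IsTruncated) {
			break
		}
		// Carry the Next* values into the Start* parameters of the next call.
		input.StartRecordName = out.NextRecordName
		input.StartRecordType = out.NextRecordType
		input.StartRecordIdentifier = out.NextRecordIdentifier
	}
}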
Retrieves a list of the reusable delegation sets that are associated with the current Amazon Web Services account.
", diff --git a/models/apis/s3/2006-03-01/api-2.json b/models/apis/s3/2006-03-01/api-2.json index 44e3b65a78a..fd78bb64a58 100644 --- a/models/apis/s3/2006-03-01/api-2.json +++ b/models/apis/s3/2006-03-01/api-2.json @@ -231,7 +231,10 @@ "output":{"shape":"DeleteObjectsOutput"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/multiobjectdeleteapi.html", "alias":"DeleteMultipleObjects", - "httpChecksumRequired":true + "httpChecksum":{ + "requestAlgorithmMember":"ChecksumAlgorithm", + "requestChecksumRequired":true + } }, "DeletePublicAccessBlock":{ "name":"DeletePublicAccessBlock", @@ -465,7 +468,16 @@ {"shape":"NoSuchKey"}, {"shape":"InvalidObjectState"} ], - "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTObjectGET.html" + "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTObjectGET.html", + "httpChecksum":{ + "requestValidationModeMember":"ChecksumMode", + "responseAlgorithms":[ + "CRC32", + "CRC32C", + "SHA256", + "SHA1" + ] + } }, "GetObjectAcl":{ "name":"GetObjectAcl", @@ -480,6 +492,18 @@ ], "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTObjectGETacl.html" }, + "GetObjectAttributes":{ + "name":"GetObjectAttributes", + "http":{ + "method":"GET", + "requestUri":"/{Bucket}/{Key+}?attributes" + }, + "input":{"shape":"GetObjectAttributesRequest"}, + "output":{"shape":"GetObjectAttributesOutput"}, + "errors":[ + {"shape":"NoSuchKey"} + ] + }, "GetObjectLegalHold":{ "name":"GetObjectLegalHold", "http":{ @@ -669,7 +693,11 @@ "method":"PUT", "requestUri":"/{Bucket}?accelerate" }, - "input":{"shape":"PutBucketAccelerateConfigurationRequest"} + "input":{"shape":"PutBucketAccelerateConfigurationRequest"}, + "httpChecksum":{ + "requestAlgorithmMember":"ChecksumAlgorithm", + "requestChecksumRequired":false + } }, "PutBucketAcl":{ "name":"PutBucketAcl", @@ -679,7 +707,10 @@ }, "input":{"shape":"PutBucketAclRequest"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketPUTacl.html", - "httpChecksumRequired":true + "httpChecksum":{ + "requestAlgorithmMember":"ChecksumAlgorithm", + "requestChecksumRequired":true + } }, "PutBucketAnalyticsConfiguration":{ "name":"PutBucketAnalyticsConfiguration", @@ -697,7 +728,10 @@ }, "input":{"shape":"PutBucketCorsRequest"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketPUTcors.html", - "httpChecksumRequired":true + "httpChecksum":{ + "requestAlgorithmMember":"ChecksumAlgorithm", + "requestChecksumRequired":true + } }, "PutBucketEncryption":{ "name":"PutBucketEncryption", @@ -706,7 +740,10 @@ "requestUri":"/{Bucket}?encryption" }, "input":{"shape":"PutBucketEncryptionRequest"}, - "httpChecksumRequired":true + "httpChecksum":{ + "requestAlgorithmMember":"ChecksumAlgorithm", + "requestChecksumRequired":true + } }, "PutBucketIntelligentTieringConfiguration":{ "name":"PutBucketIntelligentTieringConfiguration", @@ -733,7 +770,10 @@ "input":{"shape":"PutBucketLifecycleRequest"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketPUTlifecycle.html", "deprecated":true, - "httpChecksumRequired":true + "httpChecksum":{ + "requestAlgorithmMember":"ChecksumAlgorithm", + "requestChecksumRequired":true + } }, "PutBucketLifecycleConfiguration":{ "name":"PutBucketLifecycleConfiguration", @@ -742,7 +782,10 @@ "requestUri":"/{Bucket}?lifecycle" }, "input":{"shape":"PutBucketLifecycleConfigurationRequest"}, - "httpChecksumRequired":true + "httpChecksum":{ + 
"requestAlgorithmMember":"ChecksumAlgorithm", + "requestChecksumRequired":true + } }, "PutBucketLogging":{ "name":"PutBucketLogging", @@ -752,7 +795,10 @@ }, "input":{"shape":"PutBucketLoggingRequest"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketPUTlogging.html", - "httpChecksumRequired":true + "httpChecksum":{ + "requestAlgorithmMember":"ChecksumAlgorithm", + "requestChecksumRequired":true + } }, "PutBucketMetricsConfiguration":{ "name":"PutBucketMetricsConfiguration", @@ -771,7 +817,10 @@ "input":{"shape":"PutBucketNotificationRequest"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketPUTnotification.html", "deprecated":true, - "httpChecksumRequired":true + "httpChecksum":{ + "requestAlgorithmMember":"ChecksumAlgorithm", + "requestChecksumRequired":true + } }, "PutBucketNotificationConfiguration":{ "name":"PutBucketNotificationConfiguration", @@ -788,7 +837,7 @@ "requestUri":"/{Bucket}?ownershipControls" }, "input":{"shape":"PutBucketOwnershipControlsRequest"}, - "httpChecksumRequired":true + "httpChecksum":{"requestChecksumRequired":true} }, "PutBucketPolicy":{ "name":"PutBucketPolicy", @@ -798,7 +847,10 @@ }, "input":{"shape":"PutBucketPolicyRequest"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketPUTpolicy.html", - "httpChecksumRequired":true + "httpChecksum":{ + "requestAlgorithmMember":"ChecksumAlgorithm", + "requestChecksumRequired":true + } }, "PutBucketReplication":{ "name":"PutBucketReplication", @@ -807,7 +859,10 @@ "requestUri":"/{Bucket}?replication" }, "input":{"shape":"PutBucketReplicationRequest"}, - "httpChecksumRequired":true + "httpChecksum":{ + "requestAlgorithmMember":"ChecksumAlgorithm", + "requestChecksumRequired":true + } }, "PutBucketRequestPayment":{ "name":"PutBucketRequestPayment", @@ -817,7 +872,10 @@ }, "input":{"shape":"PutBucketRequestPaymentRequest"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTrequestPaymentPUT.html", - "httpChecksumRequired":true + "httpChecksum":{ + "requestAlgorithmMember":"ChecksumAlgorithm", + "requestChecksumRequired":true + } }, "PutBucketTagging":{ "name":"PutBucketTagging", @@ -827,7 +885,10 @@ }, "input":{"shape":"PutBucketTaggingRequest"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketPUTtagging.html", - "httpChecksumRequired":true + "httpChecksum":{ + "requestAlgorithmMember":"ChecksumAlgorithm", + "requestChecksumRequired":true + } }, "PutBucketVersioning":{ "name":"PutBucketVersioning", @@ -837,7 +898,10 @@ }, "input":{"shape":"PutBucketVersioningRequest"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketPUTVersioningStatus.html", - "httpChecksumRequired":true + "httpChecksum":{ + "requestAlgorithmMember":"ChecksumAlgorithm", + "requestChecksumRequired":true + } }, "PutBucketWebsite":{ "name":"PutBucketWebsite", @@ -847,7 +911,10 @@ }, "input":{"shape":"PutBucketWebsiteRequest"}, "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketPUTwebsite.html", - "httpChecksumRequired":true + "httpChecksum":{ + "requestAlgorithmMember":"ChecksumAlgorithm", + "requestChecksumRequired":true + } }, "PutObject":{ "name":"PutObject", @@ -857,7 +924,11 @@ }, "input":{"shape":"PutObjectRequest"}, "output":{"shape":"PutObjectOutput"}, - "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTObjectPUT.html" + 
"documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTObjectPUT.html", + "httpChecksum":{ + "requestAlgorithmMember":"ChecksumAlgorithm", + "requestChecksumRequired":false + } }, "PutObjectAcl":{ "name":"PutObjectAcl", @@ -871,7 +942,10 @@ {"shape":"NoSuchKey"} ], "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTObjectPUTacl.html", - "httpChecksumRequired":true + "httpChecksum":{ + "requestAlgorithmMember":"ChecksumAlgorithm", + "requestChecksumRequired":true + } }, "PutObjectLegalHold":{ "name":"PutObjectLegalHold", @@ -881,7 +955,10 @@ }, "input":{"shape":"PutObjectLegalHoldRequest"}, "output":{"shape":"PutObjectLegalHoldOutput"}, - "httpChecksumRequired":true + "httpChecksum":{ + "requestAlgorithmMember":"ChecksumAlgorithm", + "requestChecksumRequired":true + } }, "PutObjectLockConfiguration":{ "name":"PutObjectLockConfiguration", @@ -891,7 +968,10 @@ }, "input":{"shape":"PutObjectLockConfigurationRequest"}, "output":{"shape":"PutObjectLockConfigurationOutput"}, - "httpChecksumRequired":true + "httpChecksum":{ + "requestAlgorithmMember":"ChecksumAlgorithm", + "requestChecksumRequired":true + } }, "PutObjectRetention":{ "name":"PutObjectRetention", @@ -901,7 +981,10 @@ }, "input":{"shape":"PutObjectRetentionRequest"}, "output":{"shape":"PutObjectRetentionOutput"}, - "httpChecksumRequired":true + "httpChecksum":{ + "requestAlgorithmMember":"ChecksumAlgorithm", + "requestChecksumRequired":true + } }, "PutObjectTagging":{ "name":"PutObjectTagging", @@ -911,7 +994,10 @@ }, "input":{"shape":"PutObjectTaggingRequest"}, "output":{"shape":"PutObjectTaggingOutput"}, - "httpChecksumRequired":true + "httpChecksum":{ + "requestAlgorithmMember":"ChecksumAlgorithm", + "requestChecksumRequired":true + } }, "PutPublicAccessBlock":{ "name":"PutPublicAccessBlock", @@ -920,7 +1006,10 @@ "requestUri":"/{Bucket}?publicAccessBlock" }, "input":{"shape":"PutPublicAccessBlockRequest"}, - "httpChecksumRequired":true + "httpChecksum":{ + "requestAlgorithmMember":"ChecksumAlgorithm", + "requestChecksumRequired":true + } }, "RestoreObject":{ "name":"RestoreObject", @@ -934,7 +1023,11 @@ {"shape":"ObjectAlreadyInActiveTierError"} ], "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTObjectRestore.html", - "alias":"PostObjectRestore" + "alias":"PostObjectRestore", + "httpChecksum":{ + "requestAlgorithmMember":"ChecksumAlgorithm", + "requestChecksumRequired":false + } }, "SelectObjectContent":{ "name":"SelectObjectContent", @@ -957,7 +1050,11 @@ }, "input":{"shape":"UploadPartRequest"}, "output":{"shape":"UploadPartOutput"}, - "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/mpUploadUploadPart.html" + "documentationUrl":"http://docs.amazonwebservices.com/AmazonS3/latest/API/mpUploadUploadPart.html", + "httpChecksum":{ + "requestAlgorithmMember":"ChecksumAlgorithm", + "requestChecksumRequired":false + } }, "UploadPartCopy":{ "name":"UploadPartCopy", @@ -1324,6 +1421,37 @@ } }, "CacheControl":{"type":"string"}, + "Checksum":{ + "type":"structure", + "members":{ + "ChecksumCRC32":{"shape":"ChecksumCRC32"}, + "ChecksumCRC32C":{"shape":"ChecksumCRC32C"}, + "ChecksumSHA1":{"shape":"ChecksumSHA1"}, + "ChecksumSHA256":{"shape":"ChecksumSHA256"} + } + }, + "ChecksumAlgorithm":{ + "type":"string", + "enum":[ + "CRC32", + "CRC32C", + "SHA1", + "SHA256" + ] + }, + "ChecksumAlgorithmList":{ + "type":"list", + "member":{"shape":"ChecksumAlgorithm"}, + "flattened":true + }, + "ChecksumCRC32":{"type":"string"}, + 
"ChecksumCRC32C":{"type":"string"}, + "ChecksumMode":{ + "type":"string", + "enum":["ENABLED"] + }, + "ChecksumSHA1":{"type":"string"}, + "ChecksumSHA256":{"type":"string"}, "CloudFunction":{"type":"string"}, "CloudFunctionConfiguration":{ "type":"structure", @@ -1367,6 +1495,10 @@ "locationName":"x-amz-expiration" }, "ETag":{"shape":"ETag"}, + "ChecksumCRC32":{"shape":"ChecksumCRC32"}, + "ChecksumCRC32C":{"shape":"ChecksumCRC32C"}, + "ChecksumSHA1":{"shape":"ChecksumSHA1"}, + "ChecksumSHA256":{"shape":"ChecksumSHA256"}, "ServerSideEncryption":{ "shape":"ServerSideEncryption", "location":"header", @@ -1422,6 +1554,26 @@ "location":"querystring", "locationName":"uploadId" }, + "ChecksumCRC32":{ + "shape":"ChecksumCRC32", + "location":"header", + "locationName":"x-amz-checksum-crc32" + }, + "ChecksumCRC32C":{ + "shape":"ChecksumCRC32C", + "location":"header", + "locationName":"x-amz-checksum-crc32c" + }, + "ChecksumSHA1":{ + "shape":"ChecksumSHA1", + "location":"header", + "locationName":"x-amz-checksum-sha1" + }, + "ChecksumSHA256":{ + "shape":"ChecksumSHA256", + "location":"header", + "locationName":"x-amz-checksum-sha256" + }, "RequestPayer":{ "shape":"RequestPayer", "location":"header", @@ -1431,6 +1583,21 @@ "shape":"AccountId", "location":"header", "locationName":"x-amz-expected-bucket-owner" + }, + "SSECustomerAlgorithm":{ + "shape":"SSECustomerAlgorithm", + "location":"header", + "locationName":"x-amz-server-side-encryption-customer-algorithm" + }, + "SSECustomerKey":{ + "shape":"SSECustomerKey", + "location":"header", + "locationName":"x-amz-server-side-encryption-customer-key" + }, + "SSECustomerKeyMD5":{ + "shape":"SSECustomerKeyMD5", + "location":"header", + "locationName":"x-amz-server-side-encryption-customer-key-MD5" } }, "payload":"MultipartUpload" @@ -1448,6 +1615,10 @@ "type":"structure", "members":{ "ETag":{"shape":"ETag"}, + "ChecksumCRC32":{"shape":"ChecksumCRC32"}, + "ChecksumCRC32C":{"shape":"ChecksumCRC32C"}, + "ChecksumSHA1":{"shape":"ChecksumSHA1"}, + "ChecksumSHA256":{"shape":"ChecksumSHA256"}, "PartNumber":{"shape":"PartNumber"} } }, @@ -1565,6 +1736,11 @@ "location":"header", "locationName":"Cache-Control" }, + "ChecksumAlgorithm":{ + "shape":"ChecksumAlgorithm", + "location":"header", + "locationName":"x-amz-checksum-algorithm" + }, "ContentDisposition":{ "shape":"ContentDisposition", "location":"header", @@ -1756,14 +1932,22 @@ "type":"structure", "members":{ "ETag":{"shape":"ETag"}, - "LastModified":{"shape":"LastModified"} + "LastModified":{"shape":"LastModified"}, + "ChecksumCRC32":{"shape":"ChecksumCRC32"}, + "ChecksumCRC32C":{"shape":"ChecksumCRC32C"}, + "ChecksumSHA1":{"shape":"ChecksumSHA1"}, + "ChecksumSHA256":{"shape":"ChecksumSHA256"} } }, "CopyPartResult":{ "type":"structure", "members":{ "ETag":{"shape":"ETag"}, - "LastModified":{"shape":"LastModified"} + "LastModified":{"shape":"LastModified"}, + "ChecksumCRC32":{"shape":"ChecksumCRC32"}, + "ChecksumCRC32C":{"shape":"ChecksumCRC32C"}, + "ChecksumSHA1":{"shape":"ChecksumSHA1"}, + "ChecksumSHA256":{"shape":"ChecksumSHA256"} } }, "CopySource":{ @@ -1908,6 +2092,11 @@ "shape":"RequestCharged", "location":"header", "locationName":"x-amz-request-charged" + }, + "ChecksumAlgorithm":{ + "shape":"ChecksumAlgorithm", + "location":"header", + "locationName":"x-amz-checksum-algorithm" } } }, @@ -2062,6 +2251,11 @@ "shape":"AccountId", "location":"header", "locationName":"x-amz-expected-bucket-owner" + }, + "ChecksumAlgorithm":{ + "shape":"ChecksumAlgorithm", + "location":"header", + 
"locationName":"x-amz-checksum-algorithm" } } }, @@ -2510,6 +2704,11 @@ "shape":"AccountId", "location":"header", "locationName":"x-amz-expected-bucket-owner" + }, + "ChecksumAlgorithm":{ + "shape":"ChecksumAlgorithm", + "location":"header", + "locationName":"x-amz-sdk-checksum-algorithm" } }, "payload":"Delete" @@ -3281,6 +3480,118 @@ } } }, + "GetObjectAttributesOutput":{ + "type":"structure", + "members":{ + "DeleteMarker":{ + "shape":"DeleteMarker", + "location":"header", + "locationName":"x-amz-delete-marker" + }, + "LastModified":{ + "shape":"LastModified", + "location":"header", + "locationName":"Last-Modified" + }, + "VersionId":{ + "shape":"ObjectVersionId", + "location":"header", + "locationName":"x-amz-version-id" + }, + "RequestCharged":{ + "shape":"RequestCharged", + "location":"header", + "locationName":"x-amz-request-charged" + }, + "ETag":{"shape":"ETag"}, + "Checksum":{"shape":"Checksum"}, + "ObjectParts":{"shape":"GetObjectAttributesParts"}, + "StorageClass":{"shape":"StorageClass"}, + "ObjectSize":{"shape":"ObjectSize"} + } + }, + "GetObjectAttributesParts":{ + "type":"structure", + "members":{ + "TotalPartsCount":{ + "shape":"PartsCount", + "locationName":"PartsCount" + }, + "PartNumberMarker":{"shape":"PartNumberMarker"}, + "NextPartNumberMarker":{"shape":"NextPartNumberMarker"}, + "MaxParts":{"shape":"MaxParts"}, + "IsTruncated":{"shape":"IsTruncated"}, + "Parts":{ + "shape":"PartsList", + "locationName":"Part" + } + } + }, + "GetObjectAttributesRequest":{ + "type":"structure", + "required":[ + "Bucket", + "Key", + "ObjectAttributes" + ], + "members":{ + "Bucket":{ + "shape":"BucketName", + "location":"uri", + "locationName":"Bucket" + }, + "Key":{ + "shape":"ObjectKey", + "location":"uri", + "locationName":"Key" + }, + "VersionId":{ + "shape":"ObjectVersionId", + "location":"querystring", + "locationName":"versionId" + }, + "MaxParts":{ + "shape":"MaxParts", + "location":"header", + "locationName":"x-amz-max-parts" + }, + "PartNumberMarker":{ + "shape":"PartNumberMarker", + "location":"header", + "locationName":"x-amz-part-number-marker" + }, + "SSECustomerAlgorithm":{ + "shape":"SSECustomerAlgorithm", + "location":"header", + "locationName":"x-amz-server-side-encryption-customer-algorithm" + }, + "SSECustomerKey":{ + "shape":"SSECustomerKey", + "location":"header", + "locationName":"x-amz-server-side-encryption-customer-key" + }, + "SSECustomerKeyMD5":{ + "shape":"SSECustomerKeyMD5", + "location":"header", + "locationName":"x-amz-server-side-encryption-customer-key-MD5" + }, + "RequestPayer":{ + "shape":"RequestPayer", + "location":"header", + "locationName":"x-amz-request-payer" + }, + "ExpectedBucketOwner":{ + "shape":"AccountId", + "location":"header", + "locationName":"x-amz-expected-bucket-owner" + }, + "ObjectAttributes":{ + "shape":"ObjectAttributesList", + "location":"header", + "locationName":"x-amz-object-attributes" + } + } + }, "GetObjectLegalHoldOutput":{ "type":"structure", "members":{ @@ -3387,6 +3698,26 @@ "location":"header", "locationName":"ETag" }, + "ChecksumCRC32":{ + "shape":"ChecksumCRC32", + "location":"header", + "locationName":"x-amz-checksum-crc32" + }, + "ChecksumCRC32C":{ + "shape":"ChecksumCRC32C", + "location":"header", + "locationName":"x-amz-checksum-crc32c" + }, + "ChecksumSHA1":{ + "shape":"ChecksumSHA1", + "location":"header", + "locationName":"x-amz-checksum-sha1" + }, + "ChecksumSHA256":{ + "shape":"ChecksumSHA256", + "location":"header", + "locationName":"x-amz-checksum-sha256" + }, "MissingMeta":{ "shape":"MissingMeta", 
"location":"header", @@ -3616,6 +3947,11 @@ "shape":"AccountId", "location":"header", "locationName":"x-amz-expected-bucket-owner" + }, + "ChecksumMode":{ + "shape":"ChecksumMode", + "location":"header", + "locationName":"x-amz-checksum-mode" } } }, @@ -3873,6 +4209,26 @@ "location":"header", "locationName":"Content-Length" }, + "ChecksumCRC32":{ + "shape":"ChecksumCRC32", + "location":"header", + "locationName":"x-amz-checksum-crc32" + }, + "ChecksumCRC32C":{ + "shape":"ChecksumCRC32C", + "location":"header", + "locationName":"x-amz-checksum-crc32c" + }, + "ChecksumSHA1":{ + "shape":"ChecksumSHA1", + "location":"header", + "locationName":"x-amz-checksum-sha1" + }, + "ChecksumSHA256":{ + "shape":"ChecksumSHA256", + "location":"header", + "locationName":"x-amz-checksum-sha256" + }, "ETag":{ "shape":"ETag", "location":"header", @@ -4066,6 +4422,11 @@ "shape":"AccountId", "location":"header", "locationName":"x-amz-expected-bucket-owner" + }, + "ChecksumMode":{ + "shape":"ChecksumMode", + "location":"header", + "locationName":"x-amz-checksum-mode" } } }, @@ -4254,7 +4615,8 @@ "ObjectLockMode", "ObjectLockLegalHoldStatus", "IntelligentTieringAccessTier", - "BucketKeyStatus" + "BucketKeyStatus", + "ChecksumAlgorithm" ] }, "InventoryOptionalFields":{ @@ -4843,7 +5205,8 @@ "shape":"RequestCharged", "location":"header", "locationName":"x-amz-request-charged" - } + }, + "ChecksumAlgorithm":{"shape":"ChecksumAlgorithm"} } }, "ListPartsRequest":{ @@ -4888,6 +5251,21 @@ "shape":"AccountId", "location":"header", "locationName":"x-amz-expected-bucket-owner" + }, + "SSECustomerAlgorithm":{ + "shape":"SSECustomerAlgorithm", + "location":"header", + "locationName":"x-amz-server-side-encryption-customer-algorithm" + }, + "SSECustomerKey":{ + "shape":"SSECustomerKey", + "location":"header", + "locationName":"x-amz-server-side-encryption-customer-key" + }, + "SSECustomerKeyMD5":{ + "shape":"SSECustomerKeyMD5", + "location":"header", + "locationName":"x-amz-server-side-encryption-customer-key-MD5" } } }, @@ -5007,7 +5385,8 @@ "Initiated":{"shape":"Initiated"}, "StorageClass":{"shape":"StorageClass"}, "Owner":{"shape":"Owner"}, - "Initiator":{"shape":"Initiator"} + "Initiator":{"shape":"Initiator"}, + "ChecksumAlgorithm":{"shape":"ChecksumAlgorithm"} } }, "MultipartUploadId":{"type":"string"}, @@ -5102,6 +5481,7 @@ "Key":{"shape":"ObjectKey"}, "LastModified":{"shape":"LastModified"}, "ETag":{"shape":"ETag"}, + "ChecksumAlgorithm":{"shape":"ChecksumAlgorithmList"}, "Size":{"shape":"Size"}, "StorageClass":{"shape":"ObjectStorageClass"}, "Owner":{"shape":"Owner"} @@ -5113,6 +5493,20 @@ }, "exception":true }, + "ObjectAttributes":{ + "type":"string", + "enum":[ + "ETag", + "Checksum", + "ObjectParts", + "StorageClass", + "ObjectSize" + ] + }, + "ObjectAttributesList":{ + "type":"list", + "member":{"shape":"ObjectAttributes"} + }, "ObjectCannedACL":{ "type":"string", "enum":[ @@ -5218,6 +5612,18 @@ "BucketOwnerEnforced" ] }, + "ObjectPart":{ + "type":"structure", + "members":{ + "PartNumber":{"shape":"PartNumber"}, + "Size":{"shape":"Size"}, + "ChecksumCRC32":{"shape":"ChecksumCRC32"}, + "ChecksumCRC32C":{"shape":"ChecksumCRC32C"}, + "ChecksumSHA1":{"shape":"ChecksumSHA1"}, + "ChecksumSHA256":{"shape":"ChecksumSHA256"} + } + }, + "ObjectSize":{"type":"long"}, "ObjectSizeGreaterThanBytes":{"type":"long"}, "ObjectSizeLessThanBytes":{"type":"long"}, "ObjectStorageClass":{ @@ -5238,6 +5644,7 @@ "type":"structure", "members":{ "ETag":{"shape":"ETag"}, + "ChecksumAlgorithm":{"shape":"ChecksumAlgorithmList"}, 
"Size":{"shape":"Size"}, "StorageClass":{"shape":"ObjectVersionStorageClass"}, "Key":{"shape":"ObjectKey"}, @@ -5314,7 +5721,11 @@ "PartNumber":{"shape":"PartNumber"}, "LastModified":{"shape":"LastModified"}, "ETag":{"shape":"ETag"}, - "Size":{"shape":"Size"} + "Size":{"shape":"Size"}, + "ChecksumCRC32":{"shape":"ChecksumCRC32"}, + "ChecksumCRC32C":{"shape":"ChecksumCRC32C"}, + "ChecksumSHA1":{"shape":"ChecksumSHA1"}, + "ChecksumSHA256":{"shape":"ChecksumSHA256"} } }, "PartNumber":{"type":"integer"}, @@ -5325,6 +5736,11 @@ "flattened":true }, "PartsCount":{"type":"integer"}, + "PartsList":{ + "type":"list", + "member":{"shape":"ObjectPart"}, + "flattened":true + }, "Payer":{ "type":"string", "enum":[ @@ -5421,6 +5837,11 @@ "shape":"AccountId", "location":"header", "locationName":"x-amz-expected-bucket-owner" + }, + "ChecksumAlgorithm":{ + "shape":"ChecksumAlgorithm", + "location":"header", + "locationName":"x-amz-sdk-checksum-algorithm" } }, "payload":"AccelerateConfiguration" @@ -5449,6 +5870,11 @@ "location":"header", "locationName":"Content-MD5" }, + "ChecksumAlgorithm":{ + "shape":"ChecksumAlgorithm", + "location":"header", + "locationName":"x-amz-sdk-checksum-algorithm" + }, "GrantFullControl":{ "shape":"GrantFullControl", "location":"header", @@ -5535,6 +5961,11 @@ "location":"header", "locationName":"Content-MD5" }, + "ChecksumAlgorithm":{ + "shape":"ChecksumAlgorithm", + "location":"header", + "locationName":"x-amz-sdk-checksum-algorithm" + }, "ExpectedBucketOwner":{ "shape":"AccountId", "location":"header", @@ -5560,6 +5991,11 @@ "location":"header", "locationName":"Content-MD5" }, + "ChecksumAlgorithm":{ + "shape":"ChecksumAlgorithm", + "location":"header", + "locationName":"x-amz-sdk-checksum-algorithm" + }, "ServerSideEncryptionConfiguration":{ "shape":"ServerSideEncryptionConfiguration", "locationName":"ServerSideEncryptionConfiguration", @@ -5639,6 +6075,11 @@ "location":"uri", "locationName":"Bucket" }, + "ChecksumAlgorithm":{ + "shape":"ChecksumAlgorithm", + "location":"header", + "locationName":"x-amz-sdk-checksum-algorithm" + }, "LifecycleConfiguration":{ "shape":"BucketLifecycleConfiguration", "locationName":"LifecycleConfiguration", @@ -5666,6 +6107,11 @@ "location":"header", "locationName":"Content-MD5" }, + "ChecksumAlgorithm":{ + "shape":"ChecksumAlgorithm", + "location":"header", + "locationName":"x-amz-sdk-checksum-algorithm" + }, "LifecycleConfiguration":{ "shape":"LifecycleConfiguration", "locationName":"LifecycleConfiguration", @@ -5701,6 +6147,11 @@ "location":"header", "locationName":"Content-MD5" }, + "ChecksumAlgorithm":{ + "shape":"ChecksumAlgorithm", + "location":"header", + "locationName":"x-amz-sdk-checksum-algorithm" + }, "ExpectedBucketOwner":{ "shape":"AccountId", "location":"header", @@ -5787,6 +6238,11 @@ "location":"header", "locationName":"Content-MD5" }, + "ChecksumAlgorithm":{ + "shape":"ChecksumAlgorithm", + "location":"header", + "locationName":"x-amz-sdk-checksum-algorithm" + }, "NotificationConfiguration":{ "shape":"NotificationConfigurationDeprecated", "locationName":"NotificationConfiguration", @@ -5847,6 +6303,11 @@ "location":"header", "locationName":"Content-MD5" }, + "ChecksumAlgorithm":{ + "shape":"ChecksumAlgorithm", + "location":"header", + "locationName":"x-amz-sdk-checksum-algorithm" + }, "ConfirmRemoveSelfBucketAccess":{ "shape":"ConfirmRemoveSelfBucketAccess", "location":"header", @@ -5878,6 +6339,11 @@ "location":"header", "locationName":"Content-MD5" }, + "ChecksumAlgorithm":{ + "shape":"ChecksumAlgorithm", + 
"location":"header", + "locationName":"x-amz-sdk-checksum-algorithm" + }, "ReplicationConfiguration":{ "shape":"ReplicationConfiguration", "locationName":"ReplicationConfiguration", @@ -5913,6 +6379,11 @@ "location":"header", "locationName":"Content-MD5" }, + "ChecksumAlgorithm":{ + "shape":"ChecksumAlgorithm", + "location":"header", + "locationName":"x-amz-sdk-checksum-algorithm" + }, "RequestPaymentConfiguration":{ "shape":"RequestPaymentConfiguration", "locationName":"RequestPaymentConfiguration", @@ -5943,6 +6414,11 @@ "location":"header", "locationName":"Content-MD5" }, + "ChecksumAlgorithm":{ + "shape":"ChecksumAlgorithm", + "location":"header", + "locationName":"x-amz-sdk-checksum-algorithm" + }, "Tagging":{ "shape":"Tagging", "locationName":"Tagging", @@ -5973,6 +6449,11 @@ "location":"header", "locationName":"Content-MD5" }, + "ChecksumAlgorithm":{ + "shape":"ChecksumAlgorithm", + "location":"header", + "locationName":"x-amz-sdk-checksum-algorithm" + }, "MFA":{ "shape":"MFA", "location":"header", @@ -6008,6 +6489,11 @@ "location":"header", "locationName":"Content-MD5" }, + "ChecksumAlgorithm":{ + "shape":"ChecksumAlgorithm", + "location":"header", + "locationName":"x-amz-sdk-checksum-algorithm" + }, "WebsiteConfiguration":{ "shape":"WebsiteConfiguration", "locationName":"WebsiteConfiguration", @@ -6058,6 +6544,11 @@ "location":"header", "locationName":"Content-MD5" }, + "ChecksumAlgorithm":{ + "shape":"ChecksumAlgorithm", + "location":"header", + "locationName":"x-amz-sdk-checksum-algorithm" + }, "GrantFullControl":{ "shape":"GrantFullControl", "location":"header", @@ -6153,6 +6644,11 @@ "location":"header", "locationName":"Content-MD5" }, + "ChecksumAlgorithm":{ + "shape":"ChecksumAlgorithm", + "location":"header", + "locationName":"x-amz-sdk-checksum-algorithm" + }, "ExpectedBucketOwner":{ "shape":"AccountId", "location":"header", @@ -6200,6 +6696,11 @@ "location":"header", "locationName":"Content-MD5" }, + "ChecksumAlgorithm":{ + "shape":"ChecksumAlgorithm", + "location":"header", + "locationName":"x-amz-sdk-checksum-algorithm" + }, "ExpectedBucketOwner":{ "shape":"AccountId", "location":"header", @@ -6221,6 +6722,26 @@ "location":"header", "locationName":"ETag" }, + "ChecksumCRC32":{ + "shape":"ChecksumCRC32", + "location":"header", + "locationName":"x-amz-checksum-crc32" + }, + "ChecksumCRC32C":{ + "shape":"ChecksumCRC32C", + "location":"header", + "locationName":"x-amz-checksum-crc32c" + }, + "ChecksumSHA1":{ + "shape":"ChecksumSHA1", + "location":"header", + "locationName":"x-amz-checksum-sha1" + }, + "ChecksumSHA256":{ + "shape":"ChecksumSHA256", + "location":"header", + "locationName":"x-amz-checksum-sha256" + }, "ServerSideEncryption":{ "shape":"ServerSideEncryption", "location":"header", @@ -6319,6 +6840,31 @@ "location":"header", "locationName":"Content-Type" }, + "ChecksumAlgorithm":{ + "shape":"ChecksumAlgorithm", + "location":"header", + "locationName":"x-amz-sdk-checksum-algorithm" + }, + "ChecksumCRC32":{ + "shape":"ChecksumCRC32", + "location":"header", + "locationName":"x-amz-checksum-crc32" + }, + "ChecksumCRC32C":{ + "shape":"ChecksumCRC32C", + "location":"header", + "locationName":"x-amz-checksum-crc32c" + }, + "ChecksumSHA1":{ + "shape":"ChecksumSHA1", + "location":"header", + "locationName":"x-amz-checksum-sha1" + }, + "ChecksumSHA256":{ + "shape":"ChecksumSHA256", + "location":"header", + "locationName":"x-amz-checksum-sha256" + }, "Expires":{ "shape":"Expires", "location":"header", @@ -6484,6 +7030,11 @@ "location":"header", "locationName":"Content-MD5" 
}, + "ChecksumAlgorithm":{ + "shape":"ChecksumAlgorithm", + "location":"header", + "locationName":"x-amz-sdk-checksum-algorithm" + }, "ExpectedBucketOwner":{ "shape":"AccountId", "location":"header", @@ -6530,6 +7081,11 @@ "location":"header", "locationName":"Content-MD5" }, + "ChecksumAlgorithm":{ + "shape":"ChecksumAlgorithm", + "location":"header", + "locationName":"x-amz-sdk-checksum-algorithm" + }, "Tagging":{ "shape":"Tagging", "locationName":"Tagging", @@ -6565,6 +7121,11 @@ "location":"header", "locationName":"Content-MD5" }, + "ChecksumAlgorithm":{ + "shape":"ChecksumAlgorithm", + "location":"header", + "locationName":"x-amz-sdk-checksum-algorithm" + }, "PublicAccessBlockConfiguration":{ "shape":"PublicAccessBlockConfiguration", "locationName":"PublicAccessBlockConfiguration", @@ -6854,6 +7415,11 @@ "location":"header", "locationName":"x-amz-request-payer" }, + "ChecksumAlgorithm":{ + "shape":"ChecksumAlgorithm", + "location":"header", + "locationName":"x-amz-sdk-checksum-algorithm" + }, "ExpectedBucketOwner":{ "shape":"AccountId", "location":"header", @@ -7488,6 +8054,26 @@ "location":"header", "locationName":"ETag" }, + "ChecksumCRC32":{ + "shape":"ChecksumCRC32", + "location":"header", + "locationName":"x-amz-checksum-crc32" + }, + "ChecksumCRC32C":{ + "shape":"ChecksumCRC32C", + "location":"header", + "locationName":"x-amz-checksum-crc32c" + }, + "ChecksumSHA1":{ + "shape":"ChecksumSHA1", + "location":"header", + "locationName":"x-amz-checksum-sha1" + }, + "ChecksumSHA256":{ + "shape":"ChecksumSHA256", + "location":"header", + "locationName":"x-amz-checksum-sha256" + }, "SSECustomerAlgorithm":{ "shape":"SSECustomerAlgorithm", "location":"header", @@ -7543,6 +8129,31 @@ "location":"header", "locationName":"Content-MD5" }, + "ChecksumAlgorithm":{ + "shape":"ChecksumAlgorithm", + "location":"header", + "locationName":"x-amz-sdk-checksum-algorithm" + }, + "ChecksumCRC32":{ + "shape":"ChecksumCRC32", + "location":"header", + "locationName":"x-amz-checksum-crc32" + }, + "ChecksumCRC32C":{ + "shape":"ChecksumCRC32C", + "location":"header", + "locationName":"x-amz-checksum-crc32c" + }, + "ChecksumSHA1":{ + "shape":"ChecksumSHA1", + "location":"header", + "locationName":"x-amz-checksum-sha1" + }, + "ChecksumSHA256":{ + "shape":"ChecksumSHA256", + "location":"header", + "locationName":"x-amz-checksum-sha256" + }, "Key":{ "shape":"ObjectKey", "location":"uri", @@ -7693,6 +8304,26 @@ "location":"header", "locationName":"x-amz-fwd-header-Content-Type" }, + "ChecksumCRC32":{ + "shape":"ChecksumCRC32", + "location":"header", + "locationName":"x-amz-fwd-header-x-amz-checksum-crc32" + }, + "ChecksumCRC32C":{ + "shape":"ChecksumCRC32C", + "location":"header", + "locationName":"x-amz-fwd-header-x-amz-checksum-crc32c" + }, + "ChecksumSHA1":{ + "shape":"ChecksumSHA1", + "location":"header", + "locationName":"x-amz-fwd-header-x-amz-checksum-sha1" + }, + "ChecksumSHA256":{ + "shape":"ChecksumSHA256", + "location":"header", + "locationName":"x-amz-fwd-header-x-amz-checksum-sha256" + }, "DeleteMarker":{ "shape":"DeleteMarker", "location":"header", diff --git a/models/apis/s3/2006-03-01/docs-2.json b/models/apis/s3/2006-03-01/docs-2.json index a977bc17931..76be6eb77b2 100644 --- a/models/apis/s3/2006-03-01/docs-2.json +++ b/models/apis/s3/2006-03-01/docs-2.json @@ -4,9 +4,9 @@ "operations": { "AbortMultipartUpload": "This action aborts a multipart upload. After a multipart upload is aborted, no additional parts can be uploaded using that upload ID. 
The storage consumed by any previously uploaded parts will be freed. However, if any part uploads are currently in progress, those part uploads might or might not succeed. As a result, it might be necessary to abort a given multipart upload multiple times in order to completely free all storage consumed by all parts.
To verify that all parts have been removed, so you don't get charged for the part storage, you should call the ListParts action and ensure that the parts list is empty.
For information about permissions required to use the multipart upload API, see Multipart Upload and Permissions.
The following operations are related to AbortMultipartUpload
:
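Below is a minimal aws-sdk-go sketch of the abort-then-verify flow described above; the bucket, key, and upload ID are placeholders.

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))
	in := &s3.AbortMultipartUploadInput{
		Bucket:   aws.String("examplebucket"),     // placeholder
		Key:      aws.String("largeobject"),       // placeholder
		UploadId: aws.String("example-upload-id"), // placeholder
	}
	// Abort the upload; in-progress part uploads may still land, so
	// aborting more than once can be necessary.
	if _, err := svc.AbortMultipartUpload(in); err != nil {
		fmt.Println("abort failed:", err)
		return
	}
	// Verify that no parts remain billed: an empty parts list (or a
	// NoSuchUpload error) means all storage has been freed.
	out, err := svc.ListParts(&s3.ListPartsInput{
		Bucket: in.Bucket, Key: in.Key, UploadId: in.UploadId,
	})
	if err != nil || len(out.Parts) == 0 {
		fmt.Println("no parts remain")
	}
}
```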
Completes a multipart upload by assembling previously uploaded parts.
You first initiate the multipart upload and then upload all parts using the UploadPart operation. After successfully uploading all relevant parts of an upload, you call this action to complete the upload. Upon receiving this request, Amazon S3 concatenates all the parts in ascending order by part number to create a new object. In the Complete Multipart Upload request, you must provide the parts list. You must ensure that the parts list is complete. This action concatenates the parts that you provide in the list. For each part in the list, you must provide the part number and the ETag
value, returned after that part was uploaded.
Processing of a Complete Multipart Upload request could take several minutes to complete. After Amazon S3 begins processing the request, it sends an HTTP response header that specifies a 200 OK response. While processing is in progress, Amazon S3 periodically sends white space characters to keep the connection from timing out. Because a request could fail after the initial 200 OK response has been sent, it is important that you check the response body to determine whether the request succeeded.
Note that if CompleteMultipartUpload
fails, applications should be prepared to retry the failed requests. For more information, see Amazon S3 Error Best Practices.
You cannot use Content-Type: application/x-www-form-urlencoded
with Complete Multipart Upload requests. Also, if you do not provide a Content-Type
header, CompleteMultipartUpload
returns a 200 OK response.
For more information about multipart uploads, see Uploading Objects Using Multipart Upload.
For information about permissions required to use the multipart upload API, see Multipart Upload and Permissions.
CompleteMultipartUpload
has the following special errors:
Error code: EntityTooSmall
Description: Your proposed upload is smaller than the minimum allowed object size. Each part must be at least 5 MB in size, except the last part.
400 Bad Request
Error code: InvalidPart
Description: One or more of the specified parts could not be found. The part might not have been uploaded, or the specified entity tag might not have matched the part's entity tag.
400 Bad Request
Error code: InvalidPartOrder
Description: The list of parts was not in ascending order. The parts list must be specified in order by part number.
400 Bad Request
Error code: NoSuchUpload
Description: The specified multipart upload does not exist. The upload ID might be invalid, or the multipart upload might have been aborted or completed.
404 Not Found
The following operations are related to CompleteMultipartUpload
:
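A minimal sketch of the completion step described above, assuming two parts were already uploaded with UploadPart; every identifier is a placeholder.

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))
	// The parts list must be in ascending order by part number, and each
	// entry carries the ETag returned by the corresponding UploadPart call.
	out, err := svc.CompleteMultipartUpload(&s3.CompleteMultipartUploadInput{
		Bucket:   aws.String("examplebucket"),     // placeholder
		Key:      aws.String("largeobject"),       // placeholder
		UploadId: aws.String("example-upload-id"), // placeholder
		MultipartUpload: &s3.CompletedMultipartUpload{
			Parts: []*s3.CompletedPart{
				{ETag: aws.String(`"etag-of-part-1"`), PartNumber: aws.Int64(1)},
				{ETag: aws.String(`"etag-of-part-2"`), PartNumber: aws.Int64(2)},
			},
		},
	})
	if err != nil {
		// EntityTooSmall, InvalidPart, InvalidPartOrder, and NoSuchUpload are
		// the special errors listed above; failed completes should be retried.
		fmt.Println("complete failed:", err)
		return
	}
	fmt.Println("object assembled at:", aws.StringValue(out.Location))
}
```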
Creates a copy of an object that is already stored in Amazon S3.
You can store individual objects of up to 5 TB in Amazon S3. You create a copy of your object up to 5 GB in size in a single atomic action using this API. However, to copy an object greater than 5 GB, you must use the multipart upload Upload Part - Copy API. For more information, see Copy Object Using the REST Multipart Upload API.
All copy requests must be authenticated. Additionally, you must have read access to the source object and write access to the destination bucket. For more information, see REST Authentication. Both the Region that you want to copy the object from and the Region that you want to copy the object to must be enabled for your account.
A copy request might return an error when Amazon S3 receives the copy request or while Amazon S3 is copying the files. If the error occurs before the copy action starts, you receive a standard Amazon S3 error. If the error occurs during the copy operation, the error response is embedded in the 200 OK
response. This means that a 200 OK
response can contain either a success or an error. Design your application to parse the contents of the response and handle it appropriately.
If the copy is successful, you receive a response with information about the copied object.
If the request is an HTTP 1.1 request, the response is chunk encoded. If it were not, it would not contain the content-length, and you would need to read the entire body.
The copy request charge is based on the storage class and Region that you specify for the destination object. For pricing information, see Amazon S3 pricing.
Amazon S3 transfer acceleration does not support cross-Region copies. If you request a cross-Region copy using a transfer acceleration endpoint, you get a 400 Bad Request
error. For more information, see Transfer Acceleration.
Metadata
When copying an object, you can preserve all metadata (default) or specify new metadata. However, the ACL is not preserved and is set to private for the user making the request. To override the default ACL setting, specify a new ACL when generating a copy request. For more information, see Using ACLs.
To specify whether you want the object metadata copied from the source object or replaced with metadata provided in the request, you can optionally add the x-amz-metadata-directive
header. When you grant permissions, you can use the s3:x-amz-metadata-directive
condition key to enforce certain metadata behavior when objects are uploaded. For more information, see Specifying Conditions in a Policy in the Amazon S3 User Guide. For a complete list of Amazon S3-specific condition keys, see Actions, Resources, and Condition Keys for Amazon S3.
x-amz-copy-source-if
Headers
To only copy an object under certain conditions, such as whether the ETag
matches or whether the object was modified before or after a specified date, use the following request parameters:
x-amz-copy-source-if-match
x-amz-copy-source-if-none-match
x-amz-copy-source-if-unmodified-since
x-amz-copy-source-if-modified-since
If both the x-amz-copy-source-if-match
and x-amz-copy-source-if-unmodified-since
headers are present in the request and evaluate as follows, Amazon S3 returns 200 OK
and copies the data:
x-amz-copy-source-if-match
condition evaluates to true
x-amz-copy-source-if-unmodified-since
condition evaluates to false
If both the x-amz-copy-source-if-none-match
and x-amz-copy-source-if-modified-since
headers are present in the request and evaluate as follows, Amazon S3 returns the 412 Precondition Failed
response code:
x-amz-copy-source-if-none-match
condition evaluates to false
x-amz-copy-source-if-modified-since
condition evaluates to true
All headers with the x-amz-
prefix, including x-amz-copy-source
, must be signed.
Server-side encryption
When you perform a CopyObject operation, you can optionally use the appropriate encryption-related headers to encrypt the object using server-side encryption with Amazon Web Services managed encryption keys (SSE-S3 or SSE-KMS) or a customer-provided encryption key. With server-side encryption, Amazon S3 encrypts your data as it writes it to disks in its data centers and decrypts the data when you access it. For more information about server-side encryption, see Using Server-Side Encryption.
If a target object uses SSE-KMS, you can enable an S3 Bucket Key for the object. For more information, see Amazon S3 Bucket Keys in the Amazon S3 User Guide.
Access Control List (ACL)-Specific Request Headers
When copying an object, you can optionally use headers to grant ACL-based permissions. By default, all objects are private. Only the owner has full access control. When adding a new object, you can grant permissions to individual Amazon Web Services accounts or to predefined groups defined by Amazon S3. These permissions are then added to the ACL on the object. For more information, see Access Control List (ACL) Overview and Managing ACLs Using the REST API.
If the bucket that you're copying objects to uses the bucket owner enforced setting for S3 Object Ownership, ACLs are disabled and no longer affect permissions. Buckets that use this setting only accept PUT requests that don't specify an ACL or PUT requests that specify bucket owner full control ACLs, such as the bucket-owner-full-control
canned ACL or an equivalent form of this ACL expressed in the XML format.
For more information, see Controlling ownership of objects and disabling ACLs in the Amazon S3 User Guide.
If your bucket uses the bucket owner enforced setting for Object Ownership, all objects written to the bucket by any account will be owned by the bucket owner.
Storage Class Options
You can use the CopyObject
action to change the storage class of an object that is already stored in Amazon S3 using the StorageClass
parameter. For more information, see Storage Classes in the Amazon S3 User Guide.
Versioning
By default, x-amz-copy-source
identifies the current version of an object to copy. If the current version is a delete marker, Amazon S3 behaves as if the object was deleted. To copy a different version, use the versionId
subresource.
If you enable versioning on the target bucket, Amazon S3 generates a unique version ID for the object being copied. This version ID is different from the version ID of the source object. Amazon S3 returns the version ID of the copied object in the x-amz-version-id
response header in the response.
If you do not enable versioning or suspend it on the target bucket, the version ID that Amazon S3 generates is always null.
If the source object's storage class is GLACIER, you must restore a copy of this object before you can use it as a source object for the copy operation. For more information, see RestoreObject.
The following operations are related to CopyObject
:
For more information, see Copying Objects.
", + "CopyObject": "Creates a copy of an object that is already stored in Amazon S3.
You can store individual objects of up to 5 TB in Amazon S3. You create a copy of your object up to 5 GB in size in a single atomic action using this API. However, to copy an object greater than 5 GB, you must use the multipart upload Upload Part - Copy (UploadPartCopy) API. For more information, see Copy Object Using the REST Multipart Upload API.
All copy requests must be authenticated. Additionally, you must have read access to the source object and write access to the destination bucket. For more information, see REST Authentication. Both the Region that you want to copy the object from and the Region that you want to copy the object to must be enabled for your account.
A copy request might return an error when Amazon S3 receives the copy request or while Amazon S3 is copying the files. If the error occurs before the copy action starts, you receive a standard Amazon S3 error. If the error occurs during the copy operation, the error response is embedded in the 200 OK
response. This means that a 200 OK
response can contain either a success or an error. Design your application to parse the contents of the response and handle it appropriately.
If the copy is successful, you receive a response with information about the copied object.
If the request is an HTTP 1.1 request, the response is chunk encoded. If it were not, it would not contain the content-length, and you would need to read the entire body.
The copy request charge is based on the storage class and Region that you specify for the destination object. For pricing information, see Amazon S3 pricing.
Amazon S3 transfer acceleration does not support cross-Region copies. If you request a cross-Region copy using a transfer acceleration endpoint, you get a 400 Bad Request
error. For more information, see Transfer Acceleration.
Metadata
When copying an object, you can preserve all metadata (default) or specify new metadata. However, the ACL is not preserved and is set to private for the user making the request. To override the default ACL setting, specify a new ACL when generating a copy request. For more information, see Using ACLs.
To specify whether you want the object metadata copied from the source object or replaced with metadata provided in the request, you can optionally add the x-amz-metadata-directive
header. When you grant permissions, you can use the s3:x-amz-metadata-directive
condition key to enforce certain metadata behavior when objects are uploaded. For more information, see Specifying Conditions in a Policy in the Amazon S3 User Guide. For a complete list of Amazon S3-specific condition keys, see Actions, Resources, and Condition Keys for Amazon S3.
x-amz-copy-source-if Headers
To only copy an object under certain conditions, such as whether the ETag
matches or whether the object was modified before or after a specified date, use the following request parameters:
x-amz-copy-source-if-match
x-amz-copy-source-if-none-match
x-amz-copy-source-if-unmodified-since
x-amz-copy-source-if-modified-since
If both the x-amz-copy-source-if-match
and x-amz-copy-source-if-unmodified-since
headers are present in the request and evaluate as follows, Amazon S3 returns 200 OK
and copies the data:
x-amz-copy-source-if-match
condition evaluates to true
x-amz-copy-source-if-unmodified-since
condition evaluates to false
If both the x-amz-copy-source-if-none-match
and x-amz-copy-source-if-modified-since
headers are present in the request and evaluate as follows, Amazon S3 returns the 412 Precondition Failed
response code:
x-amz-copy-source-if-none-match
condition evaluates to false
x-amz-copy-source-if-modified-since
condition evaluates to true
All headers with the x-amz-
prefix, including x-amz-copy-source
, must be signed.
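As a sketch of the conditional-copy headers above in aws-sdk-go (all names are placeholders): the copy proceeds only while the source ETag still matches and the source is unmodified since the given time.

```go
package main

import (
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))
	_, err := svc.CopyObject(&s3.CopyObjectInput{
		Bucket:     aws.String("destinationbucket"),         // placeholder
		Key:        aws.String("copiedobject"),              // placeholder
		CopySource: aws.String("sourcebucket/sourceobject"), // placeholder
		// x-amz-copy-source-if-match and x-amz-copy-source-if-unmodified-since:
		// both must hold for the 200 OK case described above.
		CopySourceIfMatch:           aws.String(`"expected-etag"`),
		CopySourceIfUnmodifiedSince: aws.Time(time.Now().Add(-24 * time.Hour)),
	})
	if err != nil {
		// A failed precondition surfaces as 412 Precondition Failed.
		fmt.Println("conditional copy rejected:", err)
		return
	}
	fmt.Println("copy succeeded")
}
```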
Server-side encryption
When you perform a CopyObject operation, you can optionally use the appropriate encryption-related headers to encrypt the object using server-side encryption with Amazon Web Services managed encryption keys (SSE-S3 or SSE-KMS) or a customer-provided encryption key. With server-side encryption, Amazon S3 encrypts your data as it writes it to disks in its data centers and decrypts the data when you access it. For more information about server-side encryption, see Using Server-Side Encryption.
If a target object uses SSE-KMS, you can enable an S3 Bucket Key for the object. For more information, see Amazon S3 Bucket Keys in the Amazon S3 User Guide.
Access Control List (ACL)-Specific Request Headers
When copying an object, you can optionally use headers to grant ACL-based permissions. By default, all objects are private. Only the owner has full access control. When adding a new object, you can grant permissions to individual Amazon Web Services accounts or to predefined groups defined by Amazon S3. These permissions are then added to the ACL on the object. For more information, see Access Control List (ACL) Overview and Managing ACLs Using the REST API.
If the bucket that you're copying objects to uses the bucket owner enforced setting for S3 Object Ownership, ACLs are disabled and no longer affect permissions. Buckets that use this setting only accept PUT requests that don't specify an ACL or PUT requests that specify bucket owner full control ACLs, such as the bucket-owner-full-control
canned ACL or an equivalent form of this ACL expressed in the XML format.
For more information, see Controlling ownership of objects and disabling ACLs in the Amazon S3 User Guide.
If your bucket uses the bucket owner enforced setting for Object Ownership, all objects written to the bucket by any account will be owned by the bucket owner.
Checksums
When copying an object, if the source object has a checksum, that checksum is copied to the new object by default. As part of the copy request, you can optionally specify a different checksum algorithm for the new object with the x-amz-checksum-algorithm
header.
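A sketch of that checksum option, assuming a service/s3 client regenerated from this model update, which adds a ChecksumAlgorithm field (the x-amz-checksum-algorithm header) and its generated enum constants to CopyObjectInput; bucket and key names are placeholders.

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))
	// Assumes the regenerated service/s3 package from this model change.
	_, err := svc.CopyObject(&s3.CopyObjectInput{
		Bucket:     aws.String("destinationbucket"),         // placeholder
		Key:        aws.String("copiedobject"),              // placeholder
		CopySource: aws.String("sourcebucket/sourceobject"), // placeholder
		// Recompute the copy's checksum with SHA-256 instead of inheriting
		// the source object's checksum algorithm.
		ChecksumAlgorithm: aws.String(s3.ChecksumAlgorithmSha256),
	})
	if err != nil {
		fmt.Println("copy failed:", err)
		return
	}
	fmt.Println("copy succeeded with SHA-256 checksum")
}
```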
Storage Class Options
You can use the CopyObject
action to change the storage class of an object that is already stored in Amazon S3 using the StorageClass
parameter. For more information, see Storage Classes in the Amazon S3 User Guide.
Versioning
By default, x-amz-copy-source
identifies the current version of an object to copy. If the current version is a delete marker, Amazon S3 behaves as if the object was deleted. To copy a different version, use the versionId
subresource.
If you enable versioning on the target bucket, Amazon S3 generates a unique version ID for the object being copied. This version ID is different from the version ID of the source object. Amazon S3 returns the version ID of the copied object in the x-amz-version-id
response header in the response.
If you do not enable versioning or suspend it on the target bucket, the version ID that Amazon S3 generates is always null.
If the source object's storage class is GLACIER, you must restore a copy of this object before you can use it as a source object for the copy operation. For more information, see RestoreObject.
The following operations are related to CopyObject
:
For more information, see Copying Objects.
", "CreateBucket": "Creates a new S3 bucket. To create a bucket, you must register with Amazon S3 and have a valid Amazon Web Services Access Key ID to authenticate requests. Anonymous requests are never allowed to create buckets. By creating the bucket, you become the bucket owner.
Not every string is an acceptable bucket name. For information about bucket naming restrictions, see Bucket naming rules.
If you want to create an Amazon S3 on Outposts bucket, see Create Bucket.
By default, the bucket is created in the US East (N. Virginia) Region. You can optionally specify a Region in the request body. You might choose a Region to optimize latency, minimize costs, or address regulatory requirements. For example, if you reside in Europe, you will probably find it advantageous to create buckets in the Europe (Ireland) Region. For more information, see Accessing a bucket.
If you send your create bucket request to the s3.amazonaws.com
endpoint, the request goes to the us-east-1 Region. Accordingly, the signature calculations in Signature Version 4 must use us-east-1 as the Region, even if the location constraint in the request specifies another Region where the bucket is to be created. If you create a bucket in a Region other than US East (N. Virginia), your application must be able to handle 307 redirect. For more information, see Virtual hosting of buckets.
Access control lists (ACLs)
When creating a bucket using this operation, you can optionally configure the bucket ACL to specify the accounts or groups that should be granted specific permissions on the bucket.
If your CreateBucket request sets bucket owner enforced for S3 Object Ownership and specifies a bucket ACL that provides access to an external Amazon Web Services account, your request fails with a 400
error and returns the InvalidBucketAclWithObjectOwnership
error code. For more information, see Controlling object ownership in the Amazon S3 User Guide.
There are two ways to grant the appropriate permissions using the request headers.
Specify a canned ACL using the x-amz-acl
request header. Amazon S3 supports a set of predefined ACLs, known as canned ACLs. Each canned ACL has a predefined set of grantees and permissions. For more information, see Canned ACL.
Specify access permissions explicitly using the x-amz-grant-read
, x-amz-grant-write
, x-amz-grant-read-acp
, x-amz-grant-write-acp
, and x-amz-grant-full-control
headers. These headers map to the set of permissions Amazon S3 supports in an ACL. For more information, see Access control list (ACL) overview.
You specify each grantee as a type=value pair, where the type is one of the following:
id
– if the value specified is the canonical user ID of an Amazon Web Services account
uri
– if you are granting permissions to a predefined group
emailAddress
– if the value specified is the email address of an Amazon Web Services account
Using email addresses to specify a grantee is only supported in the following Amazon Web Services Regions:
US East (N. Virginia)
US West (N. California)
US West (Oregon)
Asia Pacific (Singapore)
Asia Pacific (Sydney)
Asia Pacific (Tokyo)
Europe (Ireland)
South America (São Paulo)
For a list of all the Amazon S3 supported Regions and endpoints, see Regions and Endpoints in the Amazon Web Services General Reference.
For example, the following x-amz-grant-read
header grants the Amazon Web Services accounts identified by account IDs permissions to read object data and its metadata:
x-amz-grant-read: id=\"11112222333\", id=\"444455556666\"
You can use either a canned ACL or specify access permissions explicitly. You cannot do both.
Permissions
In addition to s3:CreateBucket
, the following permissions are required when your CreateBucket includes specific headers:
ACLs - If your CreateBucket
request specifies ACL permissions and the ACL is public-read, public-read-write, authenticated-read, or if you specify access permissions explicitly through any other ACL, both s3:CreateBucket
and s3:PutBucketAcl
permissions are needed. If the ACL in the CreateBucket
request is private or doesn't specify any ACLs, only s3:CreateBucket
permission is needed.
Object Lock - If ObjectLockEnabledForBucket
is set to true in your CreateBucket
request, s3:PutBucketObjectLockConfiguration
and s3:PutBucketVersioning
permissions are required.
S3 Object Ownership - If your CreateBucket request includes the x-amz-object-ownership
header, s3:PutBucketOwnershipControls
permission is required.
The following operations are related to CreateBucket
:
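A minimal sketch tying the pieces above together: a Region-specific location constraint, a canned ACL in place of explicit grant headers, and Object Lock enabled at creation. The bucket name is a placeholder and must be globally unique.

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession(&aws.Config{
		Region: aws.String("eu-west-1"),
	})))
	_, err := svc.CreateBucket(&s3.CreateBucketInput{
		Bucket: aws.String("examplebucket"), // placeholder
		// A canned ACL instead of x-amz-grant-* headers; non-private ACLs
		// need s3:PutBucketAcl in addition to s3:CreateBucket.
		ACL: aws.String(s3.BucketCannedACLPrivate),
		// Requires s3:PutBucketObjectLockConfiguration and s3:PutBucketVersioning.
		ObjectLockEnabledForBucket: aws.Bool(true),
		// Must match the client Region when creating outside us-east-1.
		CreateBucketConfiguration: &s3.CreateBucketConfiguration{
			LocationConstraint: aws.String("eu-west-1"),
		},
	})
	if err != nil {
		fmt.Println("create failed:", err)
		return
	}
	fmt.Println("bucket created")
}
```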
This action initiates a multipart upload and returns an upload ID. This upload ID is used to associate all of the parts in the specific multipart upload. You specify this upload ID in each of your subsequent upload part requests (see UploadPart). You also include this upload ID in the final request to either complete or abort the multipart upload request.
For more information about multipart uploads, see Multipart Upload Overview.
If you have configured a lifecycle rule to abort incomplete multipart uploads, the upload must complete within the number of days specified in the bucket lifecycle configuration. Otherwise, the incomplete multipart upload becomes eligible for an abort action and Amazon S3 aborts the multipart upload. For more information, see Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Policy.
For information about the permissions required to use the multipart upload API, see Multipart Upload and Permissions.
For request signing, multipart upload is just a series of regular requests. You initiate a multipart upload, send one or more requests to upload parts, and then complete the multipart upload process. You sign each request individually. There is nothing special about signing multipart upload requests. For more information about signing, see Authenticating Requests (Amazon Web Services Signature Version 4).
After you initiate a multipart upload and upload one or more parts, to stop being charged for storing the uploaded parts, you must either complete or abort the multipart upload. Amazon S3 frees up the space used to store the parts and stops charging you for storing them only after you either complete or abort a multipart upload.
You can optionally request server-side encryption. For server-side encryption, Amazon S3 encrypts your data as it writes it to disks in its data centers and decrypts it when you access it. You can provide your own encryption key, or use Amazon Web Services KMS keys or Amazon S3-managed encryption keys. If you choose to provide your own encryption key, the request headers you provide in UploadPart and UploadPartCopy requests must match the headers you used in the request to initiate the upload by using CreateMultipartUpload
.
To perform a multipart upload with encryption using an Amazon Web Services KMS key, the requester must have permission to the kms:Decrypt
and kms:GenerateDataKey*
actions on the key. These permissions are required because Amazon S3 must decrypt and read data from the encrypted file parts before it completes the multipart upload. For more information, see Multipart upload API and permissions in the Amazon S3 User Guide.
If your Identity and Access Management (IAM) user or role is in the same Amazon Web Services account as the KMS key, then you must have these permissions on the key policy. If your IAM user or role belongs to a different account than the key, then you must have the permissions on both the key policy and your IAM user or role.
For more information, see Protecting Data Using Server-Side Encryption.
When copying an object, you can optionally specify the accounts or groups that should be granted specific permissions on the new object. There are two ways to grant the permissions using the request headers:
Specify a canned ACL with the x-amz-acl
request header. For more information, see Canned ACL.
Specify access permissions explicitly with the x-amz-grant-read
, x-amz-grant-read-acp
, x-amz-grant-write-acp
, and x-amz-grant-full-control
headers. These parameters map to the set of permissions that Amazon S3 supports in an ACL. For more information, see Access Control List (ACL) Overview.
You can use either a canned ACL or specify access permissions explicitly. You cannot do both.
You can optionally tell Amazon S3 to encrypt data at rest using server-side encryption. Server-side encryption is for data encryption at rest. Amazon S3 encrypts your data as it writes it to disks in its data centers and decrypts it when you access it. The option you use depends on whether you want to use Amazon Web Services managed encryption keys or provide your own encryption key.
Use encryption keys managed by Amazon S3 or customer managed key stored in Amazon Web Services Key Management Service (Amazon Web Services KMS) – If you want Amazon Web Services to manage the keys used to encrypt data, specify the following headers in the request.
x-amz-server-side-encryption
x-amz-server-side-encryption-aws-kms-key-id
x-amz-server-side-encryption-context
If you specify x-amz-server-side-encryption:aws:kms
, but don't provide x-amz-server-side-encryption-aws-kms-key-id
, Amazon S3 uses the Amazon Web Services managed key in Amazon Web Services KMS to protect the data.
All GET and PUT requests for an object protected by Amazon Web Services KMS fail if you don't make them with SSL or by using SigV4.
For more information about server-side encryption with KMS key (SSE-KMS), see Protecting Data Using Server-Side Encryption with KMS keys.
Use customer-provided encryption keys – If you want to manage your own encryption keys, provide all the following headers in the request.
x-amz-server-side-encryption-customer-algorithm
x-amz-server-side-encryption-customer-key
x-amz-server-side-encryption-customer-key-MD5
For more information about server-side encryption with KMS keys (SSE-KMS), see Protecting Data Using Server-Side Encryption with KMS keys.
You also can use the following access control–related headers with this operation. By default, all objects are private. Only the owner has full access control. When adding a new object, you can grant permissions to individual Amazon Web Services accounts or to predefined groups defined by Amazon S3. These permissions are then added to the access control list (ACL) on the object. For more information, see Using ACLs. With this operation, you can grant access permissions using one of the following two methods:
Specify a canned ACL (x-amz-acl
) — Amazon S3 supports a set of predefined ACLs, known as canned ACLs. Each canned ACL has a predefined set of grantees and permissions. For more information, see Canned ACL.
Specify access permissions explicitly — To explicitly grant access permissions to specific Amazon Web Services accounts or groups, use the following headers. Each header maps to specific permissions that Amazon S3 supports in an ACL. For more information, see Access Control List (ACL) Overview. In the header, you specify a list of grantees who get the specific permission. To grant permissions explicitly, use:
x-amz-grant-read
x-amz-grant-write
x-amz-grant-read-acp
x-amz-grant-write-acp
x-amz-grant-full-control
You specify each grantee as a type=value pair, where the type is one of the following:
id
– if the value specified is the canonical user ID of an Amazon Web Services account
uri
– if you are granting permissions to a predefined group
emailAddress
– if the value specified is the email address of an Amazon Web Services account
Using email addresses to specify a grantee is only supported in the following Amazon Web Services Regions:
US East (N. Virginia)
US West (N. California)
US West (Oregon)
Asia Pacific (Singapore)
Asia Pacific (Sydney)
Asia Pacific (Tokyo)
Europe (Ireland)
South America (São Paulo)
For a list of all the Amazon S3 supported Regions and endpoints, see Regions and Endpoints in the Amazon Web Services General Reference.
For example, the following x-amz-grant-read
header grants the Amazon Web Services accounts identified by account IDs permissions to read object data and its metadata:
x-amz-grant-read: id=\"111122223333\", id=\"444455556666\"
The following operations are related to CreateMultipartUpload
:
This action initiates a multipart upload and returns an upload ID. This upload ID is used to associate all of the parts in the specific multipart upload. You specify this upload ID in each of your subsequent upload part requests (see UploadPart). You also include this upload ID in the final request to either complete or abort the multipart upload request.
For more information about multipart uploads, see Multipart Upload Overview.
If you have configured a lifecycle rule to abort incomplete multipart uploads, the upload must complete within the number of days specified in the bucket lifecycle configuration. Otherwise, the incomplete multipart upload becomes eligible for an abort action and Amazon S3 aborts the multipart upload. For more information, see Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Policy.
For information about the permissions required to use the multipart upload API, see Multipart Upload and Permissions.
For request signing, multipart upload is just a series of regular requests. You initiate a multipart upload, send one or more requests to upload parts, and then complete the multipart upload process. You sign each request individually. There is nothing special about signing multipart upload requests. For more information about signing, see Authenticating Requests (Amazon Web Services Signature Version 4).
After you initiate a multipart upload and upload one or more parts, to stop being charged for storing the uploaded parts, you must either complete or abort the multipart upload. Amazon S3 frees up the space used to store the parts and stops charging you for storing them only after you either complete or abort a multipart upload.
You can optionally request server-side encryption. For server-side encryption, Amazon S3 encrypts your data as it writes it to disks in its data centers and decrypts it when you access it. You can provide your own encryption key, or use Amazon Web Services KMS keys or Amazon S3-managed encryption keys. If you choose to provide your own encryption key, the request headers you provide in UploadPart and UploadPartCopy requests must match the headers you used in the request to initiate the upload by using CreateMultipartUpload
.
To perform a multipart upload with encryption using an Amazon Web Services KMS key, the requester must have permission to the kms:Decrypt
and kms:GenerateDataKey*
actions on the key. These permissions are required because Amazon S3 must decrypt and read data from the encrypted file parts before it completes the multipart upload. For more information, see Multipart upload API and permissions in the Amazon S3 User Guide.
If your Identity and Access Management (IAM) user or role is in the same Amazon Web Services account as the KMS key, then you must have these permissions on the key policy. If your IAM user or role belongs to a different account than the key, then you must have the permissions on both the key policy and your IAM user or role.
For more information, see Protecting Data Using Server-Side Encryption.
When copying an object, you can optionally specify the accounts or groups that should be granted specific permissions on the new object. There are two ways to grant the permissions using the request headers:
Specify a canned ACL with the x-amz-acl
request header. For more information, see Canned ACL.
Specify access permissions explicitly with the x-amz-grant-read
, x-amz-grant-read-acp
, x-amz-grant-write-acp
, and x-amz-grant-full-control
headers. These parameters map to the set of permissions that Amazon S3 supports in an ACL. For more information, see Access Control List (ACL) Overview.
You can use either a canned ACL or specify access permissions explicitly. You cannot do both.
You can optionally tell Amazon S3 to encrypt data at rest using server-side encryption. Server-side encryption is for data encryption at rest. Amazon S3 encrypts your data as it writes it to disks in its data centers and decrypts it when you access it. The option you use depends on whether you want to use Amazon Web Services managed encryption keys or provide your own encryption key.
Use encryption keys managed by Amazon S3 or customer managed key stored in Amazon Web Services Key Management Service (Amazon Web Services KMS) – If you want Amazon Web Services to manage the keys used to encrypt data, specify the following headers in the request.
x-amz-server-side-encryption
x-amz-server-side-encryption-aws-kms-key-id
x-amz-server-side-encryption-context
If you specify x-amz-server-side-encryption:aws:kms
, but don't provide x-amz-server-side-encryption-aws-kms-key-id
, Amazon S3 uses the Amazon Web Services managed key in Amazon Web Services KMS to protect the data.
All GET and PUT requests for an object protected by Amazon Web Services KMS fail if you don't make them with SSL or by using SigV4.
For more information about server-side encryption with KMS key (SSE-KMS), see Protecting Data Using Server-Side Encryption with KMS keys.
Use customer-provided encryption keys – If you want to manage your own encryption keys, provide all the following headers in the request.
x-amz-server-side-encryption-customer-algorithm
x-amz-server-side-encryption-customer-key
x-amz-server-side-encryption-customer-key-MD5
For more information about server-side encryption with KMS keys (SSE-KMS), see Protecting Data Using Server-Side Encryption with KMS keys.
You also can use the following access control–related headers with this operation. By default, all objects are private. Only the owner has full access control. When adding a new object, you can grant permissions to individual Amazon Web Services accounts or to predefined groups defined by Amazon S3. These permissions are then added to the access control list (ACL) on the object. For more information, see Using ACLs. With this operation, you can grant access permissions using one of the following two methods:
Specify a canned ACL (x-amz-acl
) — Amazon S3 supports a set of predefined ACLs, known as canned ACLs. Each canned ACL has a predefined set of grantees and permissions. For more information, see Canned ACL.
Specify access permissions explicitly — To explicitly grant access permissions to specific Amazon Web Services accounts or groups, use the following headers. Each header maps to specific permissions that Amazon S3 supports in an ACL. For more information, see Access Control List (ACL) Overview. In the header, you specify a list of grantees who get the specific permission. To grant permissions explicitly, use:
x-amz-grant-read
x-amz-grant-write
x-amz-grant-read-acp
x-amz-grant-write-acp
x-amz-grant-full-control
You specify each grantee as a type=value pair, where the type is one of the following:
id
– if the value specified is the canonical user ID of an Amazon Web Services account
uri
– if you are granting permissions to a predefined group
emailAddress
– if the value specified is the email address of an Amazon Web Services account
Using email addresses to specify a grantee is only supported in the following Amazon Web Services Regions:
US East (N. Virginia)
US West (N. California)
US West (Oregon)
Asia Pacific (Singapore)
Asia Pacific (Sydney)
Asia Pacific (Tokyo)
Europe (Ireland)
South America (São Paulo)
For a list of all the Amazon S3 supported Regions and endpoints, see Regions and Endpoints in the Amazon Web Services General Reference.
For example, the following x-amz-grant-read
header grants the Amazon Web Services accounts identified by account IDs permissions to read object data and its metadata:
x-amz-grant-read: id=\"111122223333\", id=\"444455556666\"
The following operations are related to CreateMultipartUpload
:
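A sketch of initiating an SSE-KMS-encrypted multipart upload as described above; the bucket, key, and KMS key ARN are placeholders, and the caller needs kms:Decrypt and kms:GenerateDataKey* on the key.

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))
	out, err := svc.CreateMultipartUpload(&s3.CreateMultipartUploadInput{
		Bucket:               aws.String("examplebucket"), // placeholder
		Key:                  aws.String("largeobject"),   // placeholder
		// x-amz-server-side-encryption and its KMS key ID header.
		ServerSideEncryption: aws.String(s3.ServerSideEncryptionAwsKms),
		SSEKMSKeyId:          aws.String("arn:aws:kms:us-east-1:111122223333:key/example-key-id"), // placeholder
	})
	if err != nil {
		fmt.Println("initiate failed:", err)
		return
	}
	// The upload ID ties together every UploadPart call and the final
	// CompleteMultipartUpload (or AbortMultipartUpload) request.
	fmt.Println("upload ID:", aws.StringValue(out.UploadId))
}
```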
Deletes the S3 bucket. All objects (including all object versions and delete markers) in the bucket must be deleted before the bucket itself can be deleted.
Related Resources
", "DeleteBucketAnalyticsConfiguration": "Deletes an analytics configuration for the bucket (specified by the analytics configuration ID).
To use this operation, you must have permissions to perform the s3:PutAnalyticsConfiguration
action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.
For information about the Amazon S3 analytics feature, see Amazon S3 Analytics – Storage Class Analysis.
The following operations are related to DeleteBucketAnalyticsConfiguration
:
Deletes the cors
configuration information set for the bucket.
To use this operation, you must have permission to perform the s3:PutBucketCORS
action. The bucket owner has this permission by default and can grant this permission to others.
For information about cors
, see Enabling Cross-Origin Resource Sharing in the Amazon S3 User Guide.
Related Resources:
", @@ -27,7 +27,7 @@ "GetBucketAccelerateConfiguration": "This implementation of the GET action uses the accelerate
subresource to return the Transfer Acceleration state of a bucket, which is either Enabled
or Suspended
. Amazon S3 Transfer Acceleration is a bucket-level feature that enables you to perform faster data transfers to and from Amazon S3.
To use this operation, you must have permission to perform the s3:GetAccelerateConfiguration
action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to your Amazon S3 Resources in the Amazon S3 User Guide.
You set the Transfer Acceleration state of an existing bucket to Enabled
or Suspended
by using the PutBucketAccelerateConfiguration operation.
A GET accelerate
request does not return a state value for a bucket that has no transfer acceleration state. A bucket has no Transfer Acceleration state if a state has never been set on the bucket.
For more information about transfer acceleration, see Transfer Acceleration in the Amazon S3 User Guide.
Related Resources
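A minimal sketch of reading the accelerate state (the bucket name is a placeholder); note the nil status for a bucket on which acceleration was never configured.

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))
	out, err := svc.GetBucketAccelerateConfiguration(&s3.GetBucketAccelerateConfigurationInput{
		Bucket: aws.String("examplebucket"), // placeholder
	})
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	if out.Status == nil {
		// No state value: acceleration has never been set on the bucket.
		fmt.Println("no Transfer Acceleration state")
		return
	}
	fmt.Println("state:", aws.StringValue(out.Status)) // Enabled or Suspended
}
```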
", "GetBucketAcl": "This implementation of the GET
action uses the acl
subresource to return the access control list (ACL) of a bucket. To use GET
to return the ACL of the bucket, you must have READ_ACP
access to the bucket. If READ_ACP
permission is granted to the anonymous user, you can return the ACL of the bucket without using an authorization header.
If your bucket uses the bucket owner enforced setting for S3 Object Ownership, requests to read ACLs are still supported and return the bucket-owner-full-control
ACL with the owner being the account that created the bucket. For more information, see Controlling object ownership and disabling ACLs in the Amazon S3 User Guide.
Related Resources
", "GetBucketAnalyticsConfiguration": "This implementation of the GET action returns an analytics configuration (identified by the analytics configuration ID) from the bucket.
To use this operation, you must have permissions to perform the s3:GetAnalyticsConfiguration
action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources in the Amazon S3 User Guide.
For information about the Amazon S3 analytics feature, see Amazon S3 Analytics – Storage Class Analysis in the Amazon S3 User Guide.
Related Resources
Returns the cors configuration information set for the bucket.
To use this operation, you must have permission to perform the s3:GetBucketCORS action. By default, the bucket owner has this permission and can grant it to others.
For more information about cors, see Enabling Cross-Origin Resource Sharing.
The following operations are related to GetBucketCors
:
Returns the Cross-Origin Resource Sharing (CORS) configuration information set for the bucket.
To use this operation, you must have permission to perform the s3:GetBucketCORS
action. By default, the bucket owner has this permission and can grant it to others.
For more information about CORS, see Enabling Cross-Origin Resource Sharing.
The following operations are related to GetBucketCors
:
Returns the default encryption configuration for an Amazon S3 bucket. If the bucket does not have a default encryption configuration, GetBucketEncryption returns ServerSideEncryptionConfigurationNotFoundError
.
For information about the Amazon S3 default encryption feature, see Amazon S3 Default Bucket Encryption.
To use this operation, you must have permission to perform the s3:GetEncryptionConfiguration
action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.
The following operations are related to GetBucketEncryption
:
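A sketch of handling the ServerSideEncryptionConfigurationNotFoundError code documented above (the bucket name is a placeholder):

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))
	out, err := svc.GetBucketEncryption(&s3.GetBucketEncryptionInput{
		Bucket: aws.String("examplebucket"), // placeholder
	})
	if err != nil {
		// The documented error code for a bucket with no default encryption.
		if aerr, ok := err.(awserr.Error); ok &&
			aerr.Code() == "ServerSideEncryptionConfigurationNotFoundError" {
			fmt.Println("bucket has no default encryption configuration")
			return
		}
		fmt.Println("request failed:", err)
		return
	}
	for _, rule := range out.ServerSideEncryptionConfiguration.Rules {
		fmt.Println("default algorithm:",
			aws.StringValue(rule.ApplyServerSideEncryptionByDefault.SSEAlgorithm))
	}
}
```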
Gets the S3 Intelligent-Tiering configuration from the specified bucket.
The S3 Intelligent-Tiering storage class is designed to optimize storage costs by automatically moving data to the most cost-effective storage access tier, without performance impact or operational overhead. S3 Intelligent-Tiering delivers automatic cost savings in three low latency and high throughput access tiers. To get the lowest storage cost on data that can be accessed in minutes to hours, you can choose to activate additional archiving capabilities.
The S3 Intelligent-Tiering storage class is the ideal storage class for data with unknown, changing, or unpredictable access patterns, independent of object size or retention period. If the size of an object is less than 128 KB, it is not monitored and not eligible for auto-tiering. Smaller objects can be stored, but they are always charged at the Frequent Access tier rates in the S3 Intelligent-Tiering storage class.
For more information, see Storage class for automatically optimizing frequently and infrequently accessed objects.
Operations related to GetBucketIntelligentTieringConfiguration
include:
Returns an inventory configuration (identified by the inventory configuration ID) from the bucket.
To use this operation, you must have permissions to perform the s3:GetInventoryConfiguration
action. The bucket owner has this permission by default and can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.
For information about the Amazon S3 inventory feature, see Amazon S3 Inventory.
The following operations are related to GetBucketInventoryConfiguration
:
Retrieves the policy status for an Amazon S3 bucket, indicating whether the bucket is public. In order to use this operation, you must have the s3:GetBucketPolicyStatus
permission. For more information about Amazon S3 permissions, see Specifying Permissions in a Policy.
For more information about when Amazon S3 considers a bucket public, see The Meaning of \"Public\".
The following operations are related to GetBucketPolicyStatus
:
Returns the replication configuration of a bucket.
It can take a while for a put or delete of a replication configuration to propagate to all Amazon S3 systems. Therefore, a get request soon after a put or delete can return an outdated result.
For information about replication configuration, see Replication in the Amazon S3 User Guide.
This action requires permissions for the s3:GetReplicationConfiguration
action. For more information about permissions, see Using Bucket Policies and User Policies.
If you include the Filter
element in a replication configuration, you must also include the DeleteMarkerReplication
and Priority
elements. The response also returns those elements.
For information about GetBucketReplication
errors, see List of replication-related error codes
The following operations are related to GetBucketReplication
:
Returns the request payment configuration of a bucket. To use this version of the operation, you must be the bucket owner. For more information, see Requester Pays Buckets.
The following operations are related to GetBucketRequestPayment
:
Returns the tag set associated with the bucket.
To use this operation, you must have permission to perform the s3:GetBucketTagging
action. By default, the bucket owner has this permission and can grant this permission to others.
GetBucketTagging
has the following special error:
Error code: NoSuchTagSetError
Description: There is no tag set associated with the bucket.
The following operations are related to GetBucketTagging
:
Returns the tag set associated with the bucket.
To use this operation, you must have permission to perform the s3:GetBucketTagging
action. By default, the bucket owner has this permission and can grant this permission to others.
GetBucketTagging
has the following special error:
Error code: NoSuchTagSet
Description: There is no tag set associated with the bucket.
The following operations are related to GetBucketTagging
:
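A sketch of reading the tag set while treating the NoSuchTagSet special error above as an empty result (the bucket name is a placeholder):

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))
	out, err := svc.GetBucketTagging(&s3.GetBucketTaggingInput{
		Bucket: aws.String("examplebucket"), // placeholder
	})
	if err != nil {
		// The special error documented above: no tag set on the bucket.
		if aerr, ok := err.(awserr.Error); ok && aerr.Code() == "NoSuchTagSet" {
			fmt.Println("no tag set associated with the bucket")
			return
		}
		fmt.Println("request failed:", err)
		return
	}
	for _, tag := range out.TagSet {
		fmt.Printf("%s=%s\n", aws.StringValue(tag.Key), aws.StringValue(tag.Value))
	}
}
```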
Returns the versioning state of a bucket.
To retrieve the versioning state of a bucket, you must be the bucket owner.
This implementation also returns the MFA Delete status of the versioning state. If the MFA Delete status is enabled
, the bucket owner must use an authentication device to change the versioning state of the bucket.
The following operations are related to GetBucketVersioning
:
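A minimal sketch that reads both the versioning state and the MFA Delete status mentioned above (the bucket name is a placeholder):

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))
	out, err := svc.GetBucketVersioning(&s3.GetBucketVersioningInput{
		Bucket: aws.String("examplebucket"), // placeholder
	})
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	// Both fields are nil when versioning has never been configured.
	fmt.Println("versioning:", aws.StringValue(out.Status))    // Enabled or Suspended
	fmt.Println("MFA delete:", aws.StringValue(out.MFADelete)) // Enabled or Disabled
}
```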
Returns the website configuration for a bucket. To host a website on Amazon S3, you configure a bucket as a website by adding a website configuration. For more information about hosting websites, see Hosting Websites on Amazon S3.
This GET action requires the S3:GetBucketWebsite
permission. By default, only the bucket owner can read the bucket website configuration. However, bucket owners can allow other users to read the website configuration by writing a bucket policy granting them the S3:GetBucketWebsite
permission.
The following operations are related to DeleteBucketWebsite
:
Retrieves objects from Amazon S3. To use GET
, you must have READ
access to the object. If you grant READ
access to the anonymous user, you can return the object without using an authorization header.
An Amazon S3 bucket has no directory hierarchy such as you would find in a typical computer file system. You can, however, create a logical hierarchy by using object key names that imply a folder structure. For example, instead of naming an object sample.jpg
, you can name it photos/2006/February/sample.jpg
.
To get an object from such a logical hierarchy, specify the full key name for the object in the GET
operation. For a virtual hosted-style request example, if you have the object photos/2006/February/sample.jpg
, specify the resource as /photos/2006/February/sample.jpg
. For a path-style request example, if you have the object photos/2006/February/sample.jpg
in the bucket named examplebucket
, specify the resource as /examplebucket/photos/2006/February/sample.jpg
. For more information about request types, see HTTP Host Header Bucket Specification.
To distribute large files to many people, you can save bandwidth costs by using BitTorrent. For more information, see Amazon S3 Torrent. For more information about returning the ACL of an object, see GetObjectAcl.
If the object you are retrieving is stored in the S3 Glacier or S3 Glacier Deep Archive storage class, or S3 Intelligent-Tiering Archive or S3 Intelligent-Tiering Deep Archive tiers, before you can retrieve the object you must first restore a copy using RestoreObject. Otherwise, this action returns an InvalidObjectStateError
error. For information about restoring archived objects, see Restoring Archived Objects.
Encryption request headers, like x-amz-server-side-encryption
, should not be sent for GET requests if your object uses server-side encryption with KMS keys (SSE-KMS) or server-side encryption with Amazon S3–managed encryption keys (SSE-S3). If your object does use these types of keys, you’ll get an HTTP 400 BadRequest error.
If you encrypt an object by using server-side encryption with customer-provided encryption keys (SSE-C) when you store the object in Amazon S3, then when you GET the object, you must use the following headers:
x-amz-server-side-encryption-customer-algorithm
x-amz-server-side-encryption-customer-key
x-amz-server-side-encryption-customer-key-MD5
For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided Encryption Keys).
Assuming you have the relevant permission to read object tags, the response also returns the x-amz-tagging-count
header that provides the count of tags associated with the object. You can use GetObjectTagging to retrieve the tag set associated with an object.
Permissions
You need the relevant read object (or version) permission for this operation. For more information, see Specifying Permissions in a Policy. If the object you request does not exist, the error Amazon S3 returns depends on whether you also have the s3:ListBucket
permission.
If you have the s3:ListBucket
permission on the bucket, Amazon S3 will return an HTTP status code 404 (\"no such key\") error.
If you don’t have the s3:ListBucket
permission, Amazon S3 will return an HTTP status code 403 (\"access denied\") error.
Versioning
By default, the GET action returns the current version of an object. To return a different version, use the versionId
subresource.
If you supply a versionId
, you need the s3:GetObjectVersion
permission to access a specific version of an object. If you request a specific version, you do not need to have the s3:GetObject
permission.
If the current version of the object is a delete marker, Amazon S3 behaves as if the object was deleted and includes x-amz-delete-marker: true
in the response.
For more information about versioning, see PutBucketVersioning.
Overriding Response Header Values
There are times when you want to override certain response header values in a GET response. For example, you might override the Content-Disposition response header value in your GET request.
You can override values for a set of response headers using the following query parameters. These response header values are sent only on a successful request, that is, when status code 200 OK is returned. The set of headers you can override using these parameters is a subset of the headers that Amazon S3 accepts when you create an object. The response headers that you can override for the GET response are Content-Type
, Content-Language
, Expires
, Cache-Control
, Content-Disposition
, and Content-Encoding
. To override these header values in the GET response, you use the following request parameters.
You must sign the request, either using an Authorization header or a presigned URL, when using these parameters. They cannot be used with an unsigned (anonymous) request.
response-content-type
response-content-language
response-expires
response-cache-control
response-content-disposition
response-content-encoding
Additional Considerations about Request Headers
If both of the If-Match
and If-Unmodified-Since
headers are present in the request, the If-Match
condition evaluates to true
, and the If-Unmodified-Since
condition evaluates to false
, then S3 returns 200 OK and the requested data.
If both of the If-None-Match
and If-Modified-Since
headers are present in the request, the If-None-Match
condition evaluates to false
, and the If-Modified-Since
condition evaluates to true
, then S3 returns a 304 Not Modified response code.
For more information about conditional requests, see RFC 7232.
The following operations are related to GetObject
:
Returns the access control list (ACL) of an object. To use this operation, you must have READ_ACP
access to the object.
This action is not supported by Amazon S3 on Outposts.
Versioning
By default, GET returns ACL information about the current version of an object. To return ACL information about a different version, use the versionId subresource.
If your bucket uses the bucket owner enforced setting for S3 Object Ownership, requests to read ACLs are still supported and return the bucket-owner-full-control
ACL with the owner being the account that created the bucket. For more information, see Controlling object ownership and disabling ACLs in the Amazon S3 User Guide.
The following operations are related to GetObjectAcl
:
Gets an object's current Legal Hold status. For more information, see Locking Objects.
This action is not supported by Amazon S3 on Outposts.
", - "GetObjectLockConfiguration": "Gets the Object Lock configuration for a bucket. The rule specified in the Object Lock configuration will be applied by default to every new object placed in the specified bucket. For more information, see Locking Objects.
", - "GetObjectRetention": "Retrieves an object's retention settings. For more information, see Locking Objects.
This action is not supported by Amazon S3 on Outposts.
", - "GetObjectTagging": "Returns the tag-set of an object. You send the GET request against the tagging subresource associated with the object.
To use this operation, you must have permission to perform the s3:GetObjectTagging
action. By default, the GET action returns information about the current version of an object. For a versioned bucket, you can have multiple versions of an object in your bucket. To retrieve tags of any other version, use the versionId query parameter. You also need permission for the s3:GetObjectVersionTagging
action.
By default, the bucket owner has this permission and can grant this permission to others.
For information about the Amazon S3 object tagging feature, see Object Tagging.
The following action is related to GetObjectTagging
:
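A sketch of retrieving the tags of a specific version, which per the note above requires s3:GetObjectVersionTagging (all identifiers are placeholders):

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))
	out, err := svc.GetObjectTagging(&s3.GetObjectTaggingInput{
		Bucket: aws.String("examplebucket"), // placeholder
		Key:    aws.String("exampleobject"), // placeholder
		// Omit VersionId to read the tags of the current version instead.
		VersionId: aws.String("example-version-id"), // placeholder
	})
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	for _, tag := range out.TagSet {
		fmt.Printf("%s=%s\n", aws.StringValue(tag.Key), aws.StringValue(tag.Value))
	}
}
```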
"GetObject": Retrieves objects from Amazon S3. To use GET, you must have READ access to the object. If you grant READ access to the anonymous user, you can return the object without using an authorization header.
An Amazon S3 bucket has no directory hierarchy such as you would find in a typical computer file system. You can, however, create a logical hierarchy by using object key names that imply a folder structure. For example, instead of naming an object sample.jpg, you can name it photos/2006/February/sample.jpg.
To get an object from such a logical hierarchy, specify the full key name for the object in the GET operation. For a virtual hosted-style request example, if you have the object photos/2006/February/sample.jpg, specify the resource as /photos/2006/February/sample.jpg. For a path-style request example, if you have the object photos/2006/February/sample.jpg in the bucket named examplebucket, specify the resource as /examplebucket/photos/2006/February/sample.jpg. For more information about request types, see HTTP Host Header Bucket Specification.
For more information about returning the ACL of an object, see GetObjectAcl.
If the object you are retrieving is stored in the S3 Glacier or S3 Glacier Deep Archive storage class, or S3 Intelligent-Tiering Archive or S3 Intelligent-Tiering Deep Archive tiers, before you can retrieve the object you must first restore a copy using RestoreObject. Otherwise, this action returns an InvalidObjectStateError error. For information about restoring archived objects, see Restoring Archived Objects.
Encryption request headers, like x-amz-server-side-encryption, should not be sent for GET requests if your object uses server-side encryption with KMS keys (SSE-KMS) or server-side encryption with Amazon S3–managed encryption keys (SSE-S3). If your object does use these types of keys, you'll get an HTTP 400 Bad Request error.
If you encrypt an object by using server-side encryption with customer-provided encryption keys (SSE-C) when you store the object in Amazon S3, then when you GET the object, you must use the following headers:
x-amz-server-side-encryption-customer-algorithm
x-amz-server-side-encryption-customer-key
x-amz-server-side-encryption-customer-key-MD5
For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided Encryption Keys).
Assuming you have the relevant permission to read object tags, the response also returns the x-amz-tagging-count header that provides the count of tags associated with the object. You can use GetObjectTagging to retrieve the tag set associated with an object.
Permissions
You need the relevant read object (or version) permission for this operation. For more information, see Specifying Permissions in a Policy. If the object you request does not exist, the error Amazon S3 returns depends on whether you also have the s3:ListBucket permission.
If you have the s3:ListBucket permission on the bucket, Amazon S3 will return an HTTP status code 404 ("no such key") error.
If you don't have the s3:ListBucket permission, Amazon S3 will return an HTTP status code 403 ("access denied") error.
Versioning
By default, the GET action returns the current version of an object. To return a different version, use the versionId subresource.
If you supply a versionId, you need the s3:GetObjectVersion permission to access a specific version of an object. If you request a specific version, you do not need to have the s3:GetObject permission.
If the current version of the object is a delete marker, Amazon S3 behaves as if the object was deleted and includes x-amz-delete-marker: true in the response.
For more information about versioning, see PutBucketVersioning.
Overriding Response Header Values
There are times when you want to override certain response header values in a GET response. For example, you might override the Content-Disposition response header value in your GET request.
You can override values for a set of response headers using the following query parameters. These response header values are sent only on a successful request, that is, when status code 200 OK is returned. The set of headers you can override using these parameters is a subset of the headers that Amazon S3 accepts when you create an object. The response headers that you can override for the GET response are Content-Type, Content-Language, Expires, Cache-Control, Content-Disposition, and Content-Encoding. To override these header values in the GET response, you use the following request parameters.
You must sign the request, either using an Authorization header or a presigned URL, when using these parameters. They cannot be used with an unsigned (anonymous) request.
response-content-type
response-content-language
response-expires
response-cache-control
response-content-disposition
response-content-encoding
Additional Considerations about Request Headers
If both of the If-Match and If-Unmodified-Since headers are present in the request, and the If-Match condition evaluates to true while the If-Unmodified-Since condition evaluates to false, then S3 returns 200 OK and the data requested.
If both of the If-None-Match and If-Modified-Since headers are present in the request, and the If-None-Match condition evaluates to false while the If-Modified-Since condition evaluates to true, then S3 returns the 304 Not Modified response code.
For more information about conditional requests, see RFC 7232.
The following operations are related to GetObject:
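Putting the version and response-header override parameters together, here is a minimal sketch using this SDK's Go client; the bucket, key, and version ID are placeholders.

```go
package main

import (
	"fmt"
	"io"
	"log"
	"os"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	// Request a specific version and override the Content-Disposition
	// header returned on the 200 OK response.
	out, err := svc.GetObject(&s3.GetObjectInput{
		Bucket:                     aws.String("examplebucket"), // placeholder
		Key:                        aws.String("photos/2006/February/sample.jpg"),
		VersionId:                  aws.String("VERSION_ID"), // requires s3:GetObjectVersion
		ResponseContentDisposition: aws.String(`attachment; filename="sample.jpg"`),
	})
	if err != nil {
		log.Fatal(err)
	}
	defer out.Body.Close()

	n, _ := io.Copy(os.Stdout, out.Body)
	fmt.Fprintf(os.Stderr, "read %d bytes, tag count: %d\n", n, aws.Int64Value(out.TagCount))
}
```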
"GetObjectAcl": Returns the access control list (ACL) of an object. To use this operation, you must have s3:GetObjectAcl permissions or READ_ACP access to the object. For more information, see Mapping of ACL permissions and access policy permissions in the Amazon S3 User Guide.
This action is not supported by Amazon S3 on Outposts.
Versioning
By default, GET returns ACL information about the current version of an object. To return ACL information about a different version, use the versionId subresource.
If your bucket uses the bucket owner enforced setting for S3 Object Ownership, requests to read ACLs are still supported and return the bucket-owner-full-control ACL with the owner being the account that created the bucket. For more information, see Controlling object ownership and disabling ACLs in the Amazon S3 User Guide.
The following operations are related to GetObjectAcl:
"GetObjectAttributes": Retrieves all the metadata from an object without returning the object itself. This action is useful if you're interested only in an object's metadata. To use GetObjectAttributes, you must have READ access to the object.
GetObjectAttributes combines the functionality of GetObjectAcl, GetObjectLegalHold, GetObjectLockConfiguration, GetObjectRetention, GetObjectTagging, HeadObject, and ListParts. All of the data returned with each of those individual calls can be returned with a single call to GetObjectAttributes.
If you encrypt an object by using server-side encryption with customer-provided encryption keys (SSE-C) when you store the object in Amazon S3, then when you retrieve the metadata from the object, you must use the following headers:
x-amz-server-side-encryption-customer-algorithm
x-amz-server-side-encryption-customer-key
x-amz-server-side-encryption-customer-key-MD5
For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided Encryption Keys) in the Amazon S3 User Guide.
Encryption request headers, such as x-amz-server-side-encryption, should not be sent for GET requests if your object uses server-side encryption with Amazon Web Services KMS keys stored in Amazon Web Services Key Management Service (SSE-KMS) or server-side encryption with Amazon S3 managed encryption keys (SSE-S3). If your object does use these types of keys, you'll get an HTTP 400 Bad Request error.
The last modified property in this case is the creation date of the object.
Consider the following when using request headers:
If both of the If-Match and If-Unmodified-Since headers are present in the request, then Amazon S3 returns the HTTP status code 200 OK and the data requested when the If-Match condition evaluates to true and the If-Unmodified-Since condition evaluates to false.
If both of the If-None-Match and If-Modified-Since headers are present in the request, then Amazon S3 returns the HTTP status code 304 Not Modified when the If-None-Match condition evaluates to false and the If-Modified-Since condition evaluates to true.
For more information about conditional requests, see RFC 7232.
Permissions
The permissions that you need to use this operation depend on whether the bucket is versioned. If the bucket is versioned, you need both the s3:GetObjectVersion and s3:GetObjectVersionAttributes permissions for this operation. If the bucket is not versioned, you need the s3:GetObject and s3:GetObjectAttributes permissions. For more information, see Specifying Permissions in a Policy in the Amazon S3 User Guide. If the object that you request does not exist, the error Amazon S3 returns depends on whether you also have the s3:ListBucket permission.
If you have the s3:ListBucket permission on the bucket, Amazon S3 returns an HTTP status code 404 Not Found ("no such key") error.
If you don't have the s3:ListBucket permission, Amazon S3 returns an HTTP status code 403 Forbidden ("access denied") error.
The following actions are related to GetObjectAttributes:
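Because GetObjectAttributes is newly generated in this SDK line, the following is only a tentative sketch of fetching several attributes in one round trip; the bucket and key are placeholders, and the attribute constants are assumed to match the generated enum.

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	// Ask for a subset of attributes in one request instead of calling
	// HeadObject, GetObjectTagging, and ListParts separately.
	out, err := svc.GetObjectAttributes(&s3.GetObjectAttributesInput{
		Bucket: aws.String("examplebucket"), // placeholder
		Key:    aws.String("photos/2006/February/sample.jpg"),
		ObjectAttributes: []*string{
			aws.String(s3.ObjectAttributesEtag),
			aws.String(s3.ObjectAttributesObjectSize),
			aws.String(s3.ObjectAttributesStorageClass),
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("size:", aws.Int64Value(out.ObjectSize), "etag:", aws.StringValue(out.ETag))
}
```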
"GetObjectLegalHold": Gets an object's current legal hold status. For more information, see Locking Objects.
This action is not supported by Amazon S3 on Outposts.
The following action is related to GetObjectLegalHold:
"GetObjectLockConfiguration": Gets the Object Lock configuration for a bucket. The rule specified in the Object Lock configuration will be applied by default to every new object placed in the specified bucket. For more information, see Locking Objects.
The following action is related to GetObjectLockConfiguration:
"GetObjectRetention": Retrieves an object's retention settings. For more information, see Locking Objects.
This action is not supported by Amazon S3 on Outposts.
The following action is related to GetObjectRetention:
"GetObjectTagging": Returns the tag-set of an object. You send the GET request against the tagging subresource associated with the object.
To use this operation, you must have permission to perform the s3:GetObjectTagging action. By default, the GET action returns information about the current version of an object. For a versioned bucket, you can have multiple versions of an object in your bucket. To retrieve tags of any other version, use the versionId query parameter. You also need permission for the s3:GetObjectVersionTagging action.
By default, the bucket owner has this permission and can grant this permission to others.
For information about the Amazon S3 object tagging feature, see Object Tagging.
The following actions are related to GetObjectTagging:
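A minimal sketch of reading a version's tag set with this SDK; the bucket, key, and version ID are placeholders.

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	// Read the tag set of a specific object version; omit VersionId to
	// read the current version (s3:GetObjectTagging alone suffices then).
	out, err := svc.GetObjectTagging(&s3.GetObjectTaggingInput{
		Bucket:    aws.String("examplebucket"), // placeholder
		Key:       aws.String("sample.jpg"),
		VersionId: aws.String("VERSION_ID"), // needs s3:GetObjectVersionTagging
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, tag := range out.TagSet {
		fmt.Printf("%s=%s\n", aws.StringValue(tag.Key), aws.StringValue(tag.Value))
	}
}
```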
"GetObjectTorrent": Returns torrent files from a bucket. BitTorrent can save you bandwidth when you're distributing large files. For more information about BitTorrent, see Using BitTorrent with Amazon S3.
You can get torrent only for objects that are less than 5 GB in size, and that are not encrypted using server-side encryption with a customer-provided encryption key.
To use GET, you must have READ access to the object.
This action is not supported by Amazon S3 on Outposts.
The following action is related to GetObjectTorrent:
"GetPublicAccessBlock": Retrieves the PublicAccessBlock configuration for an Amazon S3 bucket. To use this operation, you must have the s3:GetBucketPublicAccessBlock permission. For more information about Amazon S3 permissions, see Specifying Permissions in a Policy.
When Amazon S3 evaluates the PublicAccessBlock configuration for a bucket or an object, it checks the PublicAccessBlock configuration for both the bucket (or the bucket that contains the object) and the bucket owner's account. If the PublicAccessBlock settings are different between the bucket and the account, Amazon S3 uses the most restrictive combination of the bucket-level and account-level settings.
For more information about when Amazon S3 considers a bucket or an object public, see The Meaning of "Public".
The following operations are related to GetPublicAccessBlock:
"HeadBucket": This action is useful to determine if a bucket exists and you have permission to access it. The action returns a 200 OK if the bucket exists and you have permission to access it.
If the bucket does not exist or you do not have permission to access it, the HEAD request returns a generic 404 Not Found or 403 Forbidden code. A message body is not included, so you cannot determine the exception beyond these error codes.
To use this operation, you must have permissions to perform the s3:ListBucket action. The bucket owner has this permission by default and can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.
To use this API against an access point, you must provide the alias of the access point in place of the bucket name or specify the access point ARN. When using the access point ARN, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using the Amazon Web Services SDKs, you provide the ARN in place of the bucket name. For more information, see Using access points.
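Since HEAD responses carry no body, a caller can only branch on the status code. A minimal sketch with this SDK, using a placeholder bucket name:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	_, err := svc.HeadBucket(&s3.HeadBucketInput{
		Bucket: aws.String("examplebucket"), // placeholder
	})
	if err != nil {
		// Only the status code distinguishes "missing" from "forbidden".
		if reqErr, ok := err.(awserr.RequestFailure); ok {
			switch reqErr.StatusCode() {
			case 404:
				fmt.Println("bucket does not exist")
			case 403:
				fmt.Println("bucket exists but access is denied")
			}
			return
		}
		log.Fatal(err)
	}
	fmt.Println("bucket exists and is accessible")
}
```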
", - "HeadObject": "The HEAD action retrieves metadata from an object without returning the object itself. This action is useful if you're only interested in an object's metadata. To use HEAD, you must have READ access to the object.
A HEAD
request has the same options as a GET
action on an object. The response is identical to the GET
response except that there is no response body. Because of this, if the HEAD
request generates an error, it returns a generic 404 Not Found
or 403 Forbidden
code. It is not possible to retrieve the exact exception beyond these error codes.
If you encrypt an object by using server-side encryption with customer-provided encryption keys (SSE-C) when you store the object in Amazon S3, then when you retrieve the metadata from the object, you must use the following headers:
x-amz-server-side-encryption-customer-algorithm
x-amz-server-side-encryption-customer-key
x-amz-server-side-encryption-customer-key-MD5
For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided Encryption Keys).
Encryption request headers, like x-amz-server-side-encryption
, should not be sent for GET requests if your object uses server-side encryption with KMS keys (SSE-KMS) or server-side encryption with Amazon S3–managed encryption keys (SSE-S3). If your object does use these types of keys, you’ll get an HTTP 400 BadRequest error.
The last modified property in this case is the creation date of the object.
Request headers are limited to 8 KB in size. For more information, see Common Request Headers.
Consider the following when using request headers:
Consideration 1 – If both of the If-Match
and If-Unmodified-Since
headers are present in the request as follows:
If-Match
condition evaluates to true
, and;
If-Unmodified-Since
condition evaluates to false
;
Then Amazon S3 returns 200 OK
and the data requested.
Consideration 2 – If both of the If-None-Match
and If-Modified-Since
headers are present in the request as follows:
If-None-Match
condition evaluates to false
, and;
If-Modified-Since
condition evaluates to true
;
Then Amazon S3 returns the 304 Not Modified
response code.
For more information about conditional requests, see RFC 7232.
Permissions
You need the relevant read object (or version) permission for this operation. For more information, see Specifying Permissions in a Policy. If the object you request does not exist, the error Amazon S3 returns depends on whether you also have the s3:ListBucket permission.
If you have the s3:ListBucket
permission on the bucket, Amazon S3 returns an HTTP status code 404 (\"no such key\") error.
If you don’t have the s3:ListBucket
permission, Amazon S3 returns an HTTP status code 403 (\"access denied\") error.
The following action is related to HeadObject
:
"HeadObject": The HEAD action retrieves metadata from an object without returning the object itself. This action is useful if you're only interested in an object's metadata. To use HEAD, you must have READ access to the object.
A HEAD request has the same options as a GET action on an object. The response is identical to the GET response except that there is no response body. Because of this, if the HEAD request generates an error, it returns a generic 404 Not Found or 403 Forbidden code. It is not possible to retrieve the exact exception beyond these error codes.
If you encrypt an object by using server-side encryption with customer-provided encryption keys (SSE-C) when you store the object in Amazon S3, then when you retrieve the metadata from the object, you must use the following headers:
x-amz-server-side-encryption-customer-algorithm
x-amz-server-side-encryption-customer-key
x-amz-server-side-encryption-customer-key-MD5
For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided Encryption Keys).
Encryption request headers, like x-amz-server-side-encryption, should not be sent for GET requests if your object uses server-side encryption with KMS keys (SSE-KMS) or server-side encryption with Amazon S3–managed encryption keys (SSE-S3). If your object does use these types of keys, you'll get an HTTP 400 Bad Request error.
The last modified property in this case is the creation date of the object.
Request headers are limited to 8 KB in size. For more information, see Common Request Headers.
Consider the following when using request headers:
Consideration 1 – If both of the If-Match and If-Unmodified-Since headers are present in the request, and the If-Match condition evaluates to true while the If-Unmodified-Since condition evaluates to false, then Amazon S3 returns 200 OK and the data requested.
Consideration 2 – If both of the If-None-Match and If-Modified-Since headers are present in the request, and the If-None-Match condition evaluates to false while the If-Modified-Since condition evaluates to true, then Amazon S3 returns the 304 Not Modified response code.
For more information about conditional requests, see RFC 7232.
Permissions
You need the relevant read object (or version) permission for this operation. For more information, see Specifying Permissions in a Policy. If the object you request does not exist, the error Amazon S3 returns depends on whether you also have the s3:ListBucket permission.
If you have the s3:ListBucket permission on the bucket, Amazon S3 returns an HTTP status code 404 ("no such key") error.
If you don't have the s3:ListBucket permission, Amazon S3 returns an HTTP status code 403 ("access denied") error.
The following actions are related to HeadObject:
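A minimal sketch of a metadata-only read with a conditional header; the bucket and key are placeholders. Note the SDK surfaces a 304 as an error, consistent with the generic error codes described above.

```go
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	// Fetch only the metadata; IfModifiedSince yields a 304-style error
	// instead of fresh metadata when the object hasn't changed.
	out, err := svc.HeadObject(&s3.HeadObjectInput{
		Bucket:          aws.String("examplebucket"), // placeholder
		Key:             aws.String("sample.jpg"),
		IfModifiedSince: aws.Time(time.Now().Add(-24 * time.Hour)),
	})
	if err != nil {
		log.Fatal(err) // generic 404/403/304; no body to inspect
	}
	fmt.Println("content length:", aws.Int64Value(out.ContentLength))
	fmt.Println("last modified:", aws.TimeValue(out.LastModified))
}
```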
"ListBucketAnalyticsConfigurations": Lists the analytics configurations for the bucket. You can have up to 1,000 analytics configurations per bucket.
This action supports list pagination and does not return more than 100 configurations at a time. You should always check the IsTruncated element in the response. If there are no more configurations to list, IsTruncated is set to false. If there are more configurations to list, IsTruncated is set to true, and there will be a value in NextContinuationToken. You use the NextContinuationToken value to continue the pagination of the list by passing the value in continuation-token in the request to GET the next page.
To use this operation, you must have permissions to perform the s3:GetAnalyticsConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.
For information about the Amazon S3 analytics feature, see Amazon S3 Analytics – Storage Class Analysis.
The following operations are related to ListBucketAnalyticsConfigurations:
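The IsTruncated/NextContinuationToken loop described above looks roughly like this in Go; the bucket name is a placeholder.

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	var token *string
	for {
		out, err := svc.ListBucketAnalyticsConfigurations(
			&s3.ListBucketAnalyticsConfigurationsInput{
				Bucket:            aws.String("examplebucket"), // placeholder
				ContinuationToken: token,                       // nil on the first page
			})
		if err != nil {
			log.Fatal(err)
		}
		for _, cfg := range out.AnalyticsConfigurationList {
			fmt.Println(aws.StringValue(cfg.Id))
		}
		if !aws.BoolValue(out.IsTruncated) {
			break
		}
		token = out.NextContinuationToken // feed into the next request
	}
}
```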
"ListBucketIntelligentTieringConfigurations": Lists the S3 Intelligent-Tiering configuration from the specified bucket.
The S3 Intelligent-Tiering storage class is designed to optimize storage costs by automatically moving data to the most cost-effective storage access tier, without performance impact or operational overhead. S3 Intelligent-Tiering delivers automatic cost savings in three low latency and high throughput access tiers. To get the lowest storage cost on data that can be accessed in minutes to hours, you can choose to activate additional archiving capabilities.
The S3 Intelligent-Tiering storage class is the ideal storage class for data with unknown, changing, or unpredictable access patterns, independent of object size or retention period. If the size of an object is less than 128 KB, it is not monitored and not eligible for auto-tiering. Smaller objects can be stored, but they are always charged at the Frequent Access tier rates in the S3 Intelligent-Tiering storage class.
For more information, see Storage class for automatically optimizing frequently and infrequently accessed objects.
Operations related to ListBucketIntelligentTieringConfigurations include:
"ListBucketInventoryConfigurations": Returns a list of inventory configurations for the bucket. You can have up to 1,000 inventory configurations per bucket.
This action supports list pagination and does not return more than 100 configurations at a time. Always check the IsTruncated element in the response. If there are no more configurations to list, IsTruncated is set to false. If there are more configurations to list, IsTruncated is set to true, and there is a value in NextContinuationToken. You use the NextContinuationToken value to continue the pagination of the list by passing the value in continuation-token in the request to GET the next page.
To use this operation, you must have permissions to perform the s3:GetInventoryConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.
For information about the Amazon S3 inventory feature, see Amazon S3 Inventory.
The following operations are related to ListBucketInventoryConfigurations:
"ListBucketMetricsConfigurations": Lists the metrics configurations for the bucket. The metrics configurations are only for the request metrics of the bucket and do not provide information on daily storage metrics. You can have up to 1,000 configurations per bucket.
This action supports list pagination and does not return more than 100 configurations at a time. Always check the IsTruncated element in the response. If there are no more configurations to list, IsTruncated is set to false. If there are more configurations to list, IsTruncated is set to true, and there is a value in NextContinuationToken. You use the NextContinuationToken value to continue the pagination of the list by passing the value in continuation-token in the request to GET the next page.
To use this operation, you must have permissions to perform the s3:GetMetricsConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.
For more information about metrics configurations and CloudWatch request metrics, see Monitoring Metrics with Amazon CloudWatch.
The following operations are related to ListBucketMetricsConfigurations:
"ListBuckets": Returns a list of all buckets owned by the authenticated sender of the request. To use this operation, you must have the s3:ListAllMyBuckets permission.
"ListMultipartUploads": This action lists in-progress multipart uploads. An in-progress multipart upload is a multipart upload that has been initiated using the Initiate Multipart Upload request, but has not yet been completed or aborted.
This action returns at most 1,000 multipart uploads in the response. 1,000 multipart uploads is the maximum number of uploads a response can include, which is also the default value. You can further limit the number of uploads in a response by specifying the max-uploads parameter in the request. If additional multipart uploads satisfy the list criteria, the response will contain an IsTruncated element with the value true. To list the additional multipart uploads, use the key-marker and upload-id-marker request parameters.
In the response, the uploads are sorted by key. If your application has initiated more than one multipart upload using the same object key, then uploads in the response are first sorted by key. Additionally, uploads are sorted in ascending order within each key by the upload initiation time.
For more information on multipart uploads, see Uploading Objects Using Multipart Upload.
For information on permissions required to use the multipart upload API, see Multipart Upload and Permissions.
The following operations are related to ListMultipartUploads:
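A minimal sketch using the SDK's generated paginator, which follows key-marker and upload-id-marker internally; the bucket name is a placeholder.

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	// Visit every in-progress upload exactly once, 100 per page.
	err := svc.ListMultipartUploadsPages(&s3.ListMultipartUploadsInput{
		Bucket:     aws.String("examplebucket"), // placeholder
		MaxUploads: aws.Int64(100),
	}, func(page *s3.ListMultipartUploadsOutput, lastPage bool) bool {
		for _, u := range page.Uploads {
			fmt.Println(aws.StringValue(u.Key), aws.StringValue(u.UploadId))
		}
		return true // keep paging
	})
	if err != nil {
		log.Fatal(err)
	}
}
```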
"ListObjectVersions": Returns metadata about all versions of the objects in a bucket. You can also use request parameters as selection criteria to return metadata about a subset of all the object versions.
To use this operation, you must have permissions to perform the s3:ListBucketVersions action. Be aware of the name difference.
A 200 OK response can contain valid or invalid XML. Make sure to design your application to parse the contents of the response and handle it appropriately.
To use this operation, you must have READ access to the bucket.
This action is not supported by Amazon S3 on Outposts.
The following operations are related to ListObjectVersions:
"ListObjects": Returns some or all (up to 1,000) of the objects in a bucket. You can use the request parameters as selection criteria to return a subset of the objects in a bucket. A 200 OK response can contain valid or invalid XML. Be sure to design your application to parse the contents of the response and handle it appropriately.
This action has been revised. We recommend that you use the newer version, ListObjectsV2, when developing applications. For backward compatibility, Amazon S3 continues to support ListObjects.
The following operations are related to ListObjects:
"ListObjectsV2": Returns some or all (up to 1,000) of the objects in a bucket with each request. You can use the request parameters as selection criteria to return a subset of the objects in a bucket. A 200 OK response can contain valid or invalid XML. Make sure to design your application to parse the contents of the response and handle it appropriately. Objects are returned sorted in ascending order by key name. For more information about listing objects, see Listing object keys programmatically.
To use this operation, you must have READ access to the bucket.
To use this action in an Identity and Access Management (IAM) policy, you must have permissions to perform the s3:ListBucket action. The bucket owner has this permission by default and can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.
This section describes the latest revision of this action. We recommend that you use this revised API for application development. For backward compatibility, Amazon S3 continues to support the prior version of this API, ListObjects.
To get a list of your buckets, see ListBuckets.
The following operations are related to ListObjectsV2:
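A minimal sketch of listing keys under a prefix with the SDK's paginator, which resubmits the request with the returned continuation token until IsTruncated is false; bucket and prefix are placeholders.

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	err := svc.ListObjectsV2Pages(&s3.ListObjectsV2Input{
		Bucket: aws.String("examplebucket"), // placeholder
		Prefix: aws.String("photos/2006/"),
	}, func(page *s3.ListObjectsV2Output, lastPage bool) bool {
		for _, obj := range page.Contents {
			fmt.Println(aws.StringValue(obj.Key), aws.Int64Value(obj.Size))
		}
		return true // continue until the last page
	})
	if err != nil {
		log.Fatal(err)
	}
}
```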
"ListParts": Lists the parts that have been uploaded for a specific multipart upload. This operation must include the upload ID, which you obtain by sending the initiate multipart upload request (see CreateMultipartUpload). This request returns a maximum of 1,000 uploaded parts. The default number of parts returned is 1,000 parts. You can restrict the number of parts returned by specifying the max-parts request parameter. If your multipart upload consists of more than 1,000 parts, the response returns an IsTruncated field with the value of true, and a NextPartNumberMarker element. In subsequent ListParts requests you can include the part-number-marker query string parameter and set its value to the NextPartNumberMarker field value from the previous response.
If the upload was created using a checksum algorithm, you will need to have permission to the kms:Decrypt action for the request to succeed.
For more information on multipart uploads, see Uploading Objects Using Multipart Upload.
For information on permissions required to use the multipart upload API, see Multipart Upload and Permissions.
The following operations are related to ListParts:
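The part-number-marker loop described above might look like this; the bucket, key, and upload ID are placeholders.

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	var marker *int64
	for {
		out, err := svc.ListParts(&s3.ListPartsInput{
			Bucket:           aws.String("examplebucket"),    // placeholder
			Key:              aws.String("large-object.bin"), // placeholder
			UploadId:         aws.String("UPLOAD_ID"),        // from CreateMultipartUpload
			MaxParts:         aws.Int64(1000),
			PartNumberMarker: marker, // nil on the first request
		})
		if err != nil {
			log.Fatal(err)
		}
		for _, p := range out.Parts {
			fmt.Println(aws.Int64Value(p.PartNumber), aws.Int64Value(p.Size))
		}
		if !aws.BoolValue(out.IsTruncated) {
			break
		}
		marker = out.NextPartNumberMarker
	}
}
```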
"PutBucketAccelerateConfiguration": Sets the accelerate configuration of an existing bucket. Amazon S3 Transfer Acceleration is a bucket-level feature that enables you to perform faster data transfers to Amazon S3.
To use this operation, you must have permission to perform the s3:PutAccelerateConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.
The Transfer Acceleration state of a bucket can be set to one of the following two values:
Enabled – Enables accelerated data transfers to the bucket.
Suspended – Disables accelerated data transfers to the bucket.
The GetBucketAccelerateConfiguration action returns the transfer acceleration state of a bucket.
After setting the Transfer Acceleration state of a bucket to Enabled, it might take up to thirty minutes before the data transfer rates to the bucket increase.
The name of the bucket used for Transfer Acceleration must be DNS-compliant and must not contain periods (".").
For more information about transfer acceleration, see Transfer Acceleration.
The following operations are related to PutBucketAccelerateConfiguration:
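A minimal sketch of flipping the acceleration state; the bucket name is a placeholder and must be DNS-compliant.

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	// Enable Transfer Acceleration; use s3.BucketAccelerateStatusSuspended
	// to turn it back off.
	_, err := svc.PutBucketAccelerateConfiguration(&s3.PutBucketAccelerateConfigurationInput{
		Bucket: aws.String("examplebucket"), // placeholder
		AccelerateConfiguration: &s3.AccelerateConfiguration{
			Status: aws.String(s3.BucketAccelerateStatusEnabled),
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}
```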
"PutBucketAcl": Sets the permissions on an existing bucket using access control lists (ACL). For more information, see Using ACLs. To set the ACL of a bucket, you must have WRITE_ACP permission.
You can use one of the following two ways to set a bucket's permissions:
Specify the ACL in the request body
Specify permissions using request headers
You cannot specify access permission using both the body and the request headers.
Depending on your application needs, you may choose to set the ACL on a bucket using either the request body or the headers. For example, if you have an existing application that updates a bucket ACL using the request body, then you can continue to use that approach.
If your bucket uses the bucket owner enforced setting for S3 Object Ownership, ACLs are disabled and no longer affect permissions. You must use policies to grant access to your bucket and the objects in it. Requests to set ACLs or update ACLs fail and return the AccessControlListNotSupported error code. Requests to read ACLs are still supported. For more information, see Controlling object ownership in the Amazon S3 User Guide.
Access Permissions
You can set access permissions using one of the following methods:
Specify a canned ACL with the x-amz-acl request header. Amazon S3 supports a set of predefined ACLs, known as canned ACLs. Each canned ACL has a predefined set of grantees and permissions. Specify the canned ACL name as the value of x-amz-acl. If you use this header, you cannot use other access control-specific headers in your request. For more information, see Canned ACL.
Specify access permissions explicitly with the x-amz-grant-read, x-amz-grant-read-acp, x-amz-grant-write-acp, and x-amz-grant-full-control headers. When using these headers, you specify explicit access permissions and grantees (Amazon Web Services accounts or Amazon S3 groups) who will receive the permission. If you use these ACL-specific headers, you cannot use the x-amz-acl header to set a canned ACL. These parameters map to the set of permissions that Amazon S3 supports in an ACL. For more information, see Access Control List (ACL) Overview.
You specify each grantee as a type=value pair, where the type is one of the following:
id – if the value specified is the canonical user ID of an Amazon Web Services account
uri – if you are granting permissions to a predefined group
emailAddress – if the value specified is the email address of an Amazon Web Services account
Using email addresses to specify a grantee is only supported in the following Amazon Web Services Regions:
US East (N. Virginia)
US West (N. California)
US West (Oregon)
Asia Pacific (Singapore)
Asia Pacific (Sydney)
Asia Pacific (Tokyo)
Europe (Ireland)
South America (São Paulo)
For a list of all the Amazon S3 supported Regions and endpoints, see Regions and Endpoints in the Amazon Web Services General Reference.
For example, the following x-amz-grant-write header grants create, overwrite, and delete objects permission to the LogDelivery group predefined by Amazon S3 and two Amazon Web Services accounts identified by their IDs:
x-amz-grant-write: uri="http://acs.amazonaws.com/groups/s3/LogDelivery", id="111122223333", id="555566667777"
You can use either a canned ACL or specify access permissions explicitly. You cannot do both.
Grantee Values
You can specify the person (grantee) to whom you're assigning access rights (using request elements) in the following ways:
By the person's ID:
<Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:type="CanonicalUser"><ID><>ID<></ID><DisplayName><>GranteesEmail<></DisplayName></Grantee>
DisplayName is optional and ignored in the request.
By URI:
<Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:type="Group"><URI><>http://acs.amazonaws.com/groups/global/AuthenticatedUsers<></URI></Grantee>
By Email address:
<Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:type="AmazonCustomerByEmail"><EmailAddress><>Grantees@email.com<></EmailAddress></Grantee>
The grantee is resolved to the CanonicalUser and, in a response to a GET Object acl request, appears as the CanonicalUser.
Using email addresses to specify a grantee is only supported in the following Amazon Web Services Regions:
US East (N. Virginia)
US West (N. California)
US West (Oregon)
Asia Pacific (Singapore)
Asia Pacific (Sydney)
Asia Pacific (Tokyo)
Europe (Ireland)
South America (São Paulo)
For a list of all the Amazon S3 supported Regions and endpoints, see Regions and Endpoints in the Amazon Web Services General Reference.
Related Resources
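A minimal sketch applying a canned ACL with this SDK; the bucket name is a placeholder. Because a canned ACL and the explicit x-amz-grant-* headers are mutually exclusive, only one mechanism is used here.

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	// Apply the "private" canned ACL (x-amz-acl). To grant permissions
	// explicitly instead, set GrantRead/GrantWriteACP/etc. and omit ACL.
	_, err := svc.PutBucketAcl(&s3.PutBucketAclInput{
		Bucket: aws.String("examplebucket"), // placeholder
		ACL:    aws.String(s3.BucketCannedACLPrivate),
	})
	if err != nil {
		log.Fatal(err)
	}
}
```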
", "PutBucketAnalyticsConfiguration": "Sets an analytics configuration for the bucket (specified by the analytics configuration ID). You can have up to 1,000 analytics configurations per bucket.
You can choose to have storage class analysis export analysis reports sent to a comma-separated values (CSV) flat file. See the DataExport request element. Reports are updated daily and are based on the object filters that you configure. When selecting data export, you specify a destination bucket and an optional destination prefix where the file is written. You can export the data to a destination bucket in a different account. However, the destination bucket must be in the same Region as the bucket that you are making the PUT analytics configuration to. For more information, see Amazon S3 Analytics – Storage Class Analysis.
You must create a bucket policy on the destination bucket where the exported file is written to grant permissions to Amazon S3 to write objects to the bucket. For an example policy, see Granting Permissions for Amazon S3 Inventory and Storage Class Analysis.
To use this operation, you must have permissions to perform the s3:PutAnalyticsConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.
Special Errors
HTTP Error: HTTP 400 Bad Request; Code: InvalidArgument; Cause: Invalid argument.
HTTP Error: HTTP 400 Bad Request; Code: TooManyConfigurations; Cause: You are attempting to create a new configuration but have already reached the 1,000-configuration limit.
HTTP Error: HTTP 403 Forbidden; Code: AccessDenied; Cause: You are not the owner of the specified bucket, or you do not have the s3:PutAnalyticsConfiguration bucket permission to set the configuration on the bucket.
Related Resources
"PutBucketCors": Sets the cors configuration for your bucket. If the configuration exists, Amazon S3 replaces it.
To use this operation, you must be allowed to perform the s3:PutBucketCORS action. By default, the bucket owner has this permission and can grant it to others.
You set this configuration on a bucket so that the bucket can service cross-origin requests. For example, you might want to enable a request whose origin is http://www.example.com to access your Amazon S3 bucket at my.example.bucket.com by using the browser's XMLHttpRequest capability.
To enable cross-origin resource sharing (CORS) on a bucket, you add the cors subresource to the bucket. The cors subresource is an XML document in which you configure rules that identify origins and the HTTP methods that can be executed on your bucket. The document is limited to 64 KB in size.
When Amazon S3 receives a cross-origin request (or a pre-flight OPTIONS request) against a bucket, it evaluates the cors configuration on the bucket and uses the first CORSRule rule that matches the incoming browser request to enable a cross-origin request. For a rule to match, the following conditions must be met:
The request's Origin header must match an AllowedOrigin element.
The request method (for example, GET, PUT, HEAD, and so on) or the Access-Control-Request-Method header in case of a pre-flight OPTIONS request must be one of the AllowedMethod elements.
Every header specified in the Access-Control-Request-Headers request header of a pre-flight request must match an AllowedHeader element.
For more information about CORS, go to Enabling Cross-Origin Resource Sharing in the Amazon S3 User Guide.
Related Resources
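A minimal sketch of one CORS rule matching the example origin above; the bucket name is a placeholder.

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	// One rule allowing browser GETs and PUTs from a single origin;
	// Amazon S3 applies the first rule that matches a request.
	_, err := svc.PutBucketCors(&s3.PutBucketCorsInput{
		Bucket: aws.String("my.example.bucket.com"), // placeholder
		CORSConfiguration: &s3.CORSConfiguration{
			CORSRules: []*s3.CORSRule{{
				AllowedOrigins: []*string{aws.String("http://www.example.com")},
				AllowedMethods: []*string{aws.String("GET"), aws.String("PUT")},
				AllowedHeaders: []*string{aws.String("*")},
				MaxAgeSeconds:  aws.Int64(3000),
			}},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}
```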
", - "PutBucketEncryption": "This action uses the encryption
subresource to configure default encryption and Amazon S3 Bucket Key for an existing bucket.
Default encryption for a bucket can use server-side encryption with Amazon S3-managed keys (SSE-S3) or customer managed keys (SSE-KMS). If you specify default encryption using SSE-KMS, you can also configure Amazon S3 Bucket Key. For information about default encryption, see Amazon S3 default bucket encryption in the Amazon S3 User Guide. For more information about S3 Bucket Keys, see Amazon S3 Bucket Keys in the Amazon S3 User Guide.
This action requires Amazon Web Services Signature Version 4. For more information, see Authenticating Requests (Amazon Web Services Signature Version 4).
To use this operation, you must have permissions to perform the s3:PutEncryptionConfiguration
action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources in the Amazon S3 User Guide.
Related Resources
", + "PutBucketEncryption": "This action uses the encryption
subresource to configure default encryption and Amazon S3 Bucket Key for an existing bucket.
Default encryption for a bucket can use server-side encryption with Amazon S3-managed keys (SSE-S3) or customer managed keys (SSE-KMS). If you specify default encryption using SSE-KMS, you can also configure Amazon S3 Bucket Key. When the default encryption is SSE-KMS, if you upload an object to the bucket and do not specify the KMS key to use for encryption, Amazon S3 uses the default Amazon Web Services managed KMS key for your account. For information about default encryption, see Amazon S3 default bucket encryption in the Amazon S3 User Guide. For more information about S3 Bucket Keys, see Amazon S3 Bucket Keys in the Amazon S3 User Guide.
This action requires Amazon Web Services Signature Version 4. For more information, see Authenticating Requests (Amazon Web Services Signature Version 4).
To use this operation, you must have permissions to perform the s3:PutEncryptionConfiguration
action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources in the Amazon S3 User Guide.
Related Resources
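A minimal sketch defaulting new objects to SSE-KMS with a Bucket Key; the bucket name and key ARN are placeholders, and omitting KMSMasterKeyID would fall back to the account's Amazon Web Services managed key as described above.

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	_, err := svc.PutBucketEncryption(&s3.PutBucketEncryptionInput{
		Bucket: aws.String("examplebucket"), // placeholder
		ServerSideEncryptionConfiguration: &s3.ServerSideEncryptionConfiguration{
			Rules: []*s3.ServerSideEncryptionRule{{
				ApplyServerSideEncryptionByDefault: &s3.ServerSideEncryptionByDefault{
					SSEAlgorithm:   aws.String(s3.ServerSideEncryptionAwsKms),
					KMSMasterKeyID: aws.String("KMS_KEY_ARN"), // placeholder
				},
				// Reduce KMS request costs for objects in this bucket.
				BucketKeyEnabled: aws.Bool(true),
			}},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}
```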
", "PutBucketIntelligentTieringConfiguration": "Puts a S3 Intelligent-Tiering configuration to the specified bucket. You can have up to 1,000 S3 Intelligent-Tiering configurations per bucket.
The S3 Intelligent-Tiering storage class is designed to optimize storage costs by automatically moving data to the most cost-effective storage access tier, without performance impact or operational overhead. S3 Intelligent-Tiering delivers automatic cost savings in three low latency and high throughput access tiers. To get the lowest storage cost on data that can be accessed in minutes to hours, you can choose to activate additional archiving capabilities.
The S3 Intelligent-Tiering storage class is the ideal storage class for data with unknown, changing, or unpredictable access patterns, independent of object size or retention period. If the size of an object is less than 128 KB, it is not monitored and not eligible for auto-tiering. Smaller objects can be stored, but they are always charged at the Frequent Access tier rates in the S3 Intelligent-Tiering storage class.
For more information, see Storage class for automatically optimizing frequently and infrequently accessed objects.
Operations related to PutBucketIntelligentTieringConfiguration
include:
You only need S3 Intelligent-Tiering enabled on a bucket if you want to automatically move objects stored in the S3 Intelligent-Tiering storage class to the Archive Access or Deep Archive Access tier.
Special Errors
HTTP 400 Bad Request Error
Code: InvalidArgument
Cause: Invalid Argument
HTTP 400 Bad Request Error
Code: TooManyConfigurations
Cause: You are attempting to create a new configuration but have already reached the 1,000-configuration limit.
HTTP 403 Forbidden Error
Code: AccessDenied
Cause: You are not the owner of the specified bucket, or you do not have the s3:PutIntelligentTieringConfiguration
bucket permission to set the configuration on the bucket.
"PutBucketInventoryConfiguration": This implementation of the PUT action adds an inventory configuration (identified by the inventory ID) to the bucket. You can have up to 1,000 inventory configurations per bucket.
Amazon S3 inventory generates inventories of the objects in the bucket on a daily or weekly basis, and the results are published to a flat file. The bucket that is inventoried is called the source bucket, and the bucket where the inventory flat file is stored is called the destination bucket. The destination bucket must be in the same Amazon Web Services Region as the source bucket.
When you configure an inventory for a source bucket, you specify the destination bucket where you want the inventory to be stored, and whether to generate the inventory daily or weekly. You can also configure what object metadata to include and whether to inventory all object versions or only current versions. For more information, see Amazon S3 Inventory in the Amazon S3 User Guide.
You must create a bucket policy on the destination bucket to grant permissions to Amazon S3 to write objects to the bucket in the defined location. For an example policy, see Granting Permissions for Amazon S3 Inventory and Storage Class Analysis.
To use this operation, you must have permissions to perform the s3:PutInventoryConfiguration action. The bucket owner has this permission by default and can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources in the Amazon S3 User Guide.
Special Errors
HTTP 400 Bad Request Error; Code: InvalidArgument; Cause: Invalid argument.
HTTP 400 Bad Request Error; Code: TooManyConfigurations; Cause: You are attempting to create a new configuration but have already reached the 1,000-configuration limit.
HTTP 403 Forbidden Error; Code: AccessDenied; Cause: You are not the owner of the specified bucket, or you do not have the s3:PutInventoryConfiguration bucket permission to set the configuration on the bucket.
Related Resources
"PutBucketLifecycle": For an updated version of this API, see PutBucketLifecycleConfiguration. This version has been deprecated. Existing lifecycle configurations will work. For new lifecycle configurations, use the updated API.
Creates a new lifecycle configuration for the bucket or replaces an existing lifecycle configuration. For information about lifecycle configuration, see Object Lifecycle Management in the Amazon S3 User Guide.
By default, all Amazon S3 resources, including buckets, objects, and related subresources (for example, lifecycle configuration and website configuration) are private. Only the resource owner, the Amazon Web Services account that created the resource, can access it. The resource owner can optionally grant access permissions to others by writing an access policy. For this operation, users must get the s3:PutLifecycleConfiguration permission.
You can also explicitly deny permissions. Explicit denial also supersedes any other permissions. If you want to prevent users or accounts from removing or deleting objects from your bucket, you must deny them permissions for the following actions:
s3:DeleteObject
s3:DeleteObjectVersion
s3:PutLifecycleConfiguration
For more information about permissions, see Managing Access Permissions to your Amazon S3 Resources in the Amazon S3 User Guide.
For more examples of transitioning objects to storage classes such as STANDARD_IA or ONEZONE_IA, see Examples of Lifecycle Configuration.
Related Resources
GetBucketLifecycle (Deprecated)
By default, a resource owner (in this case, a bucket owner, which is the Amazon Web Services account that created the bucket) can perform any of the operations. A resource owner can also grant others permission to perform the operation. For more information, see the following topics in the Amazon S3 User Guide:
"PutBucketLifecycleConfiguration": Creates a new lifecycle configuration for the bucket or replaces an existing lifecycle configuration. Keep in mind that this will overwrite an existing lifecycle configuration, so if you want to retain any configuration details, they must be included in the new lifecycle configuration. For information about lifecycle configuration, see Managing your storage lifecycle.
Bucket lifecycle configuration now supports specifying a lifecycle rule using an object key name prefix, one or more object tags, or a combination of both. Accordingly, this section describes the latest API. The previous version of the API supported filtering based only on an object key name prefix, which is supported for backward compatibility. For the related API description, see PutBucketLifecycle.
Rules
You specify the lifecycle configuration in your request body. The lifecycle configuration is specified as XML consisting of one or more rules. Each rule consists of the following:
A filter identifying a subset of objects to which the rule applies. The filter can be based on a key name prefix, object tags, or a combination of both.
A status indicating whether the rule is in effect.
One or more lifecycle transition and expiration actions that you want Amazon S3 to perform on the objects identified by the filter. If the state of your bucket is versioning-enabled or versioning-suspended, you can have many versions of the same object (one current version and zero or more noncurrent versions). Amazon S3 provides predefined actions that you can specify for current and noncurrent object versions.
For more information, see Object Lifecycle Management and Lifecycle Configuration Elements.
Permissions
By default, all Amazon S3 resources are private, including buckets, objects, and related subresources (for example, lifecycle configuration and website configuration). Only the resource owner (that is, the Amazon Web Services account that created it) can access the resource. The resource owner can optionally grant access permissions to others by writing an access policy. For this operation, a user must get the s3:PutLifecycleConfiguration permission.
You can also explicitly deny permissions. Explicit deny also supersedes any other permissions. If you want to block users or accounts from removing or deleting objects from your bucket, you must deny them permissions for the following actions:
s3:DeleteObject
s3:DeleteObjectVersion
s3:PutLifecycleConfiguration
For more information about permissions, see Managing Access Permissions to Your Amazon S3 Resources.
The following are related to PutBucketLifecycleConfiguration:
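A minimal sketch of one prefix-scoped rule with a transition and an expiration; the bucket name, rule ID, prefix, and day counts are placeholders. Note this replaces any existing configuration on the bucket wholesale, as warned above.

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	_, err := svc.PutBucketLifecycleConfiguration(&s3.PutBucketLifecycleConfigurationInput{
		Bucket: aws.String("examplebucket"), // placeholder
		LifecycleConfiguration: &s3.BucketLifecycleConfiguration{
			Rules: []*s3.LifecycleRule{{
				ID:     aws.String("archive-logs"), // hypothetical rule name
				Status: aws.String("Enabled"),
				Filter: &s3.LifecycleRuleFilter{Prefix: aws.String("logs/")},
				// Transition to STANDARD_IA after 30 days...
				Transitions: []*s3.Transition{{
					Days:         aws.Int64(30),
					StorageClass: aws.String(s3.TransitionStorageClassStandardIa),
				}},
				// ...and expire after a year.
				Expiration: &s3.LifecycleExpiration{Days: aws.Int64(365)},
			}},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}
```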
"PutBucketLogging": Set the logging parameters for a bucket and specify permissions for who can view and modify the logging parameters. All logs are saved to buckets in the same Amazon Web Services Region as the source bucket. To set the logging status of a bucket, you must be the bucket owner.
The bucket owner is automatically granted FULL_CONTROL to all logs. You use the Grantee request element to grant access to other people. The Permissions request element specifies the kind of access the grantee has to the logs.
If the target bucket for log delivery uses the bucket owner enforced setting for S3 Object Ownership, you can't use the Grantee request element to grant access to others. Permissions can only be granted using policies. For more information, see Permissions for server access log delivery in the Amazon S3 User Guide.
Grantee Values
You can specify the person (grantee) to whom you're assigning access rights (using request elements) in the following ways:
By the person's ID:
<Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:type="CanonicalUser"><ID><>ID<></ID><DisplayName><>GranteesEmail<></DisplayName></Grantee>
DisplayName is optional and ignored in the request.
By Email address:
<Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:type="AmazonCustomerByEmail"><EmailAddress><>Grantees@email.com<></EmailAddress></Grantee>
The grantee is resolved to the CanonicalUser and, in a response to a GET Object acl request, appears as the CanonicalUser.
By URI:
<Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:type="Group"><URI><>http://acs.amazonaws.com/groups/global/AuthenticatedUsers<></URI></Grantee>
To enable logging, you use LoggingEnabled and its children request elements. To disable logging, you use an empty BucketLoggingStatus request element:
<BucketLoggingStatus xmlns="http://doc.s3.amazonaws.com/2006-03-01" />
For more information about server access logging, see Server Access Logging in the Amazon S3 User Guide.
For more information about creating a bucket, see CreateBucket. For more information about returning the logging status of a bucket, see GetBucketLogging.
The following operations are related to PutBucketLogging:
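A minimal sketch of enabling server access logging; the source and target bucket names and the prefix are placeholders.

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	// Enable logging into a target bucket; sending an empty
	// BucketLoggingStatus instead would disable logging.
	_, err := svc.PutBucketLogging(&s3.PutBucketLoggingInput{
		Bucket: aws.String("examplebucket"), // placeholder source bucket
		BucketLoggingStatus: &s3.BucketLoggingStatus{
			LoggingEnabled: &s3.LoggingEnabled{
				TargetBucket: aws.String("examplebucket-logs"), // placeholder
				TargetPrefix: aws.String("access-logs/"),
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}
```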
"PutBucketMetricsConfiguration": Sets a metrics configuration (specified by the metrics configuration ID) for the bucket. You can have up to 1,000 metrics configurations per bucket. If you're updating an existing metrics configuration, note that this is a full replacement of the existing metrics configuration. If you don't include the elements you want to keep, they are erased.
To use this operation, you must have permissions to perform the s3:PutMetricsConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.
For information about CloudWatch request metrics for Amazon S3, see Monitoring Metrics with Amazon CloudWatch.
The following operations are related to PutBucketMetricsConfiguration:
GetBucketLifecycle has the following special error:
Error code: TooManyConfigurations
Description: You are attempting to create a new configuration but have already reached the 1,000-configuration limit.
HTTP Status Code: HTTP 400 Bad Request
"PutBucketNotification": No longer used, see the PutBucketNotificationConfiguration operation.
", - "PutBucketNotificationConfiguration": "Enables notifications of specified events for a bucket. For more information about event notifications, see Configuring Event Notifications.
Using this API, you can replace an existing notification configuration. The configuration is an XML file that defines the event types that you want Amazon S3 to publish and the destination where you want Amazon S3 to publish an event notification when it detects an event of the specified type.
By default, your bucket has no event notifications configured. That is, the notification configuration will be an empty NotificationConfiguration
.
<NotificationConfiguration>
</NotificationConfiguration>
This action replaces the existing notification configuration with the configuration you include in the request body.
After Amazon S3 receives this request, it first verifies that any Amazon Simple Notification Service (Amazon SNS) or Amazon Simple Queue Service (Amazon SQS) destination exists, and that the bucket owner has permission to publish to it by sending a test notification. In the case of Lambda destinations, Amazon S3 verifies that the Lambda function permissions grant Amazon S3 permission to invoke the function from the Amazon S3 bucket. For more information, see Configuring Notifications for Amazon S3 Events.
You can disable notifications by adding the empty NotificationConfiguration element.
By default, only the bucket owner can configure notifications on a bucket. However, bucket owners can use a bucket policy to grant permission to other users to set this configuration with s3:PutBucketNotification permission.
The PUT notification is an atomic operation. For example, suppose your notification configuration includes SNS topic, SQS queue, and Lambda function configurations. When you send a PUT request with this configuration, Amazon S3 sends test messages to your SNS topic. If the message fails, the entire PUT action will fail, and Amazon S3 will not add the configuration to your bucket.
Responses
If the configuration in the request body includes only one TopicConfiguration specifying only the s3:ReducedRedundancyLostObject event type, the response will also include the x-amz-sns-test-message-id header containing the message ID of the test notification sent to the topic.
The following action is related to PutBucketNotificationConfiguration:
Enables notifications of specified events for a bucket. For more information about event notifications, see Configuring Event Notifications.
Using this API, you can replace an existing notification configuration. The configuration is an XML file that defines the event types that you want Amazon S3 to publish and the destination where you want Amazon S3 to publish an event notification when it detects an event of the specified type.
By default, your bucket has no event notifications configured. That is, the notification configuration will be an empty NotificationConfiguration.
<NotificationConfiguration>
</NotificationConfiguration>
This action replaces the existing notification configuration with the configuration you include in the request body.
After Amazon S3 receives this request, it first verifies that any Amazon Simple Notification Service (Amazon SNS) or Amazon Simple Queue Service (Amazon SQS) destination exists, and that the bucket owner has permission to publish to it by sending a test notification. In the case of Lambda destinations, Amazon S3 verifies that the Lambda function permissions grant Amazon S3 permission to invoke the function from the Amazon S3 bucket. For more information, see Configuring Notifications for Amazon S3 Events.
You can disable notifications by adding the empty NotificationConfiguration element.
For more information about the number of event notification configurations that you can create per bucket, see Amazon S3 service quotas in Amazon Web Services General Reference.
By default, only the bucket owner can configure notifications on a bucket. However, bucket owners can use a bucket policy to grant permission to other users to set this configuration with s3:PutBucketNotification permission.
The PUT notification is an atomic operation. For example, suppose your notification configuration includes SNS topic, SQS queue, and Lambda function configurations. When you send a PUT request with this configuration, Amazon S3 sends test messages to your SNS topic. If the message fails, the entire PUT action will fail, and Amazon S3 will not add the configuration to your bucket.
Responses
If the configuration in the request body includes only one TopicConfiguration specifying only the s3:ReducedRedundancyLostObject event type, the response will also include the x-amz-sns-test-message-id header containing the message ID of the test notification sent to the topic.
The following action is related to PutBucketNotificationConfiguration:
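A minimal sketch with this SDK, reusing the svc client from the first sketch; the bucket name and topic ARN are placeholders:

    _, err := svc.PutBucketNotificationConfiguration(&s3.PutBucketNotificationConfigurationInput{
        Bucket: aws.String("my-bucket"), // placeholder
        NotificationConfiguration: &s3.NotificationConfiguration{
            TopicConfigurations: []*s3.TopicConfiguration{{
                TopicArn: aws.String("arn:aws:sns:us-east-1:123456789012:my-topic"), // placeholder
                Events:   []*string{aws.String("s3:ReducedRedundancyLostObject")},
            }},
        },
    })
    // Passing an empty &s3.NotificationConfiguration{} disables notifications,
    // mirroring the empty <NotificationConfiguration/> element above.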
Creates or modifies OwnershipControls for an Amazon S3 bucket. To use this operation, you must have the s3:PutBucketOwnershipControls permission. For more information about Amazon S3 permissions, see Specifying permissions in a policy.
For information about Amazon S3 Object Ownership, see Using object ownership.
The following operations are related to PutBucketOwnershipControls:
Applies an Amazon S3 bucket policy to an Amazon S3 bucket. If you are using an identity other than the root user of the Amazon Web Services account that owns the bucket, the calling identity must have the PutBucketPolicy permissions on the specified bucket and belong to the bucket owner's account in order to use this operation.
If you don't have PutBucketPolicy permissions, Amazon S3 returns a 403 Access Denied error. If you have the correct permissions, but you're not using an identity that belongs to the bucket owner's account, Amazon S3 returns a 405 Method Not Allowed error.
As a security precaution, the root user of the Amazon Web Services account that owns a bucket can always use this operation, even if the policy explicitly denies the root user the ability to perform this action.
For more information, see Bucket policy examples.
The following operations are related to PutBucketPolicy:
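A minimal sketch with this SDK, reusing the svc client from the first sketch; the policy below is only an assumed example granting public read on a placeholder bucket:

    policy := `{
      "Version": "2012-10-17",
      "Statement": [{
        "Sid":       "AllowPublicRead",
        "Effect":    "Allow",
        "Principal": "*",
        "Action":    "s3:GetObject",
        "Resource":  "arn:aws:s3:::my-bucket/*"
      }]
    }`
    _, err := svc.PutBucketPolicy(&s3.PutBucketPolicyInput{
        Bucket: aws.String("my-bucket"), // placeholder
        Policy: aws.String(policy),      // the policy document is passed as a JSON string
    })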
Creates a replication configuration or replaces an existing one. For more information, see Replication in the Amazon S3 User Guide.
Specify the replication configuration in the request body. In the replication configuration, you provide the name of the destination bucket or buckets where you want Amazon S3 to replicate objects, the IAM role that Amazon S3 can assume to replicate objects on your behalf, and other relevant information.
A replication configuration must include at least one rule, and can contain a maximum of 1,000. Each rule identifies a subset of objects to replicate by filtering the objects in the source bucket. To choose additional subsets of objects to replicate, add a rule for each subset.
To specify a subset of the objects in the source bucket to apply a replication rule to, add the Filter element as a child of the Rule element. You can filter objects based on an object key prefix, one or more object tags, or both. When you add the Filter element in the configuration, you must also add the following elements: DeleteMarkerReplication, Status, and Priority.
If you are using an earlier version of the replication configuration, Amazon S3 handles replication of delete markers differently. For more information, see Backward Compatibility.
For information about enabling versioning on a bucket, see Using Versioning.
Handling Replication of Encrypted Objects
By default, Amazon S3 doesn't replicate objects that are stored at rest using server-side encryption with KMS keys. To replicate Amazon Web Services KMS-encrypted objects, add the following: SourceSelectionCriteria, SseKmsEncryptedObjects, Status, EncryptionConfiguration, and ReplicaKmsKeyID. For information about replication configuration, see Replicating Objects Created with SSE Using KMS keys.
For information on PutBucketReplication errors, see List of replication-related error codes.
Permissions
To create a PutBucketReplication request, you must have s3:PutReplicationConfiguration permissions for the bucket.
By default, a resource owner, in this case the Amazon Web Services account that created the bucket, can perform this operation. The resource owner can also grant others permissions to perform the operation. For more information about permissions, see Specifying Permissions in a Policy and Managing Access Permissions to Your Amazon S3 Resources.
To perform this operation, the user or role performing the action must have the iam:PassRole permission.
The following operations are related to PutBucketReplication:
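A minimal sketch of a one-rule configuration with this SDK, reusing the svc client from the first sketch; the bucket names and role ARN are placeholders:

    _, err := svc.PutBucketReplication(&s3.PutBucketReplicationInput{
        Bucket: aws.String("source-bucket"), // placeholder
        ReplicationConfiguration: &s3.ReplicationConfiguration{
            Role: aws.String("arn:aws:iam::123456789012:role/replication-role"), // placeholder
            Rules: []*s3.ReplicationRule{{
                Status:   aws.String("Enabled"),
                Priority: aws.Int64(1),
                // As noted above, a Filter also requires DeleteMarkerReplication,
                // Status, and Priority.
                Filter:                  &s3.ReplicationRuleFilter{Prefix: aws.String("logs/")},
                DeleteMarkerReplication: &s3.DeleteMarkerReplication{Status: aws.String("Disabled")},
                Destination:             &s3.Destination{Bucket: aws.String("arn:aws:s3:::destination-bucket")},
            }},
        },
    })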
Sets the request payment configuration for a bucket. By default, the bucket owner pays for downloads from the bucket. This configuration parameter enables the bucket owner (only) to specify that the person requesting the download will be charged for the download. For more information, see Requester Pays Buckets.
The following operations are related to PutBucketRequestPayment:
Sets the tags for a bucket.
Use tags to organize your Amazon Web Services bill to reflect your own cost structure. To do this, sign up to get your Amazon Web Services account bill with tag key values included. Then, to see the cost of combined resources, organize your billing information according to resources with the same tag key values. For example, you can tag several resources with a specific application name, and then organize your billing information to see the total cost of that application across several services. For more information, see Cost Allocation and Tagging and Using Cost Allocation in Amazon S3 Bucket Tags.
When this operation sets the tags for a bucket, it will overwrite any current tags the bucket already has. You cannot use this operation to add tags to an existing list of tags.
To use this operation, you must have permissions to perform the s3:PutBucketTagging action. The bucket owner has this permission by default and can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.
PutBucketTagging has the following special errors:
Error code: InvalidTagError
Description: The tag provided was not a valid tag. This error can occur if the tag did not pass input validation. For information about tag restrictions, see User-Defined Tag Restrictions and Amazon Web Services-Generated Cost Allocation Tag Restrictions.
Error code: MalformedXMLError
Description: The XML provided does not match the schema.
Error code: OperationAbortedError
Description: A conflicting conditional action is currently in progress against this resource. Please try again.
Error code: InternalError
Description: The service was unable to apply the provided tag to the bucket.
The following operations are related to PutBucketTagging:
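A minimal sketch with this SDK, reusing the svc client from the first sketch; remember this call replaces the existing tag set, and the names below are placeholders:

    _, err := svc.PutBucketTagging(&s3.PutBucketTaggingInput{
        Bucket: aws.String("my-bucket"), // placeholder
        Tagging: &s3.Tagging{TagSet: []*s3.Tag{
            {Key: aws.String("application"), Value: aws.String("billing-report")}, // placeholder tag
        }},
    })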
Sets the versioning state of an existing bucket. To set the versioning state, you must be the bucket owner.
You can set the versioning state with one of the following values:
Enabled—Enables versioning for the objects in the bucket. All objects added to the bucket receive a unique version ID.
Suspended—Disables versioning for the objects in the bucket. All objects added to the bucket receive the version ID null.
If the versioning state has never been set on a bucket, it has no versioning state; a GetBucketVersioning request does not return a versioning state value.
If the bucket owner enables MFA Delete in the bucket versioning configuration, the bucket owner must include the x-amz-mfa request header and the Status and the MfaDelete request elements in a request to set the versioning state of the bucket.
If you have an object expiration lifecycle policy in your non-versioned bucket and you want to maintain the same permanent delete behavior when you enable versioning, you must add a noncurrent expiration policy. The noncurrent expiration lifecycle policy will manage the deletes of the noncurrent object versions in the version-enabled bucket. (A version-enabled bucket maintains one current and zero or more noncurrent object versions.) For more information, see Lifecycle and Versioning.
Related Resources
", + "PutBucketVersioning": "Sets the versioning state of an existing bucket.
You can set the versioning state with one of the following values:
Enabled—Enables versioning for the objects in the bucket. All objects added to the bucket receive a unique version ID.
Suspended—Disables versioning for the objects in the bucket. All objects added to the bucket receive the version ID null.
If the versioning state has never been set on a bucket, it has no versioning state; a GetBucketVersioning request does not return a versioning state value.
In order to enable MFA Delete, you must be the bucket owner. If you are the bucket owner and want to enable MFA Delete in the bucket versioning configuration, you must include the x-amz-mfa request header and the Status and the MfaDelete request elements in a request to set the versioning state of the bucket.
If you have an object expiration lifecycle policy in your non-versioned bucket and you want to maintain the same permanent delete behavior when you enable versioning, you must add a noncurrent expiration policy. The noncurrent expiration lifecycle policy will manage the deletes of the noncurrent object versions in the version-enabled bucket. (A version-enabled bucket maintains one current and zero or more noncurrent object versions.) For more information, see Lifecycle and Versioning.
Related Resources
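A minimal sketch of enabling versioning with this SDK, reusing the svc client from the first sketch; the bucket name is a placeholder:

    _, err := svc.PutBucketVersioning(&s3.PutBucketVersioningInput{
        Bucket: aws.String("my-bucket"), // placeholder
        VersioningConfiguration: &s3.VersioningConfiguration{
            // "Enabled" or "Suspended", per the values described above.
            Status: aws.String(s3.BucketVersioningStatusEnabled),
        },
    })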
", "PutBucketWebsite": "Sets the configuration of the website that is specified in the website
subresource. To configure a bucket as a website, you can add this subresource on the bucket with website configuration information such as the file name of the index document and any redirect rules. For more information, see Hosting Websites on Amazon S3.
This PUT action requires the S3:PutBucketWebsite
permission. By default, only the bucket owner can configure the website attached to a bucket; however, bucket owners can allow other users to set the website configuration by writing a bucket policy that grants them the S3:PutBucketWebsite
permission.
To redirect all website requests sent to the bucket's website endpoint, you add a website configuration with the following elements. Because all requests are sent to another website, you don't need to provide index document name for the bucket.
WebsiteConfiguration
RedirectAllRequestsTo
HostName
Protocol
If you want granular control over redirects, you can use the following elements to add routing rules that describe conditions for redirecting requests and information about the redirect destination. In this case, the website configuration must provide an index document for the bucket, because some requests might not be redirected.
WebsiteConfiguration
IndexDocument
Suffix
ErrorDocument
Key
RoutingRules
RoutingRule
Condition
HttpErrorCodeReturnedEquals
KeyPrefixEquals
Redirect
Protocol
HostName
ReplaceKeyPrefixWith
ReplaceKeyWith
HttpRedirectCode
Amazon S3 has a limitation of 50 routing rules per website configuration. If you require more than 50 routing rules, you can use object redirect. For more information, see Configuring an Object Redirect in the Amazon S3 User Guide.
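A minimal sketch of the redirect-all form with this SDK, reusing the svc client from the first sketch; host and bucket are placeholders:

    // Redirect every request to another host; no index document is needed here.
    _, err := svc.PutBucketWebsite(&s3.PutBucketWebsiteInput{
        Bucket: aws.String("my-bucket"), // placeholder
        WebsiteConfiguration: &s3.WebsiteConfiguration{
            RedirectAllRequestsTo: &s3.RedirectAllRequestsTo{
                HostName: aws.String("example.com"), // placeholder
                Protocol: aws.String("https"),
            },
        },
    })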
", "PutObject": "Adds an object to a bucket. You must have WRITE permissions on a bucket to add an object to it.
Amazon S3 never adds partial objects; if you receive a success response, Amazon S3 added the entire object to the bucket.
Amazon S3 is a distributed system. If it receives multiple write requests for the same object simultaneously, it overwrites all but the last object written. Amazon S3 does not provide object locking; if you need this, make sure to build it into your application layer or use versioning instead.
To ensure that data is not corrupted traversing the network, use the Content-MD5 header. When you use this header, Amazon S3 checks the object against the provided MD5 value and, if they do not match, returns an error. Additionally, you can calculate the MD5 while putting an object to Amazon S3 and compare the returned ETag to the calculated MD5 value.
To successfully complete the PutObject request, you must have the s3:PutObject permission in your IAM permissions.
To successfully change the object's ACL of your PutObject request, you must have the s3:PutObjectAcl permission in your IAM permissions.
The Content-MD5 header is required for any request to upload an object with a retention period configured using Amazon S3 Object Lock. For more information about Amazon S3 Object Lock, see Amazon S3 Object Lock Overview in the Amazon S3 User Guide.
Server-side Encryption
You can optionally request server-side encryption. With server-side encryption, Amazon S3 encrypts your data as it writes it to disks in its data centers and decrypts the data when you access it. You have the option to provide your own encryption key or use Amazon Web Services managed encryption keys (SSE-S3 or SSE-KMS). For more information, see Using Server-Side Encryption.
If you request server-side encryption using Amazon Web Services Key Management Service (SSE-KMS), you can enable an S3 Bucket Key at the object-level. For more information, see Amazon S3 Bucket Keys in the Amazon S3 User Guide.
Access Control List (ACL)-Specific Request Headers
You can use headers to grant ACL-based permissions. By default, all objects are private. Only the owner has full access control. When adding a new object, you can grant permissions to individual Amazon Web Services accounts or to predefined groups defined by Amazon S3. These permissions are then added to the ACL on the object. For more information, see Access Control List (ACL) Overview and Managing ACLs Using the REST API.
If the bucket that you're uploading objects to uses the bucket owner enforced setting for S3 Object Ownership, ACLs are disabled and no longer affect permissions. Buckets that use this setting only accept PUT requests that don't specify an ACL or PUT requests that specify bucket owner full control ACLs, such as the bucket-owner-full-control canned ACL or an equivalent form of this ACL expressed in the XML format. PUT requests that contain other ACLs (for example, custom grants to certain Amazon Web Services accounts) fail and return a 400 error with the error code AccessControlListNotSupported.
For more information, see Controlling ownership of objects and disabling ACLs in the Amazon S3 User Guide.
If your bucket uses the bucket owner enforced setting for Object Ownership, all objects written to the bucket by any account will be owned by the bucket owner.
Storage Class Options
By default, Amazon S3 uses the STANDARD Storage Class to store newly created objects. The STANDARD storage class provides high durability and high availability. Depending on performance needs, you can specify a different Storage Class. Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. For more information, see Storage Classes in the Amazon S3 User Guide.
Versioning
If you enable versioning for a bucket, Amazon S3 automatically generates a unique version ID for the object being stored. Amazon S3 returns this ID in the response. When you enable versioning for a bucket, if Amazon S3 receives multiple write requests for the same object simultaneously, it stores all of the objects.
For more information about versioning, see Adding Objects to Versioning Enabled Buckets. For information about returning the versioning state of a bucket, see GetBucketVersioning.
Related Resources
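A minimal sketch of an upload that supplies the Content-MD5 header with this SDK; in addition to the first sketch's imports it uses bytes, crypto/md5, and encoding/base64, and the bucket and key are placeholders:

    payload := []byte("hello world")
    sum := md5.Sum(payload) // compute the checksum locally
    _, err := svc.PutObject(&s3.PutObjectInput{
        Bucket:     aws.String("my-bucket"),    // placeholder
        Key:        aws.String("greeting.txt"), // placeholder
        Body:       bytes.NewReader(payload),
        // Base64-encoded MD5, as the Content-MD5 header requires.
        ContentMD5: aws.String(base64.StdEncoding.EncodeToString(sum[:])),
    })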
", "PutObjectAcl": "Uses the acl
subresource to set the access control list (ACL) permissions for a new or existing object in an S3 bucket. You must have WRITE_ACP
permission to set the ACL of an object. For more information, see What permissions can I grant? in the Amazon S3 User Guide.
This action is not supported by Amazon S3 on Outposts.
Depending on your application needs, you can choose to set the ACL on an object using either the request body or the headers. For example, if you have an existing application that updates a bucket ACL using the request body, you can continue to use that approach. For more information, see Access Control List (ACL) Overview in the Amazon S3 User Guide.
If your bucket uses the bucket owner enforced setting for S3 Object Ownership, ACLs are disabled and no longer affect permissions. You must use policies to grant access to your bucket and the objects in it. Requests to set ACLs or update ACLs fail and return the AccessControlListNotSupported error code. Requests to read ACLs are still supported. For more information, see Controlling object ownership in the Amazon S3 User Guide.
Access Permissions
You can set access permissions using one of the following methods:
Specify a canned ACL with the x-amz-acl request header. Amazon S3 supports a set of predefined ACLs, known as canned ACLs. Each canned ACL has a predefined set of grantees and permissions. Specify the canned ACL name as the value of x-amz-acl. If you use this header, you cannot use other access control-specific headers in your request. For more information, see Canned ACL.
Specify access permissions explicitly with the x-amz-grant-read, x-amz-grant-read-acp, x-amz-grant-write-acp, and x-amz-grant-full-control headers. When using these headers, you specify explicit access permissions and grantees (Amazon Web Services accounts or Amazon S3 groups) who will receive the permission. If you use these ACL-specific headers, you cannot use x-amz-acl header to set a canned ACL. These parameters map to the set of permissions that Amazon S3 supports in an ACL. For more information, see Access Control List (ACL) Overview.
You specify each grantee as a type=value pair, where the type is one of the following:
id – if the value specified is the canonical user ID of an Amazon Web Services account
uri – if you are granting permissions to a predefined group
emailAddress – if the value specified is the email address of an Amazon Web Services account
Using email addresses to specify a grantee is only supported in the following Amazon Web Services Regions:
US East (N. Virginia)
US West (N. California)
US West (Oregon)
Asia Pacific (Singapore)
Asia Pacific (Sydney)
Asia Pacific (Tokyo)
Europe (Ireland)
South America (São Paulo)
For a list of all the Amazon S3 supported Regions and endpoints, see Regions and Endpoints in the Amazon Web Services General Reference.
For example, the following x-amz-grant-read header grants list objects permission to the two Amazon Web Services accounts identified by their email addresses.
x-amz-grant-read: emailAddress=\"xyz@amazon.com\", emailAddress=\"abc@amazon.com\"
You can use either a canned ACL or specify access permissions explicitly. You cannot do both.
Grantee Values
You can specify the person (grantee) to whom you're assigning access rights (using request elements) in the following ways:
By the person's ID:
<Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"CanonicalUser\"><ID>ID</ID><DisplayName>GranteesEmail</DisplayName></Grantee>
DisplayName is optional and ignored in the request.
By URI:
<Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"Group\"><URI>http://acs.amazonaws.com/groups/global/AuthenticatedUsers</URI></Grantee>
By Email address:
<Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"AmazonCustomerByEmail\"><EmailAddress>Grantees@email.com</EmailAddress></Grantee>
The grantee is resolved to the CanonicalUser and, in a response to a GET Object acl request, appears as the CanonicalUser.
Using email addresses to specify a grantee is only supported in the following Amazon Web Services Regions:
US East (N. Virginia)
US West (N. California)
US West (Oregon)
Asia Pacific (Singapore)
Asia Pacific (Sydney)
Asia Pacific (Tokyo)
Europe (Ireland)
South America (São Paulo)
For a list of all the Amazon S3 supported Regions and endpoints, see Regions and Endpoints in the Amazon Web Services General Reference.
Versioning
The ACL of an object is set at the object version level. By default, PUT sets the ACL of the current version of an object. To set the ACL of a different version, use the versionId subresource.
Related Resources
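A minimal sketch of the explicit-grant form with this SDK, reusing the svc client from the first sketch (a canned ACL would use the ACL field instead); bucket, key, and addresses are placeholders:

    // Use either a canned ACL or explicit grant headers, never both.
    _, err := svc.PutObjectAcl(&s3.PutObjectAclInput{
        Bucket:    aws.String("my-bucket"), // placeholder
        Key:       aws.String("photo.jpg"), // placeholder
        GrantRead: aws.String(`emailAddress="xyz@amazon.com", emailAddress="abc@amazon.com"`),
    })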
", - "PutObjectLegalHold": "Applies a Legal Hold configuration to the specified object. For more information, see Locking Objects.
This action is not supported by Amazon S3 on Outposts.
", + "PutObjectLegalHold": "Applies a legal hold configuration to the specified object. For more information, see Locking Objects.
This action is not supported by Amazon S3 on Outposts.
", "PutObjectLockConfiguration": "Places an Object Lock configuration on the specified bucket. The rule specified in the Object Lock configuration will be applied by default to every new object placed in the specified bucket. For more information, see Locking Objects.
The DefaultRetention settings require both a mode and a period.
The DefaultRetention period can be either Days or Years but you must select one. You cannot specify Days and Years at the same time.
You can only enable Object Lock for new buckets. If you want to turn on Object Lock for an existing bucket, contact Amazon Web Services Support.
Places an Object Retention configuration on an object. For more information, see Locking Objects. Users or accounts require the s3:PutObjectRetention permission in order to place an Object Retention configuration on objects. Bypassing a Governance Retention configuration requires the s3:BypassGovernanceRetention permission.
This action is not supported by Amazon S3 on Outposts.
Permissions
When the Object Lock retention mode is set to compliance, you need s3:PutObjectRetention and s3:BypassGovernanceRetention permissions. For other requests to PutObjectRetention, only s3:PutObjectRetention permissions are required.
Places an Object Retention configuration on an object. For more information, see Locking Objects. Users or accounts require the s3:PutObjectRetention permission in order to place an Object Retention configuration on objects. Bypassing a Governance Retention configuration requires the s3:BypassGovernanceRetention permission.
This action is not supported by Amazon S3 on Outposts.
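A minimal sketch of placing a retention configuration with this SDK, reusing the svc client from the first sketch; it additionally imports time, and the bucket, key, and period are placeholders:

    _, err := svc.PutObjectRetention(&s3.PutObjectRetentionInput{
        Bucket: aws.String("my-bucket"),  // placeholder
        Key:    aws.String("report.pdf"), // placeholder
        Retention: &s3.ObjectLockRetention{
            Mode:            aws.String(s3.ObjectLockRetentionModeGovernance),
            RetainUntilDate: aws.Time(time.Now().AddDate(0, 0, 30)), // 30-day retention
        },
        // BypassGovernanceRetention: aws.Bool(true), // needs s3:BypassGovernanceRetention
    })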
", "PutObjectTagging": "Sets the supplied tag-set to an object that already exists in a bucket.
A tag is a key-value pair. You can associate tags with an object by sending a PUT request against the tagging subresource that is associated with the object. You can retrieve tags by sending a GET request. For more information, see GetObjectTagging.
For tagging-related restrictions related to characters and encodings, see Tag Restrictions. Note that Amazon S3 limits the maximum number of tags to 10 tags per object.
To use this operation, you must have permission to perform the s3:PutObjectTagging action. By default, the bucket owner has this permission and can grant this permission to others.
To put tags of any other version, use the versionId query parameter. You also need permission for the s3:PutObjectVersionTagging action.
For information about the Amazon S3 object tagging feature, see Object Tagging.
Special Errors
Code: InvalidTagError
Cause: The tag provided was not a valid tag. This error can occur if the tag did not pass input validation. For more information, see Object Tagging.
Code: MalformedXMLError
Cause: The XML provided does not match the schema.
Code: OperationAbortedError
Cause: A conflicting conditional action is currently in progress against this resource. Please try again.
Code: InternalError
Cause: The service was unable to apply the provided tag to the object.
Related Resources
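A minimal sketch with this SDK, reusing the svc client from the first sketch; bucket, key, and the tag are placeholders:

    _, err := svc.PutObjectTagging(&s3.PutObjectTaggingInput{
        Bucket: aws.String("my-bucket"), // placeholder
        Key:    aws.String("photo.jpg"), // placeholder
        // VersionId targets a specific version and needs s3:PutObjectVersionTagging.
        Tagging: &s3.Tagging{TagSet: []*s3.Tag{
            {Key: aws.String("project"), Value: aws.String("alpha")}, // placeholder tag
        }},
    })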
", "PutPublicAccessBlock": "Creates or modifies the PublicAccessBlock
configuration for an Amazon S3 bucket. To use this operation, you must have the s3:PutBucketPublicAccessBlock
permission. For more information about Amazon S3 permissions, see Specifying Permissions in a Policy.
When Amazon S3 evaluates the PublicAccessBlock
configuration for a bucket or an object, it checks the PublicAccessBlock
configuration for both the bucket (or the bucket that contains the object) and the bucket owner's account. If the PublicAccessBlock
configurations are different between the bucket and the account, Amazon S3 uses the most restrictive combination of the bucket-level and account-level settings.
For more information about when Amazon S3 considers a bucket or an object public, see The Meaning of \"Public\".
Related Resources
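A minimal sketch of blocking all public access with this SDK, reusing the svc client from the first sketch; the bucket name is a placeholder:

    _, err := svc.PutPublicAccessBlock(&s3.PutPublicAccessBlockInput{
        Bucket: aws.String("my-bucket"), // placeholder
        PublicAccessBlockConfiguration: &s3.PublicAccessBlockConfiguration{
            BlockPublicAcls:       aws.Bool(true),
            BlockPublicPolicy:     aws.Bool(true),
            IgnorePublicAcls:      aws.Bool(true),
            RestrictPublicBuckets: aws.Bool(true),
        },
    })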
Restores an archived copy of an object back into Amazon S3
This action is not supported by Amazon S3 on Outposts.
This action performs the following types of requests:
select - Perform a select query on an archived object
restore an archive - Restore an archived object
To use this operation, you must have permissions to perform the s3:RestoreObject action. The bucket owner has this permission by default and can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources in the Amazon S3 User Guide.
Querying Archives with Select Requests
You use a select type of request to perform SQL queries on archived objects. The archived objects that are being queried by the select request must be formatted as uncompressed comma-separated values (CSV) files. You can run queries and custom analytics on your archived data without having to restore your data to a hotter Amazon S3 tier. For an overview about select requests, see Querying Archived Objects in the Amazon S3 User Guide.
When making a select request, do the following:
Define an output location for the select query's output. This must be an Amazon S3 bucket in the same Amazon Web Services Region as the bucket that contains the archive object that is being queried. The Amazon Web Services account that initiates the job must have permissions to write to the S3 bucket. You can specify the storage class and encryption for the output objects stored in the bucket. For more information about output, see Querying Archived Objects in the Amazon S3 User Guide.
For more information about the S3 structure in the request body, see the following:
Managing Access with ACLs in the Amazon S3 User Guide
Protecting Data Using Server-Side Encryption in the Amazon S3 User Guide
Define the SQL expression for the SELECT type of restoration for your query in the request body's SelectParameters structure. You can use expressions like the following examples.
The following expression returns all records from the specified object.
SELECT * FROM Object
Assuming that you are not using any headers for data stored in the object, you can specify columns with positional headers.
SELECT s._1, s._2 FROM Object s WHERE s._3 > 100
If you have headers and you set the fileHeaderInfo in the CSV structure in the request body to USE, you can specify headers in the query. (If you set the fileHeaderInfo field to IGNORE, the first row is skipped for the query.) You cannot mix ordinal positions with header column names.
SELECT s.Id, s.FirstName, s.SSN FROM S3Object s
For more information about using SQL with S3 Glacier Select restore, see SQL Reference for Amazon S3 Select and S3 Glacier Select in the Amazon S3 User Guide.
When making a select request, you can also do the following:
To expedite your queries, specify the Expedited tier. For more information about tiers, see \"Restoring Archives,\" later in this topic.
Specify details about the data serialization format of both the input object that is being queried and the serialization of the CSV-encoded query results.
The following are additional important facts about the select feature:
The output results are new Amazon S3 objects. Unlike archive retrievals, they are stored until explicitly deleted, manually or through a lifecycle policy.
You can issue more than one select request on the same Amazon S3 object. Amazon S3 doesn't deduplicate requests, so avoid issuing duplicate requests.
Amazon S3 accepts a select request even if the object has already been restored. A select request doesn’t return error response 409.
Restoring objects
Objects that you archive to the S3 Glacier or S3 Glacier Deep Archive storage class, and S3 Intelligent-Tiering Archive or S3 Intelligent-Tiering Deep Archive tiers are not accessible in real time. For objects in Archive Access or Deep Archive Access tiers you must first initiate a restore request, and then wait until the object is moved into the Frequent Access tier. For objects in S3 Glacier or S3 Glacier Deep Archive storage classes you must first initiate a restore request, and then wait until a temporary copy of the object is available. To access an archived object, you must restore the object for the duration (number of days) that you specify.
To restore a specific object version, you can provide a version ID. If you don't provide a version ID, Amazon S3 restores the current version.
When restoring an archived object (or using a select request), you can specify one of the following data access tier options in the Tier element of the request body:
Expedited - Expedited retrievals allow you to quickly access your data stored in the S3 Glacier storage class or S3 Intelligent-Tiering Archive tier when occasional urgent requests for a subset of archives are required. For all but the largest archived objects (250 MB+), data accessed using Expedited retrievals is typically made available within 1–5 minutes. Provisioned capacity ensures that retrieval capacity for Expedited retrievals is available when you need it. Expedited retrievals and provisioned capacity are not available for objects stored in the S3 Glacier Deep Archive storage class or S3 Intelligent-Tiering Deep Archive tier.
Standard - Standard retrievals allow you to access any of your archived objects within several hours. This is the default option for retrieval requests that do not specify the retrieval option. Standard retrievals typically finish within 3–5 hours for objects stored in the S3 Glacier storage class or S3 Intelligent-Tiering Archive tier. They typically finish within 12 hours for objects stored in the S3 Glacier Deep Archive storage class or S3 Intelligent-Tiering Deep Archive tier. Standard retrievals are free for objects stored in S3 Intelligent-Tiering.
Bulk - Bulk retrievals are the lowest-cost retrieval option in S3 Glacier, enabling you to retrieve large amounts, even petabytes, of data inexpensively. Bulk retrievals typically finish within 5–12 hours for objects stored in the S3 Glacier storage class or S3 Intelligent-Tiering Archive tier. They typically finish within 48 hours for objects stored in the S3 Glacier Deep Archive storage class or S3 Intelligent-Tiering Deep Archive tier. Bulk retrievals are free for objects stored in S3 Intelligent-Tiering.
For more information about archive retrieval options and provisioned capacity for Expedited data access, see Restoring Archived Objects in the Amazon S3 User Guide.
You can use Amazon S3 restore speed upgrade to change the restore speed to a faster speed while it is in progress. For more information, see Upgrading the speed of an in-progress restore in the Amazon S3 User Guide.
To get the status of object restoration, you can send a HEAD request. Operations return the x-amz-restore header, which provides information about the restoration status, in the response. You can use Amazon S3 event notifications to notify you when a restore is initiated or completed. For more information, see Configuring Amazon S3 Event Notifications in the Amazon S3 User Guide.
After restoring an archived object, you can update the restoration period by reissuing the request with a new period. Amazon S3 updates the restoration period relative to the current time and charges only for the request; there are no data transfer charges. You cannot update the restoration period when Amazon S3 is actively processing your current restore request for the object.
If your bucket has a lifecycle configuration with a rule that includes an expiration action, the object expiration overrides the life span that you specify in a restore request. For example, if you restore an object copy for 10 days, but the object is scheduled to expire in 3 days, Amazon S3 deletes the object in 3 days. For more information about lifecycle configuration, see PutBucketLifecycleConfiguration and Object Lifecycle Management in Amazon S3 User Guide.
Responses
A successful action returns either the 200 OK or 202 Accepted status code.
If the object is not previously restored, then Amazon S3 returns 202 Accepted in the response.
If the object is previously restored, Amazon S3 returns 200 OK in the response.
Special Errors
Code: RestoreAlreadyInProgress
Cause: Object restore is already in progress. (This error does not apply to SELECT type requests.)
HTTP Status Code: 409 Conflict
SOAP Fault Code Prefix: Client
Code: GlacierExpeditedRetrievalNotAvailable
Cause: expedited retrievals are currently not available. Try again later. (Returned if there is insufficient capacity to process the Expedited request. This error applies only to Expedited retrievals and not to S3 Standard or Bulk retrievals.)
HTTP Status Code: 503
SOAP Fault Code Prefix: N/A
Related Resources
SQL Reference for Amazon S3 Select and S3 Glacier Select in the Amazon S3 User Guide
Restores an archived copy of an object back into Amazon S3
This action is not supported by Amazon S3 on Outposts.
This action performs the following types of requests:
select - Perform a select query on an archived object
restore an archive - Restore an archived object
To use this operation, you must have permissions to perform the s3:RestoreObject action. The bucket owner has this permission by default and can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources in the Amazon S3 User Guide.
Querying Archives with Select Requests
You use a select type of request to perform SQL queries on archived objects. The archived objects that are being queried by the select request must be formatted as uncompressed comma-separated values (CSV) files. You can run queries and custom analytics on your archived data without having to restore your data to a hotter Amazon S3 tier. For an overview about select requests, see Querying Archived Objects in the Amazon S3 User Guide.
When making a select request, do the following:
Define an output location for the select query's output. This must be an Amazon S3 bucket in the same Amazon Web Services Region as the bucket that contains the archive object that is being queried. The Amazon Web Services account that initiates the job must have permissions to write to the S3 bucket. You can specify the storage class and encryption for the output objects stored in the bucket. For more information about output, see Querying Archived Objects in the Amazon S3 User Guide.
For more information about the S3 structure in the request body, see the following:
Managing Access with ACLs in the Amazon S3 User Guide
Protecting Data Using Server-Side Encryption in the Amazon S3 User Guide
Define the SQL expression for the SELECT type of restoration for your query in the request body's SelectParameters structure. You can use expressions like the following examples.
The following expression returns all records from the specified object.
SELECT * FROM Object
Assuming that you are not using any headers for data stored in the object, you can specify columns with positional headers.
SELECT s._1, s._2 FROM Object s WHERE s._3 > 100
If you have headers and you set the fileHeaderInfo in the CSV structure in the request body to USE, you can specify headers in the query. (If you set the fileHeaderInfo field to IGNORE, the first row is skipped for the query.) You cannot mix ordinal positions with header column names.
SELECT s.Id, s.FirstName, s.SSN FROM S3Object s
For more information about using SQL with S3 Glacier Select restore, see SQL Reference for Amazon S3 Select and S3 Glacier Select in the Amazon S3 User Guide.
When making a select request, you can also do the following:
To expedite your queries, specify the Expedited tier. For more information about tiers, see \"Restoring Archives,\" later in this topic.
Specify details about the data serialization format of both the input object that is being queried and the serialization of the CSV-encoded query results.
The following are additional important facts about the select feature:
The output results are new Amazon S3 objects. Unlike archive retrievals, they are stored until explicitly deleted, manually or through a lifecycle policy.
You can issue more than one select request on the same Amazon S3 object. Amazon S3 doesn't deduplicate requests, so avoid issuing duplicate requests.
Amazon S3 accepts a select request even if the object has already been restored. A select request doesn’t return error response 409.
Restoring objects
Objects that you archive to the S3 Glacier or S3 Glacier Deep Archive storage class, and S3 Intelligent-Tiering Archive or S3 Intelligent-Tiering Deep Archive tiers are not accessible in real time. For objects in Archive Access or Deep Archive Access tiers you must first initiate a restore request, and then wait until the object is moved into the Frequent Access tier. For objects in S3 Glacier or S3 Glacier Deep Archive storage classes you must first initiate a restore request, and then wait until a temporary copy of the object is available. To access an archived object, you must restore the object for the duration (number of days) that you specify.
To restore a specific object version, you can provide a version ID. If you don't provide a version ID, Amazon S3 restores the current version.
When restoring an archived object (or using a select request), you can specify one of the following data access tier options in the Tier element of the request body:
Expedited - Expedited retrievals allow you to quickly access your data stored in the S3 Glacier storage class or S3 Intelligent-Tiering Archive tier when occasional urgent requests for a subset of archives are required. For all but the largest archived objects (250 MB+), data accessed using Expedited retrievals is typically made available within 1–5 minutes. Provisioned capacity ensures that retrieval capacity for Expedited retrievals is available when you need it. Expedited retrievals and provisioned capacity are not available for objects stored in the S3 Glacier Deep Archive storage class or S3 Intelligent-Tiering Deep Archive tier.
Standard - Standard retrievals allow you to access any of your archived objects within several hours. This is the default option for retrieval requests that do not specify the retrieval option. Standard retrievals typically finish within 3–5 hours for objects stored in the S3 Glacier storage class or S3 Intelligent-Tiering Archive tier. They typically finish within 12 hours for objects stored in the S3 Glacier Deep Archive storage class or S3 Intelligent-Tiering Deep Archive tier. Standard retrievals are free for objects stored in S3 Intelligent-Tiering.
Bulk - Bulk retrievals are the lowest-cost retrieval option in S3 Glacier, enabling you to retrieve large amounts, even petabytes, of data inexpensively. Bulk retrievals typically finish within 5–12 hours for objects stored in the S3 Glacier storage class or S3 Intelligent-Tiering Archive tier. They typically finish within 48 hours for objects stored in the S3 Glacier Deep Archive storage class or S3 Intelligent-Tiering Deep Archive tier. Bulk retrievals are free for objects stored in S3 Intelligent-Tiering.
For more information about archive retrieval options and provisioned capacity for Expedited data access, see Restoring Archived Objects in the Amazon S3 User Guide.
You can use Amazon S3 restore speed upgrade to change the restore speed to a faster speed while it is in progress. For more information, see Upgrading the speed of an in-progress restore in the Amazon S3 User Guide.
To get the status of object restoration, you can send a HEAD request. Operations return the x-amz-restore header, which provides information about the restoration status, in the response. You can use Amazon S3 event notifications to notify you when a restore is initiated or completed. For more information, see Configuring Amazon S3 Event Notifications in the Amazon S3 User Guide.
After restoring an archived object, you can update the restoration period by reissuing the request with a new period. Amazon S3 updates the restoration period relative to the current time and charges only for the request; there are no data transfer charges. You cannot update the restoration period when Amazon S3 is actively processing your current restore request for the object.
If your bucket has a lifecycle configuration with a rule that includes an expiration action, the object expiration overrides the life span that you specify in a restore request. For example, if you restore an object copy for 10 days, but the object is scheduled to expire in 3 days, Amazon S3 deletes the object in 3 days. For more information about lifecycle configuration, see PutBucketLifecycleConfiguration and Object Lifecycle Management in Amazon S3 User Guide.
Responses
A successful action returns either the 200 OK or 202 Accepted status code.
If the object is not previously restored, then Amazon S3 returns 202 Accepted in the response.
If the object is previously restored, Amazon S3 returns 200 OK in the response.
Special Errors
Code: RestoreAlreadyInProgress
Cause: Object restore is already in progress. (This error does not apply to SELECT type requests.)
HTTP Status Code: 409 Conflict
SOAP Fault Code Prefix: Client
Code: GlacierExpeditedRetrievalNotAvailable
Cause: expedited retrievals are currently not available. Try again later. (Returned if there is insufficient capacity to process the Expedited request. This error applies only to Expedited retrievals and not to S3 Standard or Bulk retrievals.)
HTTP Status Code: 503
SOAP Fault Code Prefix: N/A
Related Resources
SQL Reference for Amazon S3 Select and S3 Glacier Select in the Amazon S3 User Guide
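A minimal sketch of a restore request with this SDK, reusing the svc client from the first sketch and the Standard tier described above; bucket, key, and the 10-day period are placeholders:

    _, err := svc.RestoreObject(&s3.RestoreObjectInput{
        Bucket: aws.String("my-bucket"),   // placeholder
        Key:    aws.String("archive.zip"), // placeholder
        RestoreRequest: &s3.RestoreRequest{
            Days: aws.Int64(10), // temporary copy lifetime
            GlacierJobParameters: &s3.GlacierJobParameters{
                Tier: aws.String(s3.TierStandard), // or TierExpedited / TierBulk
            },
        },
    })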
This action filters the contents of an Amazon S3 object based on a simple structured query language (SQL) statement. In the request, along with the SQL expression, you must also specify a data serialization format (JSON, CSV, or Apache Parquet) of the object. Amazon S3 uses this format to parse object data into records, and returns only records that match the specified SQL expression. You must also specify the data serialization format for the response.
This action is not supported by Amazon S3 on Outposts.
For more information about Amazon S3 Select, see Selecting Content from Objects and SELECT Command in the Amazon S3 User Guide.
For more information about using SQL with Amazon S3 Select, see SQL Reference for Amazon S3 Select and S3 Glacier Select in the Amazon S3 User Guide.
Permissions
You must have s3:GetObject permission for this operation. Amazon S3 Select does not support anonymous access. For more information about permissions, see Specifying Permissions in a Policy in the Amazon S3 User Guide.
Object Data Formats
You can use Amazon S3 Select to query objects that have the following format properties:
CSV, JSON, and Parquet - Objects must be in CSV, JSON, or Parquet format.
UTF-8 - UTF-8 is the only encoding type Amazon S3 Select supports.
GZIP or BZIP2 - CSV and JSON files can be compressed using GZIP or BZIP2. GZIP and BZIP2 are the only compression formats that Amazon S3 Select supports for CSV and JSON files. Amazon S3 Select supports columnar compression for Parquet using GZIP or Snappy. Amazon S3 Select does not support whole-object compression for Parquet objects.
Server-side encryption - Amazon S3 Select supports querying objects that are protected with server-side encryption.
For objects that are encrypted with customer-provided encryption keys (SSE-C), you must use HTTPS, and you must use the headers that are documented in the GetObject. For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided Encryption Keys) in the Amazon S3 User Guide.
For objects that are encrypted with Amazon S3 managed encryption keys (SSE-S3) and Amazon Web Services KMS keys (SSE-KMS), server-side encryption is handled transparently, so you don't need to specify anything. For more information about server-side encryption, including SSE-S3 and SSE-KMS, see Protecting Data Using Server-Side Encryption in the Amazon S3 User Guide.
Working with the Response Body
Given the response size is unknown, Amazon S3 Select streams the response as a series of messages and includes a Transfer-Encoding header with chunked as its value in the response. For more information, see Appendix: SelectObjectContent Response.
GetObject Support
The SelectObjectContent action does not support the following GetObject functionality. For more information, see GetObject.
Range: Although you can specify a scan range for an Amazon S3 Select request (see SelectObjectContentRequest - ScanRange in the request parameters), you cannot specify the range of bytes of an object to return.
GLACIER, DEEP_ARCHIVE, and REDUCED_REDUNDANCY storage classes: You cannot specify the GLACIER, DEEP_ARCHIVE, or REDUCED_REDUNDANCY storage classes. For more information about storage classes, see Storage Classes in the Amazon S3 User Guide.
Special Errors
For a list of special errors for this operation, see List of SELECT Object Content Error Codes
Related Resources
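A minimal sketch of a select request with this SDK, reusing the svc client from the first sketch and reading the streamed response events; bucket, key, and the expression are placeholders:

    out, err := svc.SelectObjectContent(&s3.SelectObjectContentInput{
        Bucket:         aws.String("my-bucket"), // placeholder
        Key:            aws.String("data.csv"),  // placeholder
        Expression:     aws.String("SELECT s._1, s._2 FROM S3Object s"),
        ExpressionType: aws.String(s3.ExpressionTypeSql),
        InputSerialization: &s3.InputSerialization{
            CSV: &s3.CSVInput{FileHeaderInfo: aws.String(s3.FileHeaderInfoIgnore)},
        },
        OutputSerialization: &s3.OutputSerialization{CSV: &s3.CSVOutput{}},
    })
    if err == nil {
        defer out.EventStream.Close()
        // The chunked response arrives as events; RecordsEvent carries result rows.
        for event := range out.EventStream.Events() {
            if records, ok := event.(*s3.RecordsEvent); ok {
                fmt.Print(string(records.Payload))
            }
        }
    }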
", "UploadPart": "Uploads a part in a multipart upload.
In this operation, you provide part data in your request. However, you have an option to specify your existing Amazon S3 object as a data source for the part you are uploading. To upload a part from an existing object, you use the UploadPartCopy operation.
You must initiate a multipart upload (see CreateMultipartUpload) before you can upload any part. In response to your initiate request, Amazon S3 returns an upload ID, a unique identifier, that you must include in your upload part request.
Part numbers can be any number from 1 to 10,000, inclusive. A part number uniquely identifies a part and also defines its position within the object being created. If you upload a new part using the same part number that was used with a previous part, the previously uploaded part is overwritten. Each part must be at least 5 MB in size, except the last part. There is no size limit on the last part of your multipart upload.
To ensure that data is not corrupted when traversing the network, specify the Content-MD5 header in the upload part request. Amazon S3 checks the part data against the provided MD5 value. If they do not match, Amazon S3 returns an error.
If the upload request is signed with Signature Version 4, then Amazon Web Services S3 uses the x-amz-content-sha256 header as a checksum instead of Content-MD5. For more information, see Authenticating Requests: Using the Authorization Header (Amazon Web Services Signature Version 4).
Note: After you initiate multipart upload and upload one or more parts, you must either complete or abort multipart upload in order to stop getting charged for storage of the uploaded parts. Only after you either complete or abort multipart upload, Amazon S3 frees up the parts storage and stops charging you for the parts storage.
For more information on multipart uploads, go to Multipart Upload Overview in the Amazon S3 User Guide.
For information on the permissions required to use the multipart upload API, go to Multipart Upload and Permissions in the Amazon S3 User Guide.
You can optionally request server-side encryption where Amazon S3 encrypts your data as it writes it to disks in its data centers and decrypts it for you when you access it. You have the option of providing your own encryption key, or you can use the Amazon Web Services managed encryption keys. If you choose to provide your own encryption key, the request headers you provide in the request must match the headers you used in the request to initiate the upload by using CreateMultipartUpload. For more information, go to Using Server-Side Encryption in the Amazon S3 User Guide.
Server-side encryption is supported by the S3 Multipart Upload actions. Unless you are using a customer-provided encryption key, you don't need to specify the encryption parameters in each UploadPart request. Instead, you only need to specify the server-side encryption parameters in the initial Initiate Multipart request. For more information, see CreateMultipartUpload.
If you requested server-side encryption using a customer-provided encryption key in your initiate multipart upload request, you must provide identical encryption information in each part upload using the following headers.
x-amz-server-side-encryption-customer-algorithm
x-amz-server-side-encryption-customer-key
x-amz-server-side-encryption-customer-key-MD5
Special Errors
Code: NoSuchUpload
Cause: The specified multipart upload does not exist. The upload ID might be invalid, or the multipart upload might have been aborted or completed.
HTTP Status Code: 404 Not Found
SOAP Fault Code Prefix: Client
Related Resources
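A minimal sketch of the initiate/upload/complete sequence with this SDK, reusing the svc client from the first sketch and assumed to run inside a function; bucket, key, and the part contents are placeholders:

    data := []byte("part contents") // a single (final) part may be smaller than 5 MB
    create, err := svc.CreateMultipartUpload(&s3.CreateMultipartUploadInput{
        Bucket: aws.String("my-bucket"), // placeholder
        Key:    aws.String("large.bin"), // placeholder
    })
    if err != nil {
        return
    }
    part, err := svc.UploadPart(&s3.UploadPartInput{
        Bucket:     aws.String("my-bucket"),
        Key:        aws.String("large.bin"),
        UploadId:   create.UploadId, // returned by the initiate request
        PartNumber: aws.Int64(1),    // 1-10,000; reusing a number overwrites that part
        Body:       bytes.NewReader(data),
    })
    if err != nil {
        return
    }
    // Completing (or aborting) stops the charge for stored parts.
    _, err = svc.CompleteMultipartUpload(&s3.CompleteMultipartUploadInput{
        Bucket:   aws.String("my-bucket"),
        Key:      aws.String("large.bin"),
        UploadId: create.UploadId,
        MultipartUpload: &s3.CompletedMultipartUpload{
            Parts: []*s3.CompletedPart{{ETag: part.ETag, PartNumber: aws.Int64(1)}},
        },
    })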
", - "UploadPartCopy": "Uploads a part by copying data from an existing object as data source. You specify the data source by adding the request header x-amz-copy-source
in your request and a byte range by adding the request header x-amz-copy-source-range
in your request.
The minimum allowable part size for a multipart upload is 5 MB. For more information about multipart upload limits, go to Quick Facts in the Amazon S3 User Guide.
Instead of using an existing object as part data, you might use the UploadPart action and provide data in your request.
You must initiate a multipart upload before you can upload any part. In response to your initiate request, Amazon S3 returns a unique identifier, the upload ID, that you must include in your upload part request.
For more information about using the UploadPartCopy operation, see the following:
For conceptual information about multipart uploads, see Uploading Objects Using Multipart Upload in the Amazon S3 User Guide.
For information about permissions required to use the multipart upload API, see Multipart Upload and Permissions in the Amazon S3 User Guide.
For information about copying objects using a single atomic action vs. the multipart upload, see Operations on Objects in the Amazon S3 User Guide.
For information about using server-side encryption with customer-provided encryption keys with the UploadPartCopy operation, see CopyObject and UploadPart.
Note the following additional considerations about the request headers x-amz-copy-source-if-match, x-amz-copy-source-if-none-match, x-amz-copy-source-if-unmodified-since, and x-amz-copy-source-if-modified-since:
Consideration 1 - If both of the x-amz-copy-source-if-match and x-amz-copy-source-if-unmodified-since headers are present in the request as follows:
x-amz-copy-source-if-match condition evaluates to true, and;
x-amz-copy-source-if-unmodified-since condition evaluates to false;
Amazon S3 returns 200 OK and copies the data.
Consideration 2 - If both of the x-amz-copy-source-if-none-match and x-amz-copy-source-if-modified-since headers are present in the request as follows:
x-amz-copy-source-if-none-match condition evaluates to false, and;
x-amz-copy-source-if-modified-since condition evaluates to true;
Amazon S3 returns 412 Precondition Failed response code.
Versioning
If your bucket has versioning enabled, you could have multiple versions of the same object. By default, x-amz-copy-source identifies the current version of the object to copy. If the current version is a delete marker and you don't specify a versionId in the x-amz-copy-source, Amazon S3 returns a 404 error, because the object does not exist. If you specify versionId in the x-amz-copy-source and the versionId is a delete marker, Amazon S3 returns an HTTP 400 error, because you are not allowed to specify a delete marker as a version for the x-amz-copy-source.
You can optionally specify a specific version of the source object to copy by adding the versionId subresource as shown in the following example:
x-amz-copy-source: /bucket/object?versionId=version id
Special Errors
Code: NoSuchUpload
Cause: The specified multipart upload does not exist. The upload ID might be invalid, or the multipart upload might have been aborted or completed.
HTTP Status Code: 404 Not Found
Code: InvalidRequest
Cause: The specified copy source is not supported as a byte-range copy source.
HTTP Status Code: 400 Bad Request
Related Resources
Uploads a part by copying data from an existing object as data source. You specify the data source by adding the request header x-amz-copy-source in your request and a byte range by adding the request header x-amz-copy-source-range in your request.
The minimum allowable part size for a multipart upload is 5 MB. For more information about multipart upload limits, go to Quick Facts in the Amazon S3 User Guide.
Instead of using an existing object as part data, you might use the UploadPart action and provide data in your request.
You must initiate a multipart upload before you can upload any part. In response to your initiate request, Amazon S3 returns a unique identifier, the upload ID, that you must include in your upload part request.
For more information about using the UploadPartCopy operation, see the following:
For conceptual information about multipart uploads, see Uploading Objects Using Multipart Upload in the Amazon S3 User Guide.
For information about permissions required to use the multipart upload API, see Multipart Upload and Permissions in the Amazon S3 User Guide.
For information about copying objects using a single atomic action vs. a multipart upload, see Operations on Objects in the Amazon S3 User Guide.
For information about using server-side encryption with customer-provided encryption keys with the UploadPartCopy operation, see CopyObject and UploadPart.
Note the following additional considerations about the request headers x-amz-copy-source-if-match, x-amz-copy-source-if-none-match, x-amz-copy-source-if-unmodified-since, and x-amz-copy-source-if-modified-since:
Consideration 1 - If both of the x-amz-copy-source-if-match and x-amz-copy-source-if-unmodified-since headers are present in the request as follows:
x-amz-copy-source-if-match condition evaluates to true, and;
x-amz-copy-source-if-unmodified-since condition evaluates to false;
Amazon S3 returns 200 OK and copies the data.
Consideration 2 - If both of the x-amz-copy-source-if-none-match
and x-amz-copy-source-if-modified-since
headers are present in the request as follows:
x-amz-copy-source-if-none-match
condition evaluates to false
, and;
x-amz-copy-source-if-modified-since
condition evaluates to true
;
Amazon S3 returns 412 Precondition Failed
response code.
Versioning
If your bucket has versioning enabled, you could have multiple versions of the same object. By default, x-amz-copy-source
identifies the current version of the object to copy. If the current version is a delete marker and you don't specify a versionId in the x-amz-copy-source
, Amazon S3 returns a 404 error, because the object does not exist. If you specify versionId in the x-amz-copy-source
and the versionId is a delete marker, Amazon S3 returns an HTTP 400 error, because you are not allowed to specify a delete marker as a version for the x-amz-copy-source
.
You can optionally specify a specific version of the source object to copy by adding the versionId
subresource as shown in the following example:
x-amz-copy-source: /bucket/object?versionId=version id
Special Errors
Code: NoSuchUpload
Cause: The specified multipart upload does not exist. The upload ID might be invalid, or the multipart upload might have been aborted or completed.
HTTP Status Code: 404 Not Found
Code: InvalidRequest
Cause: The specified copy source is not supported as a byte-range copy source.
HTTP Status Code: 400 Bad Request
Related Resources
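To make the flattened reference text above concrete, here is a minimal Go sketch of a ranged UploadPartCopy call with the v1 SDK this changelog tracks. The bucket, key, and upload ID values are hypothetical placeholders; a real program would obtain the upload ID from CreateMultipartUpload first.

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1")))
	svc := s3.New(sess)

	// Copy the first 5 MiB of an existing object into part 1 of a
	// multipart upload. A versioned source would append ?versionId=...
	// to CopySource, as described above.
	out, err := svc.UploadPartCopy(&s3.UploadPartCopyInput{
		Bucket:          aws.String("dest-bucket"),        // hypothetical
		Key:             aws.String("dest-key"),           // hypothetical
		UploadId:        aws.String("example-upload-id"),  // from CreateMultipartUpload
		PartNumber:      aws.Int64(1),
		CopySource:      aws.String("src-bucket/src-key"), // maps to x-amz-copy-source
		CopySourceRange: aws.String("bytes=0-5242879"),    // maps to x-amz-copy-source-range
	})
	if err != nil {
		log.Fatal(err) // a 404 NoSuchUpload or 400 InvalidRequest surfaces here
	}
	fmt.Println(aws.StringValue(out.CopyPartResult.ETag))
}
```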
Passes transformed objects to a GetObject operation when using Object Lambda access points. For information about Object Lambda access points, see Transforming objects with Object Lambda access points in the Amazon S3 User Guide.
This operation supports metadata that can be returned by GetObject, in addition to RequestRoute, RequestToken, StatusCode, ErrorCode, and ErrorMessage. The GetObject response metadata is supported so that the WriteGetObjectResponse caller, typically a Lambda function, can provide the same metadata when it internally invokes GetObject. When WriteGetObjectResponse is called by a customer-owned Lambda function, the metadata returned to the end user's GetObject call might differ from what Amazon S3 would normally return.
You can include any number of metadata headers. When including a metadata header, it should be prefaced with x-amz-meta. For example, x-amz-meta-my-custom-header: MyCustomValue. The primary use case for this is to forward GetObject metadata.
Amazon Web Services provides some prebuilt Lambda functions that you can use with S3 Object Lambda to detect and redact personally identifiable information (PII) and decompress S3 objects. These Lambda functions are available in the Amazon Web Services Serverless Application Repository, and can be selected through the Amazon Web Services Management Console when you create your Object Lambda access point.
Example 1: PII Access Control - This Lambda function uses Amazon Comprehend, a natural language processing (NLP) service that uses machine learning to find insights and relationships in text. It automatically detects personally identifiable information (PII) such as names, addresses, dates, credit card numbers, and social security numbers from documents in your Amazon S3 bucket.
Example 2: PII Redaction - This Lambda function uses Amazon Comprehend, a natural language processing (NLP) service that uses machine learning to find insights and relationships in text. It automatically redacts personally identifiable information (PII) such as names, addresses, dates, credit card numbers, and social security numbers from documents in your Amazon S3 bucket.
Example 3: Decompression - The Lambda function S3ObjectLambdaDecompression can decompress objects stored in S3 in one of six compressed file formats: bzip2, gzip, snappy, zlib, zstandard, and ZIP.
For information on how to view and use these functions, see Using Amazon Web Services built Lambda functions in the Amazon S3 User Guide.
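As a rough illustration of the metadata-forwarding behavior described above, here is a hedged Go sketch that answers an Object Lambda invocation with WriteGetObjectResponse. The route and token would normally come from the Lambda event payload; the transformed body and the custom x-amz-meta header below are placeholders, not values from this patch.

```go
package main

import (
	"log"
	"strings"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

// respond returns a transformed object to the end user's GetObject call.
// route and token arrive in the S3 Object Lambda event payload.
func respond(svc *s3.S3, route, token, transformed string) error {
	_, err := svc.WriteGetObjectResponse(&s3.WriteGetObjectResponseInput{
		RequestRoute: aws.String(route),
		RequestToken: aws.String(token),
		StatusCode:   aws.Int64(200),
		Body:         aws.ReadSeekCloser(strings.NewReader(transformed)),
		// Entries here are surfaced to the caller as x-amz-meta-* headers.
		Metadata: map[string]*string{
			"my-custom-header": aws.String("MyCustomValue"), // hypothetical
		},
	})
	return err
}

func main() {
	sess := session.Must(session.NewSession())
	if err := respond(s3.New(sess), "example-route", "example-token", "transformed body"); err != nil {
		log.Fatal(err)
	}
}
```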
" }, "shapes": { @@ -161,106 +162,107 @@ "AccessPointArn": { "base": null, "refs": { - "MetricsAndOperator$AccessPointArn": "The access point ARN used when evaluating an AND predicate.
", + "MetricsAndOperator$AccessPointArn": "The access point ARN used when evaluating an AND
predicate.
The access point ARN used when evaluating a metrics filter.
" } }, "AccountId": { "base": null, "refs": { - "AbortMultipartUploadRequest$ExpectedBucketOwner": "The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden
(access denied).
The account ID that owns the destination S3 bucket. If no account ID is provided, the owner is not validated before exporting data.
Although this value is optional, we strongly recommend that you set it to help prevent problems if the destination bucket ownership changes.
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
The account ID of the expected destination bucket owner. If the destination bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
The account ID of the expected source bucket owner. If the source bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden
(access denied).
The account ID of the expected destination bucket owner. If the destination bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden
(access denied).
The account ID of the expected source bucket owner. If the source bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden
(access denied).
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden
(access denied).
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden
(access denied).
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden
(access denied).
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden
(access denied).
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden
(access denied).
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden
(access denied).
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden
(access denied).
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden
(access denied).
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden
(access denied).
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden
(access denied).
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden
(access denied).
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden
(access denied).
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden
(access denied).
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden
(access denied).
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden
(access denied).
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden
(access denied).
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden
(access denied).
Destination bucket owner account ID. In a cross-account scenario, if you direct Amazon S3 to change replica ownership to the Amazon Web Services account that owns the destination bucket by specifying the AccessControlTranslation
property, this is the account ID of the destination bucket owner. For more information, see Replication Additional Configuration: Changing the Replica Owner in the Amazon S3 User Guide.
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden
(access denied).
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden
(access denied).
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden
(access denied).
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden
(access denied).
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden
(access denied).
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden
(access denied).
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden
(access denied).
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden
(access denied).
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden
(access denied).
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden
(access denied).
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden
(access denied).
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden
(access denied).
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden
(access denied).
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden
(access denied).
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden
(access denied).
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden
(access denied).
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden
(access denied).
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden
(access denied).
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden
(access denied).
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden
(access denied).
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden
(access denied).
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden
(access denied).
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden
(access denied).
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden
(access denied).
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden
(access denied).
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden
(access denied).
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden
(access denied).
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden
(access denied).
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden
(access denied).
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden
(access denied).
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden
(access denied).
The account ID that owns the destination S3 bucket. If no account ID is provided, the owner is not validated before exporting data.
Although this value is optional, we strongly recommend that you set it to help prevent problems if the destination bucket ownership changes.
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
The account ID of the expected destination bucket owner. If the destination bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
The account ID of the expected source bucket owner. If the source bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden
(access denied).
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden
(access denied).
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden
(access denied).
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden
(access denied).
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden
(access denied).
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden
(access denied).
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden
(access denied).
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden
(access denied).
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden
(access denied).
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden
(access denied).
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden
(access denied).
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden
(access denied).
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden
(access denied).
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden
(access denied).
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden
(access denied).
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden
(access denied).
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden
(access denied).
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden
(access denied).
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden
(access denied).
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden
(access denied).
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden
(access denied).
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden
(access denied).
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden
(access denied).
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden
(access denied).
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden
(access denied).
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden
(access denied).
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden
(access denied).
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden
(access denied).
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden
(access denied).
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden
(access denied).
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden
(access denied).
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden
(access denied).
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden
(access denied).
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden
(access denied).
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden
(access denied).
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden
(access denied).
The account ID of the expected destination bucket owner. If the destination bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden
(access denied).
The account ID of the expected source bucket owner. If the source bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden
(access denied).
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request fails with the HTTP status code 403 Forbidden
(access denied).
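The behavior these strings document is easy to exercise from the SDK side. A hedged Go sketch with hypothetical bucket, key, and account ID values:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := s3.New(sess)

	// If example-bucket is not owned by account 111122223333, S3 rejects
	// the request with HTTP status code 403 Forbidden (access denied).
	_, err := svc.GetObject(&s3.GetObjectInput{
		Bucket:              aws.String("example-bucket"), // hypothetical
		Key:                 aws.String("example-key"),    // hypothetical
		ExpectedBucketOwner: aws.String("111122223333"),   // hypothetical account ID
	})
	if reqErr, ok := err.(awserr.RequestFailure); ok && reqErr.StatusCode() == 403 {
		log.Fatalf("bucket owner mismatch: %v", reqErr)
	}
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("owner check passed")
}
```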
The bucket name to which the upload was taking place.
When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
When using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action using S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon S3 User Guide.
", + "AbortMultipartUploadRequest$Bucket": "The bucket name to which the upload was taking place.
When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
When using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com
. When using this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts in the Amazon S3 User Guide.
The Amazon Resource Name (ARN) of the bucket to which data is exported.
", "Bucket$Name": "The name of the bucket.
", - "CompleteMultipartUploadOutput$Bucket": "The name of the bucket that contains the newly created object. Does not return the access point ARN or access point alias if used.
When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
When using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action using S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon S3 User Guide.
", - "CompleteMultipartUploadRequest$Bucket": "Name of the bucket to which the multipart upload was initiated.
When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
When using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action using S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon S3 User Guide.
", - "CopyObjectRequest$Bucket": "The name of the destination bucket.
When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
When using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action using S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon S3 User Guide.
", + "CompleteMultipartUploadOutput$Bucket": "The name of the bucket that contains the newly created object. Does not return the access point ARN or access point alias if used.
When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
When using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com
. When using this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts in the Amazon S3 User Guide.
Name of the bucket to which the multipart upload was initiated.
When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
When using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com
. When using this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts in the Amazon S3 User Guide.
The name of the destination bucket.
When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
When using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com
. When using this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts in the Amazon S3 User Guide.
The name of the bucket to create.
", - "CreateMultipartUploadOutput$Bucket": "The name of the bucket to which the multipart upload was initiated. Does not return the access point ARN or access point alias if used.
When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
When using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action using S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon S3 User Guide.
", - "CreateMultipartUploadRequest$Bucket": "The name of the bucket to which to initiate the upload
When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
When using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action using S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon S3 User Guide.
", + "CreateMultipartUploadOutput$Bucket": "The name of the bucket to which the multipart upload was initiated. Does not return the access point ARN or access point alias if used.
When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
When using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com
. When using this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts in the Amazon S3 User Guide.
The name of the bucket to which to initiate the upload
When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
When using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com
. When using this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts in the Amazon S3 User Guide.
The name of the bucket from which an analytics configuration is deleted.
", "DeleteBucketCorsRequest$Bucket": "Specifies the bucket whose cors
configuration is being deleted.
The name of the bucket containing the server-side encryption configuration to delete.
", @@ -473,9 +475,9 @@ "DeleteBucketRequest$Bucket": "Specifies the bucket being deleted.
", "DeleteBucketTaggingRequest$Bucket": "The bucket that has the tag set to be removed.
", "DeleteBucketWebsiteRequest$Bucket": "The bucket name for which you want to remove the website configuration.
", - "DeleteObjectRequest$Bucket": "The bucket name of the bucket containing the object.
When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
When using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action using S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon S3 User Guide.
", - "DeleteObjectTaggingRequest$Bucket": "The bucket name containing the objects from which to remove the tags.
When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
When using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action using S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon S3 User Guide.
", - "DeleteObjectsRequest$Bucket": "The bucket name containing the objects to delete.
When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
When using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action using S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon S3 User Guide.
", + "DeleteObjectRequest$Bucket": "The bucket name of the bucket containing the object.
When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
When using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com
. When using this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts in the Amazon S3 User Guide.
The bucket name containing the objects from which to remove the tags.
When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
When using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com
. When using this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts in the Amazon S3 User Guide.
The bucket name containing the objects to delete.
When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
When using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com
. When using this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts in the Amazon S3 User Guide.
The Amazon S3 bucket whose PublicAccessBlock
configuration you want to delete.
The Amazon Resource Name (ARN) of the bucket where you want Amazon S3 to store the results.
", "GetBucketAccelerateConfigurationRequest$Bucket": "The name of the bucket for which the accelerate configuration is retrieved.
", @@ -500,30 +502,31 @@ "GetBucketVersioningRequest$Bucket": "The name of the bucket for which to get the versioning information.
", "GetBucketWebsiteRequest$Bucket": "The bucket name for which to get the website configuration.
", "GetObjectAclRequest$Bucket": "The bucket name that contains the object for which to get the ACL information.
When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
", - "GetObjectLegalHoldRequest$Bucket": "The bucket name containing the object whose Legal Hold status you want to retrieve.
When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
", + "GetObjectAttributesRequest$Bucket": "The name of the bucket that contains the object.
When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
When using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com
. When using this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts in the Amazon S3 User Guide.
The bucket name containing the object whose legal hold status you want to retrieve.
When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
", "GetObjectLockConfigurationRequest$Bucket": "The bucket whose Object Lock configuration you want to retrieve.
When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
", - "GetObjectRequest$Bucket": "The bucket name containing the object.
When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
When using an Object Lambda access point the hostname takes the form AccessPointName-AccountId.s3-object-lambda.Region.amazonaws.com.
When using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action using S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon S3 User Guide.
", + "GetObjectRequest$Bucket": "The bucket name containing the object.
When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
When using an Object Lambda access point the hostname takes the form AccessPointName-AccountId.s3-object-lambda.Region.amazonaws.com.
When using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts in the Amazon S3 User Guide.
The bucket name containing the object whose retention settings you want to retrieve.
When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
", - "GetObjectTaggingRequest$Bucket": "The bucket name containing the object for which to get the tagging information.
When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
When using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action using S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon S3 User Guide.
", + "GetObjectTaggingRequest$Bucket": "The bucket name containing the object for which to get the tagging information.
When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
When using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts in the Amazon S3 User Guide.
The name of the bucket containing the object for which to get the torrent files.
", "GetPublicAccessBlockRequest$Bucket": "The name of the Amazon S3 bucket whose PublicAccessBlock
configuration you want to retrieve.
The bucket name.
When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
When using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action using S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon S3 User Guide.
", - "HeadObjectRequest$Bucket": "The name of the bucket containing the object.
When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
When using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action using S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon S3 User Guide.
", + "HeadBucketRequest$Bucket": "The bucket name.
When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
When using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts in the Amazon S3 User Guide.
The name of the bucket containing the object.
When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
When using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts in the Amazon S3 User Guide.
The Amazon Resource Name (ARN) of the bucket where inventory results will be published.
", "ListBucketAnalyticsConfigurationsRequest$Bucket": "The name of the bucket from which analytics configurations are retrieved.
", "ListBucketIntelligentTieringConfigurationsRequest$Bucket": "The name of the Amazon S3 bucket whose configuration you want to modify or retrieve.
", "ListBucketInventoryConfigurationsRequest$Bucket": "The name of the bucket containing the inventory configurations to retrieve.
", "ListBucketMetricsConfigurationsRequest$Bucket": "The name of the bucket containing the metrics configurations to retrieve.
", "ListMultipartUploadsOutput$Bucket": "The name of the bucket to which the multipart upload was initiated. Does not return the access point ARN or access point alias if used.
", - "ListMultipartUploadsRequest$Bucket": "The name of the bucket to which the multipart upload was initiated.
When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
When using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action using S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon S3 User Guide.
", + "ListMultipartUploadsRequest$Bucket": "The name of the bucket to which the multipart upload was initiated.
When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
When using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts in the Amazon S3 User Guide.
The bucket name.
", "ListObjectVersionsRequest$Bucket": "The bucket name that contains the objects.
", "ListObjectsOutput$Name": "The bucket name.
", - "ListObjectsRequest$Bucket": "The name of the bucket containing the objects.
When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
When using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action using S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon S3 User Guide.
", - "ListObjectsV2Output$Name": "The bucket name.
When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
When using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action using S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon S3 User Guide.
", - "ListObjectsV2Request$Bucket": "Bucket name to list.
When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
When using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action using S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon S3 User Guide.
", + "ListObjectsRequest$Bucket": "The name of the bucket containing the objects.
When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
When using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts in the Amazon S3 User Guide.
The bucket name.
When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
When using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts in the Amazon S3 User Guide.
Bucket name to list.
When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
When using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts in the Amazon S3 User Guide.
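The same substitution applies to S3 on Outposts: the Outposts access point ARN goes straight into the Bucket field. A sketch; the region, ARN, and prefix are placeholders:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession(&aws.Config{Region: aws.String("us-west-2")})))

	// The Outposts access point ARN stands in for the bucket name.
	out, err := svc.ListObjectsV2(&s3.ListObjectsV2Input{
		Bucket: aws.String("arn:aws:s3-outposts:us-west-2:123456789012:outpost/op-01234567890123456/accesspoint/my-ap"),
		Prefix: aws.String("logs/"),
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, obj := range out.Contents {
		fmt.Println(aws.StringValue(obj.Key))
	}
}
```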
The name of the bucket to which the multipart upload was initiated. Does not return the access point ARN or access point alias if used.
", - "ListPartsRequest$Bucket": "The name of the bucket to which the parts are being uploaded.
When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
When using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action using S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon S3 User Guide.
", + "ListPartsRequest$Bucket": "The name of the bucket to which the parts are being uploaded.
When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
When using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts in the Amazon S3 User Guide.
The name of the bucket for which the accelerate configuration is set.
", "PutBucketAclRequest$Bucket": "The bucket to which to apply the ACL.
", "PutBucketAnalyticsConfigurationRequest$Bucket": "The name of the bucket to which an analytics configuration is stored.
", @@ -545,17 +548,17 @@ "PutBucketVersioningRequest$Bucket": "The bucket name.
", "PutBucketWebsiteRequest$Bucket": "The bucket name.
", "PutObjectAclRequest$Bucket": "The bucket name that contains the object to which you want to attach the ACL.
When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
", - "PutObjectLegalHoldRequest$Bucket": "The bucket name containing the object that you want to place a Legal Hold on.
When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
", + "PutObjectLegalHoldRequest$Bucket": "The bucket name containing the object that you want to place a legal hold on.
When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
", "PutObjectLockConfigurationRequest$Bucket": "The bucket whose Object Lock configuration you want to create or replace.
", - "PutObjectRequest$Bucket": "The bucket name to which the PUT action was initiated.
When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
When using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action using S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon S3 User Guide.
", + "PutObjectRequest$Bucket": "The bucket name to which the PUT action was initiated.
When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
When using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts in the Amazon S3 User Guide.
The bucket name that contains the object you want to apply this Object Retention configuration to.
When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
", - "PutObjectTaggingRequest$Bucket": "The bucket name containing the object.
When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
When using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action using S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon S3 User Guide.
", + "PutObjectTaggingRequest$Bucket": "The bucket name containing the object.
When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
When using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts in the Amazon S3 User Guide.
The name of the Amazon S3 bucket whose PublicAccessBlock configuration you want to set.
The bucket name containing the object to restore.
When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
When using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action using S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon S3 User Guide.
", + "RestoreObjectRequest$Bucket": "The bucket name containing the object to restore.
When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
When using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts in the Amazon S3 User Guide.
The name of the bucket where the restore results will be placed.
", "SelectObjectContentRequest$Bucket": "The S3 bucket.
", - "UploadPartCopyRequest$Bucket": "The bucket name.
When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
When using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action using S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon S3 User Guide.
", - "UploadPartRequest$Bucket": "The name of the bucket to which the multipart upload was initiated.
When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
When using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action using S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon S3 User Guide.
" + "UploadPartCopyRequest$Bucket": "The bucket name.
When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
When using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts in the Amazon S3 User Guide.
The name of the bucket to which the multipart upload was initiated.
When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
When using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts in the Amazon S3 User Guide.
The list of buckets owned by the requestor.
" + "ListBucketsOutput$Buckets": "The list of buckets owned by the requester.
" } }, "BypassGovernanceRetention": { "base": null, "refs": { - "DeleteObjectRequest$BypassGovernanceRetention": "Indicates whether S3 Object Lock should bypass Governance-mode restrictions to process this operation. To use this header, you must have the s3:PutBucketPublicAccessBlock
permission.
Specifies whether you want to delete this object even if it has a Governance-type Object Lock in place. To use this header, you must have the s3:PutBucketPublicAccessBlock permission.
Indicates whether S3 Object Lock should bypass Governance-mode restrictions to process this operation. To use this header, you must have the s3:BypassGovernanceRetention permission.
Specifies whether you want to delete this object even if it has a Governance-type Object Lock in place. To use this header, you must have the s3:BypassGovernanceRetention permission.
Indicates whether this action should bypass Governance-mode restrictions.
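A short sketch of the bypass header these entries document, using aws-sdk-go; the bucket and key are placeholders, and the caller also needs the s3:BypassGovernanceRetention permission:

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	// Explicitly bypass governance-mode Object Lock retention for this delete.
	_, err := svc.DeleteObject(&s3.DeleteObjectInput{
		Bucket:                    aws.String("my-locked-bucket"),   // placeholder
		Key:                       aws.String("example-object.txt"), // placeholder
		BypassGovernanceRetention: aws.Bool(true),
	})
	if err != nil {
		log.Fatal(err)
	}
}
```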
" } }, @@ -642,6 +645,141 @@ "WriteGetObjectResponseRequest$CacheControl": "Specifies caching behavior along the request/reply chain.
" } }, + "Checksum": { + "base": "Contains all the possible checksum or digest values for an object.
", + "refs": { + "GetObjectAttributesOutput$Checksum": "The checksum or digest of the object.
" + } + }, + "ChecksumAlgorithm": { + "base": null, + "refs": { + "ChecksumAlgorithmList$member": null, + "CopyObjectRequest$ChecksumAlgorithm": "Indicates the algorithm you want Amazon S3 to use to create the checksum for the object. For more information, see Checking object integrity in the Amazon S3 User Guide.
", + "CreateMultipartUploadOutput$ChecksumAlgorithm": "The algorithm that was used to create a checksum of the object.
", + "CreateMultipartUploadRequest$ChecksumAlgorithm": "Indicates the algorithm you want Amazon S3 to use to create the checksum for the object. For more information, see Checking object integrity in the Amazon S3 User Guide.
", + "DeleteObjectsRequest$ChecksumAlgorithm": "Indicates the algorithm used to create the checksum for the object when using the SDK. This header will not provide any additional functionality if not using the SDK. When sending this header, there must be a corresponding x-amz-checksum
or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the HTTP status code 400 Bad Request. For more information, see Checking object integrity in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm parameter.
This checksum algorithm must be the same for all parts and it must match the checksum value supplied in the CreateMultipartUpload request.
The algorithm that was used to create a checksum of the object.
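A sketch of the rule stated above: one algorithm chosen at CreateMultipartUpload, the same algorithm for every part, and the per-part checksums echoed back at completion. Bucket, key, and the single-part payload are placeholders:

```go
package main

import (
	"bytes"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))
	bucket, key := aws.String("my-bucket"), aws.String("big-object.bin") // placeholders

	create, err := svc.CreateMultipartUpload(&s3.CreateMultipartUploadInput{
		Bucket:            bucket,
		Key:               key,
		ChecksumAlgorithm: aws.String("CRC32"), // chosen once, reused for every part
	})
	if err != nil {
		log.Fatal(err)
	}

	part, err := svc.UploadPart(&s3.UploadPartInput{
		Bucket:            bucket,
		Key:               key,
		UploadId:          create.UploadId,
		PartNumber:        aws.Int64(1),
		Body:              bytes.NewReader([]byte("part one payload")),
		ChecksumAlgorithm: aws.String("CRC32"), // must match CreateMultipartUpload
	})
	if err != nil {
		log.Fatal(err)
	}

	// Echo the part's checksum back so S3 can validate the assembled object.
	_, err = svc.CompleteMultipartUpload(&s3.CompleteMultipartUploadInput{
		Bucket:   bucket,
		Key:      key,
		UploadId: create.UploadId,
		MultipartUpload: &s3.CompletedMultipartUpload{
			Parts: []*s3.CompletedPart{{
				PartNumber:    aws.Int64(1),
				ETag:          part.ETag,
				ChecksumCRC32: part.ChecksumCRC32,
			}},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}
```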
", + "MultipartUpload$ChecksumAlgorithm": "The algorithm that was used to create a checksum of the object.
", + "PutBucketAccelerateConfigurationRequest$ChecksumAlgorithm": "Indicates the algorithm used to create the checksum for the object when using the SDK. This header will not provide any additional functionality if not using the SDK. When sending this header, there must be a corresponding x-amz-checksum
or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the HTTP status code 400 Bad Request. For more information, see Checking object integrity in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm parameter.
This checksum algorithm must be the same for all parts and it must match the checksum value supplied in the CreateMultipartUpload request.
The algorithm that was used to create a checksum of the object.
", + "ObjectVersion$ChecksumAlgorithm": "The algorithm that was used to create a checksum of the object.
" + } + }, + "ChecksumCRC32": { + "base": null, + "refs": { + "Checksum$ChecksumCRC32": "The base64-encoded, 32-bit CRC32 checksum of the object. This will only be present if it was uploaded with the object. With multipart uploads, this may not be a checksum value of the object. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.
", + "CompleteMultipartUploadOutput$ChecksumCRC32": "The base64-encoded, 32-bit CRC32 checksum of the object. This will only be present if it was uploaded with the object. With multipart uploads, this may not be a checksum value of the object. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.
", + "CompleteMultipartUploadRequest$ChecksumCRC32": "This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the base64-encoded, 32-bit CRC32 checksum of the object. For more information, see Checking object integrity in the Amazon S3 User Guide.
", + "CompletedPart$ChecksumCRC32": "The base64-encoded, 32-bit CRC32 checksum of the object. This will only be present if it was uploaded with the object. With multipart uploads, this may not be a checksum value of the object. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.
", + "CopyObjectResult$ChecksumCRC32": "The base64-encoded, 32-bit CRC32 checksum of the object. This will only be present if it was uploaded with the object. With multipart uploads, this may not be a checksum value of the object. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.
", + "CopyPartResult$ChecksumCRC32": "The base64-encoded, 32-bit CRC32 checksum of the object. This will only be present if it was uploaded with the object. With multipart uploads, this may not be a checksum value of the object. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.
", + "GetObjectOutput$ChecksumCRC32": "The base64-encoded, 32-bit CRC32 checksum of the object. This will only be present if it was uploaded with the object. With multipart uploads, this may not be a checksum value of the object. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.
", + "HeadObjectOutput$ChecksumCRC32": "The base64-encoded, 32-bit CRC32 checksum of the object. This will only be present if it was uploaded with the object. With multipart uploads, this may not be a checksum value of the object. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.
", + "ObjectPart$ChecksumCRC32": "This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the base64-encoded, 32-bit CRC32 checksum of the object. For more information, see Checking object integrity in the Amazon S3 User Guide.
", + "Part$ChecksumCRC32": "This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the base64-encoded, 32-bit CRC32 checksum of the object. For more information, see Checking object integrity in the Amazon S3 User Guide.
", + "PutObjectOutput$ChecksumCRC32": "The base64-encoded, 32-bit CRC32 checksum of the object. This will only be present if it was uploaded with the object. With multipart uploads, this may not be a checksum value of the object. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.
", + "PutObjectRequest$ChecksumCRC32": "This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the base64-encoded, 32-bit CRC32 checksum of the object. For more information, see Checking object integrity in the Amazon S3 User Guide.
", + "UploadPartOutput$ChecksumCRC32": "The base64-encoded, 32-bit CRC32 checksum of the object. This will only be present if it was uploaded with the object. With multipart uploads, this may not be a checksum value of the object. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.
", + "UploadPartRequest$ChecksumCRC32": "This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the base64-encoded, 32-bit CRC32 checksum of the object. For more information, see Checking object integrity in the Amazon S3 User Guide.
", + "WriteGetObjectResponseRequest$ChecksumCRC32": "This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This specifies the base64-encoded, 32-bit CRC32 checksum of the object returned by the Object Lambda function. This may not match the checksum for the object stored in Amazon S3. Amazon S3 will perform validation of the checksum values only when the original GetObject
request required checksum validation. For more information about checksums, see Checking object integrity in the Amazon S3 User Guide.
Only one checksum header can be specified at a time. If you supply multiple checksum headers, this request will fail.
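For local verification against the base64-encoded values these headers carry, a small self-contained helper; the big-endian byte order is an assumption consistent with the representation described above and worth checking against a real response:

```go
package main

import (
	"encoding/base64"
	"encoding/binary"
	"fmt"
	"hash/crc32"
)

// s3StyleCRC32 returns the CRC32 of data in the form these doc strings
// describe for x-amz-checksum-crc32: the 32-bit checksum, big-endian,
// base64-encoded. (Assumed representation; verify against a real header.)
func s3StyleCRC32(data []byte) string {
	sum := crc32.ChecksumIEEE(data)
	buf := make([]byte, 4)
	binary.BigEndian.PutUint32(buf, sum)
	return base64.StdEncoding.EncodeToString(buf)
}

func main() {
	fmt.Println(s3StyleCRC32([]byte("hello, checksums")))
}
```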
" + } + }, + "ChecksumCRC32C": { + "base": null, + "refs": { + "Checksum$ChecksumCRC32C": "The base64-encoded, 32-bit CRC32C checksum of the object. This will only be present if it was uploaded with the object. With multipart uploads, this may not be a checksum value of the object. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.
", + "CompleteMultipartUploadOutput$ChecksumCRC32C": "The base64-encoded, 32-bit CRC32C checksum of the object. This will only be present if it was uploaded with the object. With multipart uploads, this may not be a checksum value of the object. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.
", + "CompleteMultipartUploadRequest$ChecksumCRC32C": "This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the base64-encoded, 32-bit CRC32C checksum of the object. For more information, see Checking object integrity in the Amazon S3 User Guide.
", + "CompletedPart$ChecksumCRC32C": "The base64-encoded, 32-bit CRC32C checksum of the object. This will only be present if it was uploaded with the object. With multipart uploads, this may not be a checksum value of the object. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.
", + "CopyObjectResult$ChecksumCRC32C": "The base64-encoded, 32-bit CRC32C checksum of the object. This will only be present if it was uploaded with the object. With multipart uploads, this may not be a checksum value of the object. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.
", + "CopyPartResult$ChecksumCRC32C": "The base64-encoded, 32-bit CRC32C checksum of the object. This will only be present if it was uploaded with the object. With multipart uploads, this may not be a checksum value of the object. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.
", + "GetObjectOutput$ChecksumCRC32C": "The base64-encoded, 32-bit CRC32C checksum of the object. This will only be present if it was uploaded with the object. With multipart uploads, this may not be a checksum value of the object. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.
", + "HeadObjectOutput$ChecksumCRC32C": "The base64-encoded, 32-bit CRC32C checksum of the object. This will only be present if it was uploaded with the object. With multipart uploads, this may not be a checksum value of the object. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.
", + "ObjectPart$ChecksumCRC32C": "The base64-encoded, 32-bit CRC32C checksum of the object. This will only be present if it was uploaded with the object. With multipart uploads, this may not be a checksum value of the object. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.
", + "Part$ChecksumCRC32C": "The base64-encoded, 32-bit CRC32C checksum of the object. This will only be present if it was uploaded with the object. With multipart uploads, this may not be a checksum value of the object. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.
", + "PutObjectOutput$ChecksumCRC32C": "The base64-encoded, 32-bit CRC32C checksum of the object. This will only be present if it was uploaded with the object. With multipart uploads, this may not be a checksum value of the object. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.
", + "PutObjectRequest$ChecksumCRC32C": "This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the base64-encoded, 32-bit CRC32C checksum of the object. For more information, see Checking object integrity in the Amazon S3 User Guide.
", + "UploadPartOutput$ChecksumCRC32C": "The base64-encoded, 32-bit CRC32C checksum of the object. This will only be present if it was uploaded with the object. With multipart uploads, this may not be a checksum value of the object. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.
", + "UploadPartRequest$ChecksumCRC32C": "This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the base64-encoded, 32-bit CRC32C checksum of the object. For more information, see Checking object integrity in the Amazon S3 User Guide.
", + "WriteGetObjectResponseRequest$ChecksumCRC32C": "This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This specifies the base64-encoded, 32-bit CRC32C checksum of the object returned by the Object Lambda function. This may not match the checksum for the object stored in Amazon S3. Amazon S3 will perform validation of the checksum values only when the original GetObject
request required checksum validation. For more information about checksums, see Checking object integrity in the Amazon S3 User Guide.
Only one checksum header can be specified at a time. If you supply multiple checksum headers, this request will fail.
" + } + }, + "ChecksumMode": { + "base": null, + "refs": { + "GetObjectRequest$ChecksumMode": "To retrieve the checksum, this mode must be enabled.
", + "HeadObjectRequest$ChecksumMode": "To retrieve the checksum, this parameter must be enabled.
In addition, if you enable ChecksumMode
and the object is encrypted with Amazon Web Services Key Management Service (Amazon Web Services KMS), you must have permission to use the kms:Decrypt
action for the request to succeed.
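The ChecksumMode members above gate whether S3 returns checksum headers on GET and HEAD. A small sketch under those assumptions; the helper name and the s3iface wiring are illustrative, not part of this diff:

```go
package s3examples

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
	"github.com/aws/aws-sdk-go/service/s3/s3iface"
)

// headWithChecksum asks S3 to include checksum headers in the HEAD response.
func headWithChecksum(svc s3iface.S3API, bucket, key string) error {
	out, err := svc.HeadObject(&s3.HeadObjectInput{
		Bucket:       aws.String(bucket),
		Key:          aws.String(key),
		ChecksumMode: aws.String(s3.ChecksumModeEnabled), // "ENABLED"
	})
	if err != nil {
		return err
	}
	// Each Checksum* field is set only if that algorithm was used at upload time.
	fmt.Println("sha256:", aws.StringValue(out.ChecksumSHA256))
	return nil
}
```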
" + } + }, + "ChecksumSHA1": { + "base": null, + "refs": { + "Checksum$ChecksumSHA1": "The base64-encoded, 160-bit SHA-1 digest of the object. This will only be present if it was uploaded with the object. With multipart uploads, this may not be a checksum value of the object. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.
", + "CompleteMultipartUploadOutput$ChecksumSHA1": "The base64-encoded, 160-bit SHA-1 digest of the object. This will only be present if it was uploaded with the object. With multipart uploads, this may not be a checksum value of the object. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.
", + "CompleteMultipartUploadRequest$ChecksumSHA1": "This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the base64-encoded, 160-bit SHA-1 digest of the object. For more information, see Checking object integrity in the Amazon S3 User Guide.
", + "CompletedPart$ChecksumSHA1": "The base64-encoded, 160-bit SHA-1 digest of the object. This will only be present if it was uploaded with the object. With multipart uploads, this may not be a checksum value of the object. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.
", + "CopyObjectResult$ChecksumSHA1": "The base64-encoded, 160-bit SHA-1 digest of the object. This will only be present if it was uploaded with the object. With multipart uploads, this may not be a checksum value of the object. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.
", + "CopyPartResult$ChecksumSHA1": "The base64-encoded, 160-bit SHA-1 digest of the object. This will only be present if it was uploaded with the object. With multipart uploads, this may not be a checksum value of the object. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.
", + "GetObjectOutput$ChecksumSHA1": "The base64-encoded, 160-bit SHA-1 digest of the object. This will only be present if it was uploaded with the object. With multipart uploads, this may not be a checksum value of the object. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.
", + "HeadObjectOutput$ChecksumSHA1": "The base64-encoded, 160-bit SHA-1 digest of the object. This will only be present if it was uploaded with the object. With multipart uploads, this may not be a checksum value of the object. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.
", + "ObjectPart$ChecksumSHA1": "The base64-encoded, 160-bit SHA-1 digest of the object. This will only be present if it was uploaded with the object. With multipart uploads, this may not be a checksum value of the object. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.
", + "Part$ChecksumSHA1": "The base64-encoded, 160-bit SHA-1 digest of the object. This will only be present if it was uploaded with the object. With multipart uploads, this may not be a checksum value of the object. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.
", + "PutObjectOutput$ChecksumSHA1": "The base64-encoded, 160-bit SHA-1 digest of the object. This will only be present if it was uploaded with the object. With multipart uploads, this may not be a checksum value of the object. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.
", + "PutObjectRequest$ChecksumSHA1": "This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the base64-encoded, 160-bit SHA-1 digest of the object. For more information, see Checking object integrity in the Amazon S3 User Guide.
", + "UploadPartOutput$ChecksumSHA1": "The base64-encoded, 160-bit SHA-1 digest of the object. This will only be present if it was uploaded with the object. With multipart uploads, this may not be a checksum value of the object. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.
", + "UploadPartRequest$ChecksumSHA1": "This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the base64-encoded, 160-bit SHA-1 digest of the object. For more information, see Checking object integrity in the Amazon S3 User Guide.
", + "WriteGetObjectResponseRequest$ChecksumSHA1": "This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This specifies the base64-encoded, 160-bit SHA-1 digest of the object returned by the Object Lambda function. This may not match the checksum for the object stored in Amazon S3. Amazon S3 will perform validation of the checksum values only when the original GetObject
request required checksum validation. For more information about checksums, see Checking object integrity in the Amazon S3 User Guide.
Only one checksum header can be specified at a time. If you supply multiple checksum headers, this request will fail.
" + } + }, + "ChecksumSHA256": { + "base": null, + "refs": { + "Checksum$ChecksumSHA256": "The base64-encoded, 256-bit SHA-256 digest of the object. This will only be present if it was uploaded with the object. With multipart uploads, this may not be a checksum value of the object. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.
", + "CompleteMultipartUploadOutput$ChecksumSHA256": "The base64-encoded, 256-bit SHA-256 digest of the object. This will only be present if it was uploaded with the object. With multipart uploads, this may not be a checksum value of the object. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.
", + "CompleteMultipartUploadRequest$ChecksumSHA256": "This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the base64-encoded, 256-bit SHA-256 digest of the object. For more information, see Checking object integrity in the Amazon S3 User Guide.
", + "CompletedPart$ChecksumSHA256": "The base64-encoded, 256-bit SHA-256 digest of the object. This will only be present if it was uploaded with the object. With multipart uploads, this may not be a checksum value of the object. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.
", + "CopyObjectResult$ChecksumSHA256": "The base64-encoded, 256-bit SHA-256 digest of the object. This will only be present if it was uploaded with the object. With multipart uploads, this may not be a checksum value of the object. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.
", + "CopyPartResult$ChecksumSHA256": "The base64-encoded, 256-bit SHA-256 digest of the object. This will only be present if it was uploaded with the object. With multipart uploads, this may not be a checksum value of the object. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.
", + "GetObjectOutput$ChecksumSHA256": "The base64-encoded, 256-bit SHA-256 digest of the object. This will only be present if it was uploaded with the object. With multipart uploads, this may not be a checksum value of the object. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.
", + "HeadObjectOutput$ChecksumSHA256": "The base64-encoded, 256-bit SHA-256 digest of the object. This will only be present if it was uploaded with the object. With multipart uploads, this may not be a checksum value of the object. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.
", + "ObjectPart$ChecksumSHA256": "The base64-encoded, 256-bit SHA-256 digest of the object. This will only be present if it was uploaded with the object. With multipart uploads, this may not be a checksum value of the object. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.
", + "Part$ChecksumSHA256": "This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the base64-encoded, 256-bit SHA-256 digest of the object. For more information, see Checking object integrity in the Amazon S3 User Guide.
", + "PutObjectOutput$ChecksumSHA256": "The base64-encoded, 256-bit SHA-256 digest of the object. This will only be present if it was uploaded with the object. With multipart uploads, this may not be a checksum value of the object. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.
", + "PutObjectRequest$ChecksumSHA256": "This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the base64-encoded, 256-bit SHA-256 digest of the object. For more information, see Checking object integrity in the Amazon S3 User Guide.
", + "UploadPartOutput$ChecksumSHA256": "The base64-encoded, 256-bit SHA-256 digest of the object. This will only be present if it was uploaded with the object. With multipart uploads, this may not be a checksum value of the object. For more information about how checksums are calculated with multipart uploads, see Checking object integrity in the Amazon S3 User Guide.
", + "UploadPartRequest$ChecksumSHA256": "This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the base64-encoded, 256-bit SHA-256 digest of the object. For more information, see Checking object integrity in the Amazon S3 User Guide.
", + "WriteGetObjectResponseRequest$ChecksumSHA256": "This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This specifies the base64-encoded, 256-bit SHA-256 digest of the object returned by the Object Lambda function. This may not match the checksum for the object stored in Amazon S3. Amazon S3 will perform validation of the checksum values only when the original GetObject
request required checksum validation. For more information about checksums, see Checking object integrity in the Amazon S3 User Guide.
Only one checksum header can be specified at a time. If you supply multiple checksum headers, this request will fail.
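Per-part checksums, as described by the UploadPart and CompletedPart members above, can be carried through a multipart upload roughly as follows. This is a sketch: it assumes the CreateMultipartUploadInput.ChecksumAlgorithm member from the same API update, and it omits AbortMultipartUpload cleanup on error:

```go
package s3examples

import (
	"bytes"
	"crypto/sha256"
	"encoding/base64"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
	"github.com/aws/aws-sdk-go/service/s3/s3iface"
)

// sha256B64 returns the base64-encoded, 256-bit SHA-256 digest of p,
// the format the ChecksumSHA256 members above expect.
func sha256B64(p []byte) string {
	sum := sha256.Sum256(p)
	return base64.StdEncoding.EncodeToString(sum[:])
}

// multipartWithChecksums uploads parts with per-part SHA-256 checksums and
// echoes each part's checksum back in the CompletedPart list.
func multipartWithChecksums(svc s3iface.S3API, bucket, key string, parts [][]byte) error {
	mp, err := svc.CreateMultipartUpload(&s3.CreateMultipartUploadInput{
		Bucket:            aws.String(bucket),
		Key:               aws.String(key),
		ChecksumAlgorithm: aws.String(s3.ChecksumAlgorithmSha256),
	})
	if err != nil {
		return err
	}
	completed := make([]*s3.CompletedPart, 0, len(parts))
	for i, p := range parts {
		num := int64(i + 1) // part numbers are 1-based
		up, err := svc.UploadPart(&s3.UploadPartInput{
			Bucket:         aws.String(bucket),
			Key:            aws.String(key),
			UploadId:       mp.UploadId,
			PartNumber:     aws.Int64(num),
			Body:           bytes.NewReader(p),
			ChecksumSHA256: aws.String(sha256B64(p)),
		})
		if err != nil {
			return err
		}
		completed = append(completed, &s3.CompletedPart{
			ETag:           up.ETag,
			PartNumber:     aws.Int64(num),
			ChecksumSHA256: up.ChecksumSHA256,
		})
	}
	_, err = svc.CompleteMultipartUpload(&s3.CompleteMultipartUploadInput{
		Bucket:          aws.String(bucket),
		Key:             aws.String(key),
		UploadId:        mp.UploadId,
		MultipartUpload: &s3.CompletedMultipartUpload{Parts: completed},
	})
	return err
}
```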
" + } + }, "CloudFunction": { "base": null, "refs": { @@ -851,8 +989,8 @@ "CopySource": { "base": null, "refs": { - "CopyObjectRequest$CopySource": "Specifies the source object for the copy operation. You specify the value in one of two formats, depending on whether you want to access the source object through an access point:
For objects not accessed through an access point, specify the name of the source bucket and the key of the source object, separated by a slash (/). For example, to copy the object reports/january.pdf
from the bucket awsexamplebucket
, use awsexamplebucket/reports/january.pdf
. The value must be URL encoded.
For objects accessed through access points, specify the Amazon Resource Name (ARN) of the object as accessed through the access point, in the format arn:aws:s3:<Region>:<account-id>:accesspoint/<access-point-name>/object/<key>
. For example, to copy the object reports/january.pdf
through access point my-access-point
owned by account 123456789012
in Region us-west-2
, use the URL encoding of arn:aws:s3:us-west-2:123456789012:accesspoint/my-access-point/object/reports/january.pdf
. The value must be URL encoded.
Amazon S3 supports copy operations using access points only when the source and destination buckets are in the same Amazon Web Services Region.
Alternatively, for objects accessed through Amazon S3 on Outposts, specify the ARN of the object as accessed in the format arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/object/<key>
. For example, to copy the object reports/january.pdf
through outpost my-outpost
owned by account 123456789012
in Region us-west-2
, use the URL encoding of arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/object/reports/january.pdf
. The value must be URL encoded.
To copy a specific version of an object, append ?versionId=<version-id>
to the value (for example, awsexamplebucket/reports/january.pdf?versionId=QUpfdndhfd8438MNFDN93jdnJFkdmqnh893
). If you don't specify a version ID, Amazon S3 copies the latest version of the source object.
", - "UploadPartCopyRequest$CopySource": "Specifies the source object for the copy operation. You specify the value in one of two formats, depending on whether you want to access the source object through an access point:
For objects not accessed through an access point, specify the name of the source bucket and key of the source object, separated by a slash (/). For example, to copy the object reports/january.pdf
from the bucket awsexamplebucket
, use awsexamplebucket/reports/january.pdf
. The value must be URL encoded.
For objects accessed through access points, specify the Amazon Resource Name (ARN) of the object as accessed through the access point, in the format arn:aws:s3:<Region>:<account-id>:accesspoint/<access-point-name>/object/<key>
. For example, to copy the object reports/january.pdf
through access point my-access-point
owned by account 123456789012
in Region us-west-2
, use the URL encoding of arn:aws:s3:us-west-2:123456789012:accesspoint/my-access-point/object/reports/january.pdf
. The value must be URL encoded.
Amazon S3 supports copy operations using access points only when the source and destination buckets are in the same Amazon Web Services Region.
Alternatively, for objects accessed through Amazon S3 on Outposts, specify the ARN of the object as accessed in the format arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/object/<key>
. For example, to copy the object reports/january.pdf
through outpost my-outpost
owned by account 123456789012
in Region us-west-2
, use the URL encoding of arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/object/reports/january.pdf
. The value must be URL encoded.
To copy a specific version of an object, append ?versionId=<version-id>
to the value (for example, awsexamplebucket/reports/january.pdf?versionId=QUpfdndhfd8438MNFDN93jdnJFkdmqnh893
). If you don't specify a version ID, Amazon S3 copies the latest version of the source object.
" + "CopyObjectRequest$CopySource": "Specifies the source object for the copy operation. You specify the value in one of two formats, depending on whether you want to access the source object through an access point:
For objects not accessed through an access point, specify the name of the source bucket and the key of the source object, separated by a slash (/). For example, to copy the object reports/january.pdf
from the bucket awsexamplebucket
, use awsexamplebucket/reports/january.pdf
. The value must be URL-encoded.
For objects accessed through access points, specify the Amazon Resource Name (ARN) of the object as accessed through the access point, in the format arn:aws:s3:<Region>:<account-id>:accesspoint/<access-point-name>/object/<key>
. For example, to copy the object reports/january.pdf
through access point my-access-point
owned by account 123456789012
in Region us-west-2
, use the URL encoding of arn:aws:s3:us-west-2:123456789012:accesspoint/my-access-point/object/reports/january.pdf
. The value must be URL encoded.
Amazon S3 supports copy operations using access points only when the source and destination buckets are in the same Amazon Web Services Region.
Alternatively, for objects accessed through Amazon S3 on Outposts, specify the ARN of the object as accessed in the format arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/object/<key>
. For example, to copy the object reports/january.pdf
through outpost my-outpost
owned by account 123456789012
in Region us-west-2
, use the URL encoding of arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/object/reports/january.pdf
. The value must be URL-encoded.
To copy a specific version of an object, append ?versionId=<version-id>
to the value (for example, awsexamplebucket/reports/january.pdf?versionId=QUpfdndhfd8438MNFDN93jdnJFkdmqnh893
). If you don't specify a version ID, Amazon S3 copies the latest version of the source object.
", + "UploadPartCopyRequest$CopySource": "Specifies the source object for the copy operation. You specify the value in one of two formats, depending on whether you want to access the source object through an access point:
For objects not accessed through an access point, specify the name of the source bucket and key of the source object, separated by a slash (/). For example, to copy the object reports/january.pdf
from the bucket awsexamplebucket
, use awsexamplebucket/reports/january.pdf
. The value must be URL-encoded.
For objects accessed through access points, specify the Amazon Resource Name (ARN) of the object as accessed through the access point, in the format arn:aws:s3:<Region>:<account-id>:accesspoint/<access-point-name>/object/<key>
. For example, to copy the object reports/january.pdf
through access point my-access-point
owned by account 123456789012
in Region us-west-2
, use the URL encoding of arn:aws:s3:us-west-2:123456789012:accesspoint/my-access-point/object/reports/january.pdf
. The value must be URL encoded.
Amazon S3 supports copy operations using access points only when the source and destination buckets are in the same Amazon Web Services Region.
Alternatively, for objects accessed through Amazon S3 on Outposts, specify the ARN of the object as accessed in the format arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/object/<key>
. For example, to copy the object reports/january.pdf
through outpost my-outpost
owned by account 123456789012
in Region us-west-2
, use the URL encoding of arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/object/reports/january.pdf
. The value must be URL-encoded.
To copy a specific version of an object, append ?versionId=<version-id>
to the value (for example, awsexamplebucket/reports/january.pdf?versionId=QUpfdndhfd8438MNFDN93jdnJFkdmqnh893
). If you don't specify a version ID, Amazon S3 copies the latest version of the source object.
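All four CopySource variants above require a URL-encoded value. A sketch of the plain bucket/key form with net/url; the access-point and Outposts ARN forms are encoded the same way, and the helper name is illustrative:

```go
package s3examples

import (
	"net/url"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
	"github.com/aws/aws-sdk-go/service/s3/s3iface"
)

// copyLatest copies srcBucket/srcKey to dstBucket/dstKey using the
// "bucket/key" CopySource form; the value must be URL-encoded.
func copyLatest(svc s3iface.S3API, srcBucket, srcKey, dstBucket, dstKey string) error {
	_, err := svc.CopyObject(&s3.CopyObjectInput{
		Bucket:     aws.String(dstBucket),
		Key:        aws.String(dstKey),
		CopySource: aws.String(url.PathEscape(srcBucket + "/" + srcKey)),
		// Append ?versionId=<version-id> to copy a specific source version;
		// without it, S3 copies the latest version.
	})
	return err
}
```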
" } }, "DeleteMarker": { "base": null, "refs": { "DeleteObjectOutput$DeleteMarker": "Specifies whether the versioned object that was permanently deleted was (true) or was not (false) a delete marker.
", "DeletedObject$DeleteMarker": "Specifies whether the versioned object that was permanently deleted was (true) or was not (false) a delete marker. In a simple DELETE, this header indicates whether (true) or not (false) a delete marker was created.
", + "GetObjectAttributesOutput$DeleteMarker": "Specifies whether the object retrieved was (true
) or was not (false
) a delete marker. If false
, this response header does not appear in the response.
", "GetObjectOutput$DeleteMarker": "Specifies whether the object retrieved was (true) or was not (false) a Delete Marker. If false, this response header does not appear in the response.
", "HeadObjectOutput$DeleteMarker": "Specifies whether the object retrieved was (true) or was not (false) a Delete Marker. If false, this response header does not appear in the response.
", "WriteGetObjectResponseRequest$DeleteMarker": "Specifies whether an object stored in Amazon S3 is (true
) or is not (false
) a delete marker.
" } }, "ETag": { "base": null, "refs": { - "CompleteMultipartUploadOutput$ETag": "Entity tag that identifies the newly created object's data. Objects with different object data will have different entity tags. The entity tag is an opaque string. The entity tag may or may not be an MD5 digest of the object data. If the entity tag is not an MD5 digest of the object data, it will contain one or more nonhexadecimal characters and/or will consist of less than 32 or more than 32 hexadecimal digits.
", + "CompleteMultipartUploadOutput$ETag": "Entity tag that identifies the newly created object's data. Objects with different object data will have different entity tags. The entity tag is an opaque string. The entity tag may or may not be an MD5 digest of the object data. If the entity tag is not an MD5 digest of the object data, it will contain one or more nonhexadecimal characters and/or will consist of less than 32 or more than 32 hexadecimal digits. For more information about how the entity tag is calculated, see Checking object integrity in the Amazon S3 User Guide.
", "CompletedPart$ETag": "Entity tag returned when the part was uploaded.
", "CopyObjectResult$ETag": "Returns the ETag of the new object. The ETag reflects only changes to the contents of an object, not its metadata.
", "CopyPartResult$ETag": "Entity tag of the object.
", - "GetObjectOutput$ETag": "An ETag is an opaque identifier assigned by a web server to a specific version of a resource found at a URL.
", - "HeadObjectOutput$ETag": "An ETag is an opaque identifier assigned by a web server to a specific version of a resource found at a URL.
", + "GetObjectAttributesOutput$ETag": "An ETag is an opaque identifier assigned by a web server to a specific version of a resource found at a URL.
", + "GetObjectOutput$ETag": "An entity tag (ETag) is an opaque identifier assigned by a web server to a specific version of a resource found at a URL.
", + "HeadObjectOutput$ETag": "An entity tag (ETag) is an opaque identifier assigned by a web server to a specific version of a resource found at a URL.
", "Object$ETag": "The entity tag is a hash of the object. The ETag reflects changes only to the contents of an object, not its metadata. The ETag may or may not be an MD5 digest of the object data. Whether or not it is depends on how the object was created and how it is encrypted as described below:
Objects created by the PUT Object, POST Object, or Copy operation, or through the Amazon Web Services Management Console, and are encrypted by SSE-S3 or plaintext, have ETags that are an MD5 digest of their object data.
Objects created by the PUT Object, POST Object, or Copy operation, or through the Amazon Web Services Management Console, and are encrypted by SSE-C or SSE-KMS, have ETags that are not an MD5 digest of their object data.
If an object is created by either the Multipart Upload or Part Copy operation, the ETag is not an MD5 digest, regardless of the method of encryption.
", "ObjectVersion$ETag": "The entity tag is an MD5 hash of that version of the object.
", "Part$ETag": "Entity tag returned when the part was uploaded.
", @@ -1246,7 +1386,7 @@ "ErrorCode": { "base": null, "refs": { - "WriteGetObjectResponseRequest$ErrorCode": "A string that uniquely identifies an error condition. Returned in the <Code> tag of the error XML response for a corresponding GetObject
call. Cannot be used with a successful StatusCode
header or when the transformed object is provided in the body. All error codes from S3 are sentence-cased. Regex value is \"^[A-Z][a-zA-Z]+$\".
A string that uniquely identifies an error condition. Returned in the <Code> tag of the error XML response for a corresponding GetObject
call. Cannot be used with a successful StatusCode
header or when the transformed object is provided in the body. All error codes from S3 are sentence-cased. The regular expression (regex) value is \"^[A-Z][a-zA-Z]+$\"
.
" } }, "Expiration": { "base": null, "refs": { - "CompleteMultipartUploadOutput$Expiration": "If the object expiration is configured, this will contain the expiration date (expiry-date) and rule ID (rule-id). The value of rule-id is URL encoded.
", + "CompleteMultipartUploadOutput$Expiration": "If the object expiration is configured, this will contain the expiration date (expiry-date
) and rule ID (rule-id
). The value of rule-id
is URL-encoded.
", "CopyObjectOutput$Expiration": "If the object expiration is configured, the response includes this header.
", - "GetObjectOutput$Expiration": "If the object expiration is configured (see PUT Bucket lifecycle), the response includes this header. It includes the expiry-date and rule-id key-value pairs providing object expiration information. The value of the rule-id is URL encoded.
", - "HeadObjectOutput$Expiration": "If the object expiration is configured (see PUT Bucket lifecycle), the response includes this header. It includes the expiry-date and rule-id key-value pairs providing object expiration information. The value of the rule-id is URL encoded.
", - "PutObjectOutput$Expiration": "If the expiration is configured for the object (see PutBucketLifecycleConfiguration), the response includes this header. It includes the expiry-date and rule-id key-value pairs that provide information about object expiration. The value of the rule-id is URL encoded.
", - "WriteGetObjectResponseRequest$Expiration": "If object stored in Amazon S3 expiration is configured (see PUT Bucket lifecycle) it includes expiry-date and rule-id key-value pairs providing object expiration information. The value of the rule-id is URL encoded.
" + "GetObjectOutput$Expiration": "If the object expiration is configured (see PUT Bucket lifecycle), the response includes this header. It includes the expiry-date
and rule-id
key-value pairs providing object expiration information. The value of the rule-id
is URL-encoded.
", + "HeadObjectOutput$Expiration": "If the object expiration is configured (see PUT Bucket lifecycle), the response includes this header. It includes the expiry-date
and rule-id
key-value pairs providing object expiration information. The value of the rule-id
is URL-encoded.
", + "PutObjectOutput$Expiration": "If the expiration is configured for the object (see PutBucketLifecycleConfiguration), the response includes this header. It includes the expiry-date
and rule-id
key-value pairs that provide information about object expiration. The value of the rule-id
is URL-encoded.
", + "WriteGetObjectResponseRequest$Expiration": "If the object expiration is configured (see PUT Bucket lifecycle), the response includes this header. It includes the expiry-date
and rule-id
key-value pairs that provide the object expiration information. The value of the rule-id
is URL-encoded.
" + } + }, + "GetObjectAttributesParts": { + "base": "A collection of parts associated with a multipart upload.
", + "refs": { + "GetObjectAttributesOutput$ObjectParts": "A collection of parts associated with a multipart upload.
" + } + }, + "GetObjectAttributesRequest": { + "base": null, + "refs": { + } + }, "GetObjectLegalHoldOutput": { "base": null, "refs": { @@ -1658,7 +1814,7 @@ "GetObjectResponseStatusCode": { "base": null, "refs": { - "WriteGetObjectResponseRequest$StatusCode": "The integer status code for an HTTP response of a corresponding GetObject
request.
Status Codes
200 - OK
206 - Partial Content
304 - Not Modified
400 - Bad Request
401 - Unauthorized
403 - Forbidden
404 - Not Found
405 - Method Not Allowed
409 - Conflict
411 - Length Required
412 - Precondition Failed
416 - Range Not Satisfiable
500 - Internal Server Error
503 - Service Unavailable
", + "WriteGetObjectResponseRequest$StatusCode": "The integer status code for an HTTP response of a corresponding GetObject
request.
Status Codes
200 - OK
206 - Partial Content
304 - Not Modified
400 - Bad Request
401 - Unauthorized
403 - Forbidden
404 - Not Found
405 - Method Not Allowed
409 - Conflict
411 - Length Required
412 - Precondition Failed
416 - Range Not Satisfiable
500 - Internal Server Error
503 - Service Unavailable
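The status codes listed above are what an S3 Object Lambda function passes back through WriteGetObjectResponse. A hedged sketch of returning a 200 with a transformed body; the route and token come from the Lambda event's getObjectContext, which is assumed here:

```go
package s3examples

import (
	"strings"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
	"github.com/aws/aws-sdk-go/service/s3/s3iface"
)

// respondTransformed streams a transformed payload back to the GetObject
// caller from inside an S3 Object Lambda handler.
func respondTransformed(svc s3iface.S3API, route, token, payload string) error {
	_, err := svc.WriteGetObjectResponse(&s3.WriteGetObjectResponseInput{
		RequestRoute: aws.String(route), // event's getObjectContext.outputRoute
		RequestToken: aws.String(token), // event's getObjectContext.outputToken
		StatusCode:   aws.Int64(200),    // 200 - OK
		Body:         strings.NewReader(payload),
	})
	return err
}
```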
" } }, "IfMatch": { "base": null, "refs": { - "GetObjectRequest$IfMatch": "Return the object only if its entity tag (ETag) is the same as the one specified, otherwise return a 412 (precondition failed).
", - "HeadObjectRequest$IfMatch": "Return the object only if its entity tag (ETag) is the same as the one specified, otherwise return a 412 (precondition failed).
" + "GetObjectRequest$IfMatch": "Return the object only if its entity tag (ETag) is the same as the one specified; otherwise, return a 412 (precondition failed) error.
", + "HeadObjectRequest$IfMatch": "Return the object only if its entity tag (ETag) is the same as the one specified; otherwise, return a 412 (precondition failed) error.
" } }, "IfModifiedSince": { "base": null, "refs": { - "GetObjectRequest$IfModifiedSince": "Return the object only if it has been modified since the specified time, otherwise return a 304 (not modified).
", - "HeadObjectRequest$IfModifiedSince": "Return the object only if it has been modified since the specified time, otherwise return a 304 (not modified).
" + "GetObjectRequest$IfModifiedSince": "Return the object only if it has been modified since the specified time; otherwise, return a 304 (not modified) error.
", + "HeadObjectRequest$IfModifiedSince": "Return the object only if it has been modified since the specified time; otherwise, return a 304 (not modified) error.
" } }, "IfNoneMatch": { "base": null, "refs": { - "GetObjectRequest$IfNoneMatch": "Return the object only if its entity tag (ETag) is different from the one specified, otherwise return a 304 (not modified).
", - "HeadObjectRequest$IfNoneMatch": "Return the object only if its entity tag (ETag) is different from the one specified, otherwise return a 304 (not modified).
" + "GetObjectRequest$IfNoneMatch": "Return the object only if its entity tag (ETag) is different from the one specified; otherwise, return a 304 (not modified) error.
", + "HeadObjectRequest$IfNoneMatch": "Return the object only if its entity tag (ETag) is different from the one specified; otherwise, return a 304 (not modified) error.
" } }, "IfUnmodifiedSince": { "base": null, "refs": { - "GetObjectRequest$IfUnmodifiedSince": "Return the object only if it has not been modified since the specified time, otherwise return a 412 (precondition failed).
", - "HeadObjectRequest$IfUnmodifiedSince": "Return the object only if it has not been modified since the specified time, otherwise return a 412 (precondition failed).
" + "GetObjectRequest$IfUnmodifiedSince": "Return the object only if it has not been modified since the specified time; otherwise, return a 412 (precondition failed) error.
", + "HeadObjectRequest$IfUnmodifiedSince": "Return the object only if it has not been modified since the specified time; otherwise, return a 412 (precondition failed) error.
" } }, "IndexDocument": { @@ -2046,8 +2202,9 @@ "IsTruncated": { "base": null, "refs": { + "GetObjectAttributesParts$IsTruncated": "Indicates whether the returned list of parts is truncated. A value of true
indicates that the list was truncated. A list can be truncated if the number of parts exceeds the limit returned in the MaxParts
element.
", "ListBucketAnalyticsConfigurationsOutput$IsTruncated": "Indicates whether the returned list of analytics configurations is complete. A value of true indicates that the list is not complete and the NextContinuationToken will be provided for a subsequent request.
", - "ListBucketIntelligentTieringConfigurationsOutput$IsTruncated": "Indicates whether the returned list of analytics configurations is complete. A value of true indicates that the list is not complete and the NextContinuationToken will be provided for a subsequent request.
", + "ListBucketIntelligentTieringConfigurationsOutput$IsTruncated": "Indicates whether the returned list of analytics configurations is complete. A value of true
indicates that the list is not complete and the NextContinuationToken
will be provided for a subsequent request.
Tells whether the returned list of inventory configurations is complete. A value of true indicates that the list is not complete and the NextContinuationToken is provided for a subsequent request.
", "ListBucketMetricsConfigurationsOutput$IsTruncated": "Indicates whether the returned list of metrics configurations is complete. A value of true indicates that the list is not complete and the NextContinuationToken will be provided for a subsequent request.
", "ListMultipartUploadsOutput$IsTruncated": "Indicates whether the returned list of multipart uploads is truncated. A value of true indicates that the list was truncated. The list can be truncated if the number of multipart uploads exceeds the limit allowed or specified by max uploads.
", @@ -2126,6 +2283,7 @@ "CopyObjectResult$LastModified": "Creation date of the object.
", "CopyPartResult$LastModified": "Date and time at which the object was uploaded.
", "DeleteMarkerEntry$LastModified": "Date and time the object was last modified.
", + "GetObjectAttributesOutput$LastModified": "The creation date of the object.
", "GetObjectOutput$LastModified": "Creation date of the object.
", "HeadObjectOutput$LastModified": "Creation date of the object.
", "Object$LastModified": "Creation date of the object.
", @@ -2162,7 +2320,7 @@ "LifecycleRuleFilter": { "base": "The Filter
is used to identify objects that a Lifecycle Rule applies to. A Filter
must have exactly one of Prefix
, Tag
, or And
specified.
The Filter
is used to identify objects that a Lifecycle Rule applies to. A Filter
must have exactly one of Prefix
, Tag
, or And
specified. Filter
is required if the LifecycleRule
does not containt a Prefix
element.
The Filter
is used to identify objects that a Lifecycle Rule applies to. A Filter
must have exactly one of Prefix
, Tag
, or And
specified. Filter
is required if the LifecycleRule
does not contain a Prefix
element.
" } }, "Location": { "base": null, "refs": { "CompleteMultipartUploadOutput$Location": "The URI that identifies the newly created object.
", - "CreateBucketOutput$Location": "Specifies the Region where the bucket will be created. If you are creating a bucket on the US East (N. Virginia) Region (us-east-1), you do not need to specify the location.
" + "CreateBucketOutput$Location": "A forward slash followed by the name of the bucket.
" } }, "LocationPrefix": { @@ -2334,6 +2492,8 @@ "MaxParts": { "base": null, "refs": { + "GetObjectAttributesParts$MaxParts": "The maximum number of parts allowed in the response.
", + "GetObjectAttributesRequest$MaxParts": "Sets the maximum number of parts to return.
", "ListPartsOutput$MaxParts": "Maximum number of parts that were allowed in the response.
", "ListPartsRequest$MaxParts": "Sets the maximum number of parts to return.
" } @@ -2490,6 +2650,7 @@ "NextPartNumberMarker": { "base": null, "refs": { + "GetObjectAttributesParts$NextPartNumberMarker": "When a list is truncated, this element specifies the last part in the list, as well as the value to use for the PartNumberMarker
request parameter in a subsequent request.
When a list is truncated, this element specifies the last part in the list, as well as the value to use for the part-number-marker request parameter in a subsequent request.
" } }, @@ -2592,6 +2753,18 @@ "refs": { } }, + "ObjectAttributes": { + "base": null, + "refs": { + "ObjectAttributesList$member": null + } + }, + "ObjectAttributesList": { + "base": null, + "refs": { + "GetObjectAttributesRequest$ObjectAttributes": "An XML header that specifies the fields at the root level that you want returned in the response. Fields that you do not specify are not returned.
" + } + }, "ObjectCannedACL": { "base": null, "refs": { @@ -2630,7 +2803,8 @@ "Error$Key": "The error key.
", "ErrorDocument$Key": "The object key name to use when a 4XX class error occurs.
Replacement must be made for object keys containing special characters (such as carriage returns) when using XML requests. For more information, see XML related object key constraints.
The key of the object for which to get the ACL information.
", - "GetObjectLegalHoldRequest$Key": "The key name for the object whose Legal Hold status you want to retrieve.
", + "GetObjectAttributesRequest$Key": "The object key.
", + "GetObjectLegalHoldRequest$Key": "The key name for the object whose legal hold status you want to retrieve.
", "GetObjectRequest$Key": "Key of the object to get.
", "GetObjectRetentionRequest$Key": "The key name for the object whose retention settings you want to retrieve.
", "GetObjectTaggingRequest$Key": "Object key for which to get the tagging information.
", @@ -2642,8 +2816,8 @@ "Object$Key": "The name that you assign to an object. You use the object key to retrieve the object.
", "ObjectIdentifier$Key": "Key name of the object.
Replacement must be made for object keys containing special characters (such as carriage returns) when using XML requests. For more information, see XML related object key constraints.
The object key.
", - "PutObjectAclRequest$Key": "Key for which the PUT action was initiated.
When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
When using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action using S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon S3 User Guide.
", - "PutObjectLegalHoldRequest$Key": "The key name for the object that you want to place a Legal Hold on.
", + "PutObjectAclRequest$Key": "Key for which the PUT action was initiated.
When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
When using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com
. When using this action with S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts in the Amazon S3 User Guide.
The key name for the object that you want to place a legal hold on.
", "PutObjectRequest$Key": "Object key for which the PUT action was initiated.
", "PutObjectRetentionRequest$Key": "The key name for the object that you want to apply this Object Retention configuration to.
", "PutObjectTaggingRequest$Key": "Name of the object key.
", @@ -2681,20 +2855,20 @@ } }, "ObjectLockLegalHold": { - "base": "A Legal Hold configuration for an object.
", + "base": "A legal hold configuration for an object.
", "refs": { - "GetObjectLegalHoldOutput$LegalHold": "The current Legal Hold status for the specified object.
", - "PutObjectLegalHoldRequest$LegalHold": "Container element for the Legal Hold configuration you want to apply to the specified object.
" + "GetObjectLegalHoldOutput$LegalHold": "The current legal hold status for the specified object.
", + "PutObjectLegalHoldRequest$LegalHold": "Container element for the legal hold configuration you want to apply to the specified object.
" } }, "ObjectLockLegalHoldStatus": { "base": null, "refs": { - "CopyObjectRequest$ObjectLockLegalHoldStatus": "Specifies whether you want to apply a Legal Hold to the copied object.
", - "CreateMultipartUploadRequest$ObjectLockLegalHoldStatus": "Specifies whether you want to apply a Legal Hold to the uploaded object.
", + "CopyObjectRequest$ObjectLockLegalHoldStatus": "Specifies whether you want to apply a legal hold to the copied object.
", + "CreateMultipartUploadRequest$ObjectLockLegalHoldStatus": "Specifies whether you want to apply a legal hold to the uploaded object.
", "GetObjectOutput$ObjectLockLegalHoldStatus": "Indicates whether this object has an active legal hold. This field is only returned if you have permission to view an object's legal hold status.
", "HeadObjectOutput$ObjectLockLegalHoldStatus": "Specifies whether a legal hold is in effect for this object. This header is only returned if the requester has the s3:GetObjectLegalHold
permission. This header is not returned if the specified version of this object has never had a legal hold applied. For more information about S3 Object Lock, see Object Lock.
Indicates whether the specified object has a Legal Hold in place.
", + "ObjectLockLegalHold$Status": "Indicates whether the specified object has a legal hold in place.
", "PutObjectRequest$ObjectLockLegalHoldStatus": "Specifies whether a legal hold will be applied to this object. For more information about S3 Object Lock, see Object Lock.
", "WriteGetObjectResponseRequest$ObjectLockLegalHoldStatus": "Indicates whether an object stored in Amazon S3 has an active legal hold.
" } @@ -2760,6 +2934,18 @@ "OwnershipControlsRule$ObjectOwnership": null } }, + "ObjectPart": { + "base": "A container for elements related to an individual part.
", + "refs": { + "PartsList$member": null + } + }, + "ObjectSize": { + "base": null, + "refs": { + "GetObjectAttributesOutput$ObjectSize": "The size of the object in bytes.
" + } + }, "ObjectSizeGreaterThanBytes": { "base": null, "refs": { @@ -2799,7 +2985,9 @@ "DeletedObject$VersionId": "The version ID of the deleted object.
", "Error$VersionId": "The version ID of the error.
", "GetObjectAclRequest$VersionId": "VersionId used to reference a specific version of the object.
", - "GetObjectLegalHoldRequest$VersionId": "The version ID of the object whose Legal Hold status you want to retrieve.
", + "GetObjectAttributesOutput$VersionId": "The version ID of the object.
", + "GetObjectAttributesRequest$VersionId": "The version ID used to reference a specific version of the object.
", + "GetObjectLegalHoldRequest$VersionId": "The version ID of the object whose legal hold status you want to retrieve.
", "GetObjectOutput$VersionId": "Version of the object.
", "GetObjectRequest$VersionId": "VersionId used to reference a specific version of the object.
", "GetObjectRetentionRequest$VersionId": "The version ID for the object whose retention settings you want to retrieve.
", @@ -2810,7 +2998,7 @@ "ObjectIdentifier$VersionId": "VersionId for the specific version of the object to delete.
", "ObjectVersion$VersionId": "Version ID of an object.
", "PutObjectAclRequest$VersionId": "VersionId used to reference a specific version of the object.
", - "PutObjectLegalHoldRequest$VersionId": "The version ID of the object that you want to place a Legal Hold on.
", + "PutObjectLegalHoldRequest$VersionId": "The version ID of the object that you want to place a legal hold on.
", "PutObjectOutput$VersionId": "Version of the object.
", "PutObjectRetentionRequest$VersionId": "The version ID for the object that you want to apply this Object Retention configuration to.
", "PutObjectTaggingOutput$VersionId": "The versionId of the object the tag-set was added to.
", @@ -2901,6 +3089,7 @@ "CompletedPart$PartNumber": "Part number that identifies the part. This is a positive integer between 1 and 10,000.
", "GetObjectRequest$PartNumber": "Part number of the object being read. This is a positive integer between 1 and 10,000. Effectively performs a 'ranged' GET request for the part specified. Useful for downloading just a part of an object.
", "HeadObjectRequest$PartNumber": "Part number of the object being read. This is a positive integer between 1 and 10,000. Effectively performs a 'ranged' HEAD request for the part specified. Useful querying about the size of the part and the number of parts in this object.
", + "ObjectPart$PartNumber": "The part number identifying the part. This value is a positive integer between 1 and 10,000.
", "Part$PartNumber": "Part number identifying the part. This is a positive integer between 1 and 10,000.
", "UploadPartCopyRequest$PartNumber": "Part number of part being copied. This is a positive integer between 1 and 10,000.
", "UploadPartRequest$PartNumber": "Part number of part being uploaded. This is a positive integer between 1 and 10,000.
" @@ -2909,6 +3098,8 @@ "PartNumberMarker": { "base": null, "refs": { + "GetObjectAttributesParts$PartNumberMarker": "The marker for the current part.
", + "GetObjectAttributesRequest$PartNumberMarker": "Specifies the part after which listing should begin. Only parts with higher part numbers will be listed.
", "ListPartsOutput$PartNumberMarker": "When a list is truncated, this element specifies the last part in the list, as well as the value to use for the part-number-marker request parameter in a subsequent request.
", "ListPartsRequest$PartNumberMarker": "Specifies the part after which listing should begin. Only parts with higher part numbers will be listed.
" } @@ -2922,11 +3113,18 @@ "PartsCount": { "base": null, "refs": { - "GetObjectOutput$PartsCount": "The count of parts this object has.
", - "HeadObjectOutput$PartsCount": "The count of parts this object has.
", + "GetObjectAttributesParts$TotalPartsCount": "The total number of parts.
", + "GetObjectOutput$PartsCount": "The count of parts this object has. This value is only returned if you specify partNumber
in your request and the object was uploaded as a multipart upload.
The count of parts this object has. This value is only returned if you specify partNumber
in your request and the object was uploaded as a multipart upload.
The count of parts this object has.
" } }, + "PartsList": { + "base": null, + "refs": { + "GetObjectAttributesParts$Parts": "A container for elements related to a particular part. A response can contain zero or more Parts
elements.
A single character used for escaping the quotation mark character inside an already escaped value. For example, the value \"\"\" a , b \"\"\" is parsed as \" a , b \".
", + "CSVInput$QuoteEscapeCharacter": "A single character used for escaping the quotation mark character inside an already escaped value. For example, the value \"\"\" a , b \"\"\"
is parsed as \" a , b \"
.
The single character used for escaping the quote character inside an already escaped value.
" } }, @@ -3336,7 +3534,7 @@ "base": null, "refs": { "GetObjectOutput$ReplicationStatus": "Amazon S3 can return this if your request involves a bucket that is either a source or destination in a replication rule.
", - "HeadObjectOutput$ReplicationStatus": "Amazon S3 can return this header if your request involves a bucket that is either a source or a destination in a replication rule.
In replication, you have a source bucket on which you configure replication and destination bucket or buckets where Amazon S3 stores object replicas. When you request an object (GetObject
) or object metadata (HeadObject
) from these buckets, Amazon S3 will return the x-amz-replication-status
header in the response as follows:
If requesting an object from the source bucket — Amazon S3 will return the x-amz-replication-status
header if the object in your request is eligible for replication.
For example, suppose that in your replication configuration, you specify object prefix TaxDocs
requesting Amazon S3 to replicate objects with key prefix TaxDocs
. Any objects you upload with this key name prefix, for example TaxDocs/document1.pdf
, are eligible for replication. For any object request with this key name prefix, Amazon S3 will return the x-amz-replication-status
header with value PENDING, COMPLETED or FAILED indicating object replication status.
If requesting an object from a destination bucket — Amazon S3 will return the x-amz-replication-status
header with value REPLICA if the object in your request is a replica that Amazon S3 created and there is no replica modification replication in progress.
When replicating objects to multiple destination buckets the x-amz-replication-status
header acts differently. The header of the source object will only return a value of COMPLETED when replication is successful to all destinations. The header will remain at value PENDING until replication has completed for all destinations. If one or more destinations fails replication the header will return FAILED.
For more information, see Replication.
", + "HeadObjectOutput$ReplicationStatus": "Amazon S3 can return this header if your request involves a bucket that is either a source or a destination in a replication rule.
In replication, you have a source bucket on which you configure replication and destination bucket or buckets where Amazon S3 stores object replicas. When you request an object (GetObject
) or object metadata (HeadObject
) from these buckets, Amazon S3 will return the x-amz-replication-status
header in the response as follows:
If requesting an object from the source bucket, Amazon S3 will return the x-amz-replication-status
header if the object in your request is eligible for replication.
For example, suppose that in your replication configuration, you specify object prefix TaxDocs
requesting Amazon S3 to replicate objects with key prefix TaxDocs
. Any objects you upload with this key name prefix, for example TaxDocs/document1.pdf
, are eligible for replication. For any object request with this key name prefix, Amazon S3 will return the x-amz-replication-status
header with value PENDING, COMPLETED or FAILED indicating object replication status.
If requesting an object from a destination bucket, Amazon S3 will return the x-amz-replication-status
header with value REPLICA if the object in your request is a replica that Amazon S3 created and there is no replica modification replication in progress.
When replicating objects to multiple destination buckets, the x-amz-replication-status
header acts differently. The header of the source object will only return a value of COMPLETED when replication is successful to all destinations. The header will remain at value PENDING until replication has completed for all destinations. If one or more destinations fails replication the header will return FAILED.
For more information, see Replication.
", "WriteGetObjectResponseRequest$ReplicationStatus": "Indicates if request involves bucket that is either a source or destination in a Replication rule. For more information about S3 Replication, see Replication.
" } }, @@ -3369,6 +3567,7 @@ "DeleteObjectOutput$RequestCharged": null, "DeleteObjectsOutput$RequestCharged": null, "GetObjectAclOutput$RequestCharged": null, + "GetObjectAttributesOutput$RequestCharged": null, "GetObjectOutput$RequestCharged": null, "GetObjectTorrentOutput$RequestCharged": null, "HeadObjectOutput$RequestCharged": null, @@ -3385,7 +3584,7 @@ } }, "RequestPayer": { - "base": "Confirms that the requester knows that they will be charged for the request. Bucket owners need not specify this parameter in their requests. For information about downloading objects from requester pays buckets, see Downloading Objects in Requestor Pays Buckets in the Amazon S3 User Guide.
", + "base": "Confirms that the requester knows that they will be charged for the request. Bucket owners need not specify this parameter in their requests. For information about downloading objects from Requester Pays buckets, see Downloading Objects in Requester Pays Buckets in the Amazon S3 User Guide.
", "refs": { "AbortMultipartUploadRequest$RequestPayer": null, "CompleteMultipartUploadRequest$RequestPayer": null, @@ -3394,6 +3593,7 @@ "DeleteObjectRequest$RequestPayer": null, "DeleteObjectsRequest$RequestPayer": null, "GetObjectAclRequest$RequestPayer": null, + "GetObjectAttributesRequest$RequestPayer": null, "GetObjectLegalHoldRequest$RequestPayer": null, "GetObjectRequest$RequestPayer": null, "GetObjectRetentionRequest$RequestPayer": null, @@ -3557,17 +3757,20 @@ "SSECustomerAlgorithm": { "base": null, "refs": { + "CompleteMultipartUploadRequest$SSECustomerAlgorithm": "The server-side encryption (SSE) algorithm used to encrypt the object. This parameter is needed only when the object was created using a checksum algorithm. For more information, see Protecting data using SSE-C keys in the Amazon S3 User Guide.
", "CopyObjectOutput$SSECustomerAlgorithm": "If server-side encryption with a customer-provided encryption key was requested, the response will include this header confirming the encryption algorithm used.
", "CopyObjectRequest$SSECustomerAlgorithm": "Specifies the algorithm to use to when encrypting the object (for example, AES256).
", "CreateMultipartUploadOutput$SSECustomerAlgorithm": "If server-side encryption with a customer-provided encryption key was requested, the response will include this header confirming the encryption algorithm used.
", "CreateMultipartUploadRequest$SSECustomerAlgorithm": "Specifies the algorithm to use to when encrypting the object (for example, AES256).
", + "GetObjectAttributesRequest$SSECustomerAlgorithm": "Specifies the algorithm to use when encrypting the object (for example, AES256).
", "GetObjectOutput$SSECustomerAlgorithm": "If server-side encryption with a customer-provided encryption key was requested, the response will include this header confirming the encryption algorithm used.
", "GetObjectRequest$SSECustomerAlgorithm": "Specifies the algorithm to use to when decrypting the object (for example, AES256).
", "HeadObjectOutput$SSECustomerAlgorithm": "If server-side encryption with a customer-provided encryption key was requested, the response will include this header confirming the encryption algorithm used.
", "HeadObjectRequest$SSECustomerAlgorithm": "Specifies the algorithm to use to when encrypting the object (for example, AES256).
", + "ListPartsRequest$SSECustomerAlgorithm": "The server-side encryption (SSE) algorithm used to encrypt the object. This parameter is needed only when the object was created using a checksum algorithm. For more information, see Protecting data using SSE-C keys in the Amazon S3 User Guide.
", "PutObjectOutput$SSECustomerAlgorithm": "If server-side encryption with a customer-provided encryption key was requested, the response will include this header confirming the encryption algorithm used.
", "PutObjectRequest$SSECustomerAlgorithm": "Specifies the algorithm to use to when encrypting the object (for example, AES256).
", - "SelectObjectContentRequest$SSECustomerAlgorithm": "The SSE Algorithm used to encrypt the object. For more information, see Server-Side Encryption (Using Customer-Provided Encryption Keys.
", + "SelectObjectContentRequest$SSECustomerAlgorithm": "The server-side encryption (SSE) algorithm used to encrypt the object. This parameter is needed only when the object was created using a checksum algorithm. For more information, see Protecting data using SSE-C keys in the Amazon S3 User Guide.
", "UploadPartCopyOutput$SSECustomerAlgorithm": "If server-side encryption with a customer-provided encryption key was requested, the response will include this header confirming the encryption algorithm used.
", "UploadPartCopyRequest$SSECustomerAlgorithm": "Specifies the algorithm to use to when encrypting the object (for example, AES256).
", "UploadPartOutput$SSECustomerAlgorithm": "If server-side encryption with a customer-provided encryption key was requested, the response will include this header confirming the encryption algorithm used.
", @@ -3578,12 +3781,15 @@ "SSECustomerKey": { "base": null, "refs": { + "CompleteMultipartUploadRequest$SSECustomerKey": "The server-side encryption (SSE) customer managed key. This parameter is needed only when the object was created using a checksum algorithm. For more information, see Protecting data using SSE-C keys in the Amazon S3 User Guide.
", "CopyObjectRequest$SSECustomerKey": "Specifies the customer-provided encryption key for Amazon S3 to use in encrypting data. This value is used to store the object and then it is discarded; Amazon S3 does not store the encryption key. The key must be appropriate for use with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
header.
Specifies the customer-provided encryption key for Amazon S3 to use in encrypting data. This value is used to store the object and then it is discarded; Amazon S3 does not store the encryption key. The key must be appropriate for use with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
header.
Specifies the customer-provided encryption key for Amazon S3 to use in encrypting data. This value is used to store the object and then it is discarded; Amazon S3 does not store the encryption key. The key must be appropriate for use with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
header.
Specifies the customer-provided encryption key for Amazon S3 used to encrypt the data. This value is used to decrypt the object when recovering it and must match the one used when storing the data. The key must be appropriate for use with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
header.
Specifies the customer-provided encryption key for Amazon S3 to use in encrypting data. This value is used to store the object and then it is discarded; Amazon S3 does not store the encryption key. The key must be appropriate for use with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
header.
The server-side encryption (SSE) customer managed key. This parameter is needed only when the object was created using a checksum algorithm. For more information, see Protecting data using SSE-C keys in the Amazon S3 User Guide.
", "PutObjectRequest$SSECustomerKey": "Specifies the customer-provided encryption key for Amazon S3 to use in encrypting data. This value is used to store the object and then it is discarded; Amazon S3 does not store the encryption key. The key must be appropriate for use with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
header.
The SSE Customer Key. For more information, see Server-Side Encryption (Using Customer-Provided Encryption Keys.
", + "SelectObjectContentRequest$SSECustomerKey": "The server-side encryption (SSE) customer managed key. This parameter is needed only when the object was created using a checksum algorithm. For more information, see Protecting data using SSE-C keys in the Amazon S3 User Guide.
", "UploadPartCopyRequest$SSECustomerKey": "Specifies the customer-provided encryption key for Amazon S3 to use in encrypting data. This value is used to store the object and then it is discarded; Amazon S3 does not store the encryption key. The key must be appropriate for use with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
header. This must be the same encryption key specified in the initiate multipart upload request.
Specifies the customer-provided encryption key for Amazon S3 to use in encrypting data. This value is used to store the object and then it is discarded; Amazon S3 does not store the encryption key. The key must be appropriate for use with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm header
. This must be the same encryption key specified in the initiate multipart upload request.
The MD5 server-side encryption (SSE) customer managed key. This parameter is needed only when the object was created using a checksum algorithm. For more information, see Protecting data using SSE-C keys in the Amazon S3 User Guide.
", "CopyObjectOutput$SSECustomerKeyMD5": "If server-side encryption with a customer-provided encryption key was requested, the response will include this header to provide round-trip message integrity verification of the customer-provided encryption key.
", "CopyObjectRequest$SSECustomerKeyMD5": "Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. Amazon S3 uses this header for a message integrity check to ensure that the encryption key was transmitted without error.
", "CreateMultipartUploadOutput$SSECustomerKeyMD5": "If server-side encryption with a customer-provided encryption key was requested, the response will include this header to provide round-trip message integrity verification of the customer-provided encryption key.
", "CreateMultipartUploadRequest$SSECustomerKeyMD5": "Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. Amazon S3 uses this header for a message integrity check to ensure that the encryption key was transmitted without error.
", + "GetObjectAttributesRequest$SSECustomerKeyMD5": "Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. Amazon S3 uses this header for a message integrity check to ensure that the encryption key was transmitted without error.
", "GetObjectOutput$SSECustomerKeyMD5": "If server-side encryption with a customer-provided encryption key was requested, the response will include this header to provide round-trip message integrity verification of the customer-provided encryption key.
", "GetObjectRequest$SSECustomerKeyMD5": "Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. Amazon S3 uses this header for a message integrity check to ensure that the encryption key was transmitted without error.
", "HeadObjectOutput$SSECustomerKeyMD5": "If server-side encryption with a customer-provided encryption key was requested, the response will include this header to provide round-trip message integrity verification of the customer-provided encryption key.
", "HeadObjectRequest$SSECustomerKeyMD5": "Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. Amazon S3 uses this header for a message integrity check to ensure that the encryption key was transmitted without error.
", + "ListPartsRequest$SSECustomerKeyMD5": "The MD5 server-side encryption (SSE) customer managed key. This parameter is needed only when the object was created using a checksum algorithm. For more information, see Protecting data using SSE-C keys in the Amazon S3 User Guide.
", "PutObjectOutput$SSECustomerKeyMD5": "If server-side encryption with a customer-provided encryption key was requested, the response will include this header to provide round-trip message integrity verification of the customer-provided encryption key.
", "PutObjectRequest$SSECustomerKeyMD5": "Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. Amazon S3 uses this header for a message integrity check to ensure that the encryption key was transmitted without error.
", - "SelectObjectContentRequest$SSECustomerKeyMD5": "The SSE Customer Key MD5. For more information, see Server-Side Encryption (Using Customer-Provided Encryption Keys.
", + "SelectObjectContentRequest$SSECustomerKeyMD5": "The MD5 server-side encryption (SSE) customer managed key. This parameter is needed only when the object was created using a checksum algorithm. For more information, see Protecting data using SSE-C keys in the Amazon S3 User Guide.
", "UploadPartCopyOutput$SSECustomerKeyMD5": "If server-side encryption with a customer-provided encryption key was requested, the response will include this header to provide round-trip message integrity verification of the customer-provided encryption key.
", "UploadPartCopyRequest$SSECustomerKeyMD5": "Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. Amazon S3 uses this header for a message integrity check to ensure that the encryption key was transmitted without error.
", "UploadPartOutput$SSECustomerKeyMD5": "If server-side encryption with a customer-provided encryption key was requested, the response will include this header to provide round-trip message integrity verification of the customer-provided encryption key.
", @@ -3727,7 +3936,7 @@ "Setting": { "base": null, "refs": { - "PublicAccessBlockConfiguration$BlockPublicAcls": "Specifies whether Amazon S3 should block public access control lists (ACLs) for this bucket and objects in this bucket. Setting this element to TRUE
causes the following behavior:
PUT Bucket acl and PUT Object acl calls fail if the specified ACL is public.
PUT Object calls fail if the request includes a public ACL.
PUT Bucket calls fail if the request includes a public ACL.
Enabling this setting doesn't affect existing policies or ACLs.
", + "PublicAccessBlockConfiguration$BlockPublicAcls": "Specifies whether Amazon S3 should block public access control lists (ACLs) for this bucket and objects in this bucket. Setting this element to TRUE
causes the following behavior:
PUT Bucket ACL and PUT Object ACL calls fail if the specified ACL is public.
PUT Object calls fail if the request includes a public ACL.
PUT Bucket calls fail if the request includes a public ACL.
Enabling this setting doesn't affect existing policies or ACLs.
", "PublicAccessBlockConfiguration$IgnorePublicAcls": "Specifies whether Amazon S3 should ignore public ACLs for this bucket and objects in this bucket. Setting this element to TRUE
causes Amazon S3 to ignore all public ACLs on this bucket and objects in this bucket.
Enabling this setting doesn't affect the persistence of any existing ACLs and doesn't prevent new public ACLs from being set.
", "PublicAccessBlockConfiguration$BlockPublicPolicy": "Specifies whether Amazon S3 should block public bucket policies for this bucket. Setting this element to TRUE
causes Amazon S3 to reject calls to PUT Bucket policy if the specified bucket policy allows public access.
Enabling this setting doesn't affect existing bucket policies.
", "PublicAccessBlockConfiguration$RestrictPublicBuckets": "Specifies whether Amazon S3 should restrict public bucket policies for this bucket. Setting this element to TRUE
restricts access to this bucket to only Amazon Web Service principals and authorized users within this account if the bucket has a public policy.
Enabling this setting doesn't affect previously stored bucket policies, except that public and cross-account access within any public bucket policy, including non-public delegation to specific accounts, is blocked.
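The four settings documented above correspond to PublicAccessBlockConfiguration fields on a PutPublicAccessBlock call. A hedged sketch (not part of this patch); the bucket name is a placeholder:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	// Enable all four Block Public Access settings for the bucket.
	_, err := svc.PutPublicAccessBlock(&s3.PutPublicAccessBlockInput{
		Bucket: aws.String("examplebucket"),
		PublicAccessBlockConfiguration: &s3.PublicAccessBlockConfiguration{
			BlockPublicAcls:       aws.Bool(true), // reject public ACLs on PUT
			IgnorePublicAcls:      aws.Bool(true), // ignore existing public ACLs
			BlockPublicPolicy:     aws.Bool(true), // reject public bucket policies
			RestrictPublicBuckets: aws.Bool(true), // restrict access under a public policy
		},
	})
	if err != nil {
		fmt.Println(err)
	}
}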
" @@ -3737,6 +3946,7 @@ "base": null, "refs": { "Object$Size": "Size in bytes of the object
", + "ObjectPart$Size": "The size of the uploaded part in bytes.
", "ObjectVersion$Size": "Size in bytes of the object.
", "Part$Size": "Size in bytes of the uploaded part data.
" } @@ -3768,7 +3978,7 @@ "Start": { "base": null, "refs": { - "ScanRange$Start": "Specifies the start of the byte range. This parameter is optional. Valid values: non-negative integers. The default value is 0. If only start is supplied, it means scan from that point to the end of the file.For example; <scanrange><start>50</start></scanrange>
means scan from byte 50 until the end of the file.
Specifies the start of the byte range. This parameter is optional. Valid values: non-negative integers. The default value is 0. If only start
is supplied, it means scan from that point to the end of the file. For example, <scanrange><start>50</start></scanrange>
means scan from byte 50 until the end of the file.
By default, Amazon S3 uses the STANDARD Storage Class to store newly created objects. The STANDARD storage class provides high durability and high availability. Depending on performance needs, you can specify a different Storage Class. Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. For more information, see Storage Classes in the Amazon S3 User Guide.
", "CreateMultipartUploadRequest$StorageClass": "By default, Amazon S3 uses the STANDARD Storage Class to store newly created objects. The STANDARD storage class provides high durability and high availability. Depending on performance needs, you can specify a different Storage Class. Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. For more information, see Storage Classes in the Amazon S3 User Guide.
", "Destination$StorageClass": "The storage class to use when replicating objects, such as S3 Standard or reduced redundancy. By default, Amazon S3 uses the storage class of the source object to create the object replica.
For valid values, see the StorageClass
element of the PUT Bucket replication action in the Amazon S3 API Reference.
Provides the storage class information of the object. Amazon S3 returns this header for all objects except for S3 Standard storage class objects.
For more information, see Storage Classes.
", "GetObjectOutput$StorageClass": "Provides storage class information of the object. Amazon S3 returns this header for all objects except for S3 Standard storage class objects.
", "HeadObjectOutput$StorageClass": "Provides storage class information of the object. Amazon S3 returns this header for all objects except for S3 Standard storage class objects.
For more information, see Storage Classes.
", "InvalidObjectState$StorageClass": null, @@ -3803,7 +4014,7 @@ "MultipartUpload$StorageClass": "The class of storage used to store the object.
", "PutObjectRequest$StorageClass": "By default, Amazon S3 uses the STANDARD Storage Class to store newly created objects. The STANDARD storage class provides high durability and high availability. Depending on performance needs, you can specify a different Storage Class. Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. For more information, see Storage Classes in the Amazon S3 User Guide.
", "S3Location$StorageClass": "The class of storage used to store the restore results.
", - "WriteGetObjectResponseRequest$StorageClass": "The class of storage used to store object in Amazon S3.
" + "WriteGetObjectResponseRequest$StorageClass": "Provides storage class information of the object. Amazon S3 returns this header for all objects except for S3 Standard storage class objects.
For more information, see Storage Classes.
" } }, "StorageClassAnalysis": { @@ -3931,8 +4142,8 @@ "refs": { "ListBucketAnalyticsConfigurationsOutput$ContinuationToken": "The marker that is used as a starting point for this analytics configuration list response. This value is present if it was sent in the request.
", "ListBucketAnalyticsConfigurationsRequest$ContinuationToken": "The ContinuationToken that represents a placeholder from where this request should begin.
", - "ListBucketIntelligentTieringConfigurationsOutput$ContinuationToken": "The ContinuationToken that represents a placeholder from where this request should begin.
", - "ListBucketIntelligentTieringConfigurationsRequest$ContinuationToken": "The ContinuationToken that represents a placeholder from where this request should begin.
", + "ListBucketIntelligentTieringConfigurationsOutput$ContinuationToken": "The ContinuationToken
that represents a placeholder from where this request should begin.
The ContinuationToken
that represents a placeholder from where this request should begin.
If sent in the request, the marker that is used as a starting point for this inventory configuration list response.
", "ListBucketInventoryConfigurationsRequest$ContinuationToken": "The marker used to continue an inventory configuration listing that has been truncated. Use the NextContinuationToken from a previously truncated list response to continue the listing. The continuation token is an opaque value that Amazon S3 understands.
", "ListBucketMetricsConfigurationsOutput$ContinuationToken": "The marker that is used as a starting point for this metrics configuration list response. This value is present if it was sent in the request.
",
diff --git a/models/apis/s3/2006-03-01/examples-1.json b/models/apis/s3/2006-03-01/examples-1.json
index 5f8e6731e3f..6cababa94fa 100644
--- a/models/apis/s3/2006-03-01/examples-1.json
+++ b/models/apis/s3/2006-03-01/examples-1.json
@@ -84,13 +84,10 @@
     "CreateBucket": [
       {
         "input": {
-          "Bucket": "examplebucket",
-          "CreateBucketConfiguration": {
-            "LocationConstraint": "eu-west-1"
-          }
+          "Bucket": "examplebucket"
         },
         "output": {
-          "Location": "http://examplebucket.
Indicates the algorithm you want Amazon S3 to use to create the checksum. For more information see Checking object integrity in the Amazon S3 User Guide.
" + } + }, "S3ContentLength": { "base": null, "refs": { diff --git a/models/apis/transfer/2018-11-05/paginators-1.json b/models/apis/transfer/2018-11-05/paginators-1.json index c839e62e986..0a5276f7650 100644 --- a/models/apis/transfer/2018-11-05/paginators-1.json +++ b/models/apis/transfer/2018-11-05/paginators-1.json @@ -2,38 +2,57 @@ "pagination": { "ListAccesses": { "input_token": "NextToken", + "limit_key": "MaxResults", + "non_aggregate_keys": [ + "ServerId" + ], "output_token": "NextToken", - "limit_key": "MaxResults" + "result_key": "Accesses" }, "ListExecutions": { "input_token": "NextToken", + "limit_key": "MaxResults", + "non_aggregate_keys": [ + "WorkflowId" + ], "output_token": "NextToken", - "limit_key": "MaxResults" + "result_key": "Executions" }, "ListSecurityPolicies": { "input_token": "NextToken", + "limit_key": "MaxResults", "output_token": "NextToken", - "limit_key": "MaxResults" + "result_key": "SecurityPolicyNames" }, "ListServers": { "input_token": "NextToken", + "limit_key": "MaxResults", "output_token": "NextToken", - "limit_key": "MaxResults" + "result_key": "Servers" }, "ListTagsForResource": { "input_token": "NextToken", + "limit_key": "MaxResults", + "non_aggregate_keys": [ + "Arn" + ], "output_token": "NextToken", - "limit_key": "MaxResults" + "result_key": "Tags" }, "ListUsers": { "input_token": "NextToken", + "limit_key": "MaxResults", + "non_aggregate_keys": [ + "ServerId" + ], "output_token": "NextToken", - "limit_key": "MaxResults" + "result_key": "Users" }, "ListWorkflows": { "input_token": "NextToken", + "limit_key": "MaxResults", "output_token": "NextToken", - "limit_key": "MaxResults" + "result_key": "Workflows" } } -} +} \ No newline at end of file diff --git a/private/model/api/customization_passes.go b/private/model/api/customization_passes.go index de7708bb589..c32f65cec5e 100644 --- a/private/model/api/customization_passes.go +++ b/private/model/api/customization_passes.go @@ -88,6 +88,62 @@ func (a *API) customizationPasses() error { } } + if err := addHTTPChecksumCustomDocumentation(a); err != nil { + if err != nil { + return fmt.Errorf("service httpChecksum trait customization failed, %s: %v", + a.PackageName(), err) + } + } + + return nil +} + +func addHTTPChecksumCustomDocumentation(a *API) error { + for opName, o := range a.Operations { + if o.HTTPChecksum.RequestAlgorithmMember != "" { + ref := o.InputRef.Shape.GetModeledMember(o.HTTPChecksum.RequestAlgorithmMember) + if ref == nil { + return fmt.Errorf( + "expect httpChecksum.RequestAlgorithmMember %v to be modeled input member for %v", + o.HTTPChecksum.RequestAlgorithmMember, + opName, + ) + } + + ref.Documentation = AppendDocstring(ref.Documentation, ` + The AWS SDK for Go v1 does not support automatic computing + request payload checksum. This feature is available in the AWS + SDK for Go v2. If a value is specified for this parameter, the + matching algorithm's checksum member must be populated with the + algorithm's checksum of the request payload. + `) + if o.RequestChecksumRequired() { + ref.Documentation = AppendDocstring(ref.Documentation, ` + The SDK will automatically compute the Content-MD5 checksum + for this operation. The AWS SDK for Go v2 allows you to + configure alternative checksum algorithm to be used. 
+				`)
+			}
+		}
+
+		if o.HTTPChecksum.RequestValidationModeMember != "" {
+			ref := o.InputRef.Shape.GetModeledMember(o.HTTPChecksum.RequestValidationModeMember)
+			if ref == nil {
+				return fmt.Errorf(
+					"expect httpChecksum.RequestValidationModeMember %v to be modeled input member for %v",
+					o.HTTPChecksum.RequestValidationModeMember,
+					opName,
+				)
+			}
+
+			ref.Documentation = AppendDocstring(ref.Documentation, `
+				The AWS SDK for Go v1 does not support automatic response
+				payload checksum validation. This feature is available in the
+				AWS SDK for Go v2.
+			`)
+		}
+	}
+
+	return nil
+}
@@ -196,10 +252,10 @@ func s3CustRemoveHeadObjectModeledErrors(a *API) {
 	if !ok {
 		return
 	}
-	op.Documentation += `
-//
-// See http://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#RESTErrorResponses
-// for more information on returned errors.`
+	op.Documentation = AppendDocstring(op.Documentation, `
+		See http://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#RESTErrorResponses
+		for more information on returned errors.
+	`)
 	op.ErrorRefs = []ShapeRef{}
 }
@@ -361,15 +417,22 @@ func generatePresignedURL(a *API, inputShapes []string) {
 	for _, input := range inputShapes {
 		if ref, ok := a.Shapes[input]; ok {
 			ref.MemberRefs["SourceRegion"] = &ShapeRef{
-				Documentation: docstring(`SourceRegion is the source region where the resource exists. This is not sent over the wire and is only used for presigning. This value should always have the same region as the source ARN.`),
-				ShapeName:     "String",
-				Shape:         a.Shapes["String"],
-				Ignore:        true,
+				Documentation: docstring(`
+					SourceRegion is the source region where the resource exists.
+					This is not sent over the wire and is only used for presigning.
+					This value should always have the same region as the source
+					ARN.
+				`),
+				ShapeName: "String",
+				Shape:     a.Shapes["String"],
+				Ignore:    true,
 			}
 			ref.MemberRefs["DestinationRegion"] = &ShapeRef{
-				Documentation: docstring(`DestinationRegion is used for presigning the request to a given region.`),
-				ShapeName:     "String",
-				Shape:         a.Shapes["String"],
+				Documentation: docstring(`
+					DestinationRegion is used for presigning the request to a given region.
+				`),
+				ShapeName: "String",
+				Shape:     a.Shapes["String"],
 			}
 		}
 	}
diff --git a/private/model/api/docstring.go b/private/model/api/docstring.go
index 353ada02c28..a5bb937b6d2 100644
--- a/private/model/api/docstring.go
+++ b/private/model/api/docstring.go
@@ -544,3 +544,11 @@ func getHTMLTokenAttr(attr []xhtml.Attribute, name string) (string, bool) {
 	}
 	return "", false
 }
+
+// AppendDocstring appends a formatted docstring to the base documentation,
+// separated by an empty comment line.
+func AppendDocstring(base, toAdd string) string {
+	if base != "" {
+		base += "\n//\n"
+	}
+
+	return base + docstring(toAdd)
+}
diff --git a/private/model/api/operation.go b/private/model/api/operation.go
index e06398e4c63..a5429cc5898 100644
--- a/private/model/api/operation.go
+++ b/private/model/api/operation.go
@@ -32,10 +32,26 @@ type Operation struct {
 	EventStreamAPI *EventStreamAPI
 
-	IsEndpointDiscoveryOp  bool               `json:"endpointoperation"`
-	EndpointDiscovery      *EndpointDiscovery `json:"endpointdiscovery"`
-	Endpoint               *EndpointTrait     `json:"endpoint"`
-	IsHttpChecksumRequired bool               `json:"httpChecksumRequired"`
+	IsEndpointDiscoveryOp bool               `json:"endpointoperation"`
+	EndpointDiscovery     *EndpointDiscovery `json:"endpointdiscovery"`
+	Endpoint              *EndpointTrait     `json:"endpoint"`
+
+	// HTTPChecksum replaces usage of httpChecksumRequired, but some APIs
+	// (s3control) still use the old trait.
+	HTTPChecksum           HTTPChecksum `json:"httpChecksum"`
+	IsHttpChecksumRequired bool         `json:"httpChecksumRequired"`
+}
+
+// HTTPChecksum is the modeled httpChecksum trait of an operation.
+type HTTPChecksum struct {
+	RequestAlgorithmMember      string `json:"requestAlgorithmMember"`
+	RequestValidationModeMember string `json:"requestValidationModeMember"`
+	RequestChecksumRequired     bool   `json:"requestChecksumRequired"`
+}
+
+// RequestChecksumRequired returns if the request requires the Content-MD5
+// checksum to be computed.
+func (o *Operation) RequestChecksumRequired() bool {
+	return o.HTTPChecksum.RequestChecksumRequired || o.IsHttpChecksumRequired
+}
 
 // EndpointTrait provides the structure of the modeled endpoint trait, and its
@@ -334,7 +350,7 @@ func (c *{{ .API.StructName }}) {{ .ExportedName }}Request(` +
 		req.Handlers.Build.PushBackNamed({{ $handler }})
 	{{- end }}
 
-	{{- if .IsHttpChecksumRequired }}
+	{{- if .RequestChecksumRequired }}
 		{{- $_ := .API.AddSDKImport "private/checksum" }}
 		req.Handlers.Build.PushBackNamed(request.NamedHandler{
 			Name: "contentMd5Handler",
diff --git a/private/model/api/passes.go b/private/model/api/passes.go
index 6a5b579d16c..5c741c479bf 100644
--- a/private/model/api/passes.go
+++ b/private/model/api/passes.go
@@ -335,6 +335,7 @@ func (a *API) renameExportable() {
 				member.LocationName = mName
 			}
 		}
+		member.OriginalMemberName = mName
 
 		if newName == "_" {
 			panic("Shape " + s.ShapeName + " uses reserved member name '_'")
diff --git a/private/model/api/s3manger_input.go b/private/model/api/s3manger_input.go
index dc90ca62fd4..6f40e59a79f 100644
--- a/private/model/api/s3manger_input.go
+++ b/private/model/api/s3manger_input.go
@@ -38,6 +38,33 @@ var s3managerUploadInputTmpl = template.Must(
 	template.New("s3managerUploadInputTmpl").
 		Funcs(template.FuncMap{
 			"GetDeprecatedMsg": getDeprecatedMessage,
+			"GetDocstring": func(parent *Shape, memberName string, ref *ShapeRef) string {
+				doc := ref.Docstring()
+				if ref.Deprecated {
+					doc = AppendDocstring(doc, fmt.Sprintf(`
+						Deprecated: %s
+					`, getDeprecatedMessage(ref.DeprecatedMsg, memberName)))
+				}
+				if parent.WillRefBeBase64Encoded(memberName) {
+					doc = AppendDocstring(doc, fmt.Sprintf(`
+						%s is automatically base64 encoded/decoded by the SDK.
+					`, memberName))
+				}
+				if parent.IsRequired(memberName) {
+					doc = AppendDocstring(doc, fmt.Sprintf(`
+						%s is a required field
+					`, memberName))
+				}
+				if memberName == "ContentMD5" {
+					doc = AppendDocstring(doc, `
+						If the ContentMD5 is provided for a multipart upload, it
+						will be ignored. For objects uploaded in a single part,
+						the ContentMD5 will be used.
+					`)
+				}
+
+				return doc
+			},
 		}).
 		Parse(s3managerUploadInputTmplDef),
 )
@@ -47,6 +74,14 @@ const s3managerUploadInputTmplDef = `
 // to an object in an Amazon S3 bucket. This type is similar to the s3
 // package's PutObjectInput with the exception that the Body member is an
 // io.Reader instead of an io.ReadSeeker.
+//
+// The ContentMD5 member for pre-computed MD5 checksums will be ignored for
+// multipart uploads. For objects uploaded in a single part, the ContentMD5
+// will be used.
+//
+// The Checksum members for pre-computed checksums will be ignored for
+// multipart uploads. Objects uploaded in a single part will include the
+// checksum member in the request.
 type UploadInput struct {
 	_ struct{} {{ .GoTags true false }}
@@ -57,29 +92,12 @@ type UploadInput struct {
 		{{ else if eq $name "ContentLength" }}
 			{{/* S3 Upload Manager does not use modeled content length */}}
 		{{ else }}
-			{{ $isBlob := $.WillRefBeBase64Encoded $name -}}
 			{{ $isRequired := $.IsRequired $name -}}
-			{{ $doc := $ref.Docstring -}}
+			{{ $doc := GetDocstring $ $name $ref -}}
 
 			{{ if $doc -}}
 				{{ $doc }}
-				{{ if $ref.Deprecated -}}
-				//
-				// Deprecated: {{ GetDeprecatedMsg $ref.DeprecatedMsg $name }}
-				{{ end -}}
-			{{ end -}}
-			{{ if $isBlob -}}
-				{{ if $doc -}}
-				//
-				{{ end -}}
-				// {{ $name }} is automatically base64 encoded/decoded by the SDK.
-			{{ end -}}
-			{{ if $isRequired -}}
-				{{ if or $doc $isBlob -}}
-				//
-				{{ end -}}
-				// {{ $name }} is a required field
-			{{ end -}}
+			{{- end }}
 			{{ $name }} {{ $.GoStructType $name $ref }} {{ $ref.GoTags false $isRequired }}
 		{{ end }}
 	{{ end }}
diff --git a/private/model/api/shape.go b/private/model/api/shape.go
index f4652864852..248da6757a2 100644
--- a/private/model/api/shape.go
+++ b/private/model/api/shape.go
@@ -40,6 +40,11 @@ type ShapeRef struct {
 	Flattened    bool
 	Streaming    bool
 	XMLAttribute bool
+
+	// References of struct members will include their originally modeled
+	// member name for cross references.
+	OriginalMemberName string `json:"-"`
+
 	// Ignore, if set, will not be sent over the wire
 	Ignore       bool
 	XMLNamespace XMLInfo
@@ -77,9 +82,15 @@ type ShapeRef struct {
 
 // A Shape defines the definition of a shape type
 type Shape struct {
-	API           *API `json:"-"`
-	ShapeName     string
-	Documentation string `json:"-"`
+	API           *API `json:"-"`
+	ShapeName     string
+	Documentation string `json:"-"`
+
+	// References of struct members will include their originally modeled
+	// member name for cross references.
+	OriginalShapeName string `json:"-"`
+
+	// Map of exported member names to the ShapeReference.
 	MemberRefs map[string]*ShapeRef `json:"members"`
 	MemberRef  ShapeRef             `json:"member"` // List ref
 	KeyRef     ShapeRef             `json:"key"`    // map key ref
@@ -227,6 +238,10 @@ func (s *Shape) Rename(newName string) {
 		r.ShapeName = newName
 	}
 
+	if s.OriginalShapeName == "" {
+		s.OriginalShapeName = s.ShapeName
+	}
+
 	delete(s.API.Shapes, s.ShapeName)
 	s.API.Shapes[newName] = s
 	s.ShapeName = newName
@@ -243,13 +258,25 @@ func (s *Shape) MemberNames() []string {
 	return names
 }
 
-// HasMember will return whether or not the shape has a given
-// member by name.
+// HasMember will return whether or not the shape has a given member by name.
+// The name passed in must match the SDK's exported name for the member, not
+// the modeled member name.
 func (s *Shape) HasMember(name string) bool {
 	_, ok := s.MemberRefs[name]
 	return ok
 }
 
+// GetModeledMember returns the member's ShapeReference if it exists within
+// the shape. Returns nil if the member could not be found.
+func (s *Shape) GetModeledMember(name string) *ShapeRef {
+	for _, ref := range s.MemberRefs {
+		if ref.OriginalMemberName == name {
+			return ref
+		}
+	}
+	return nil
+}
+
 // GoTypeWithPkgName returns a shape's type as a string with the package name in
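To make the cross-reference concrete, a hedged sketch (not part of this patch) of how OriginalMemberName lets the generator resolve a modeled member name back to its exported reference; the types below are minimal stand-ins, not the real private/model/api definitions:

package main

import "fmt"

// Minimal stand-ins for the generator's ShapeRef and Shape types, used only
// to illustrate the lookup above.
type ShapeRef struct{ OriginalMemberName string }

type Shape struct{ MemberRefs map[string]*ShapeRef }

func (s *Shape) GetModeledMember(name string) *ShapeRef {
	for _, ref := range s.MemberRefs {
		if ref.OriginalMemberName == name {
			return ref
		}
	}
	return nil
}

func main() {
	// The map is keyed by the exported (possibly renamed) member name,
	// while the lookup uses the originally modeled name recorded by
	// renameExportable.
	s := &Shape{MemberRefs: map[string]*ShapeRef{
		"ChecksumAlgorithm": {OriginalMemberName: "ChecksumAlgorithm"},
	}}
	fmt.Println(s.GetModeledMember("ChecksumAlgorithm") != nil) // true
}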
// tag of the error XML response for a corresponding GetObject call. Cannot
// be used with a successful StatusCode header or when the transformed object
- // is provided in the body. All error codes from S3 are sentence-cased. Regex
- // value is "^[A-Z][a-zA-Z]+$".
+ // is provided in the body. All error codes from S3 are sentence-cased. The
+ // regular expression (regex) value is "^[A-Z][a-zA-Z]+$".
ErrorCode *string `location:"header" locationName:"x-amz-fwd-error-code" type:"string"`
// Contains a generic description of the error condition. Returned in the
@@ -38132,9 +40459,10 @@ type WriteGetObjectResponseInput struct {
// is provided in body.
ErrorMessage *string `location:"header" locationName:"x-amz-fwd-error-message" type:"string"`
- // If object stored in Amazon S3 expiration is configured (see PUT Bucket lifecycle)
- // it includes expiry-date and rule-id key-value pairs providing object expiration
- // information. The value of the rule-id is URL encoded.
+ // If the object expiration is configured (see PUT Bucket lifecycle), the response
+ // includes this header. It includes the expiry-date and rule-id key-value pairs
+ // that provide the object expiration information. The value of the rule-id
+ // is URL-encoded.
Expiration *string `location:"header" locationName:"x-amz-fwd-header-x-amz-expiration" type:"string"`
// The date and time at which the object is no longer cacheable.
@@ -38245,7 +40573,10 @@ type WriteGetObjectResponseInput struct {
// * 503 - Service Unavailable
StatusCode *int64 `location:"header" locationName:"x-amz-fwd-status" type:"integer"`
- // The class of storage used to store object in Amazon S3.
+ // Provides storage class information of the object. Amazon S3 returns this
+ // header for all objects except for S3 Standard storage class objects.
+ //
+ // For more information, see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html).
StorageClass *string `location:"header" locationName:"x-amz-fwd-header-x-amz-storage-class" type:"string" enum:"StorageClass"`
// The number of tags, if any, on the object.
@@ -38316,6 +40647,30 @@ func (s *WriteGetObjectResponseInput) SetCacheControl(v string) *WriteGetObjectR
return s
}
+// SetChecksumCRC32 sets the ChecksumCRC32 field's value.
+func (s *WriteGetObjectResponseInput) SetChecksumCRC32(v string) *WriteGetObjectResponseInput {
+ s.ChecksumCRC32 = &v
+ return s
+}
+
+// SetChecksumCRC32C sets the ChecksumCRC32C field's value.
+func (s *WriteGetObjectResponseInput) SetChecksumCRC32C(v string) *WriteGetObjectResponseInput {
+ s.ChecksumCRC32C = &v
+ return s
+}
+
+// SetChecksumSHA1 sets the ChecksumSHA1 field's value.
+func (s *WriteGetObjectResponseInput) SetChecksumSHA1(v string) *WriteGetObjectResponseInput {
+ s.ChecksumSHA1 = &v
+ return s
+}
+
+// SetChecksumSHA256 sets the ChecksumSHA256 field's value.
+func (s *WriteGetObjectResponseInput) SetChecksumSHA256(v string) *WriteGetObjectResponseInput {
+ s.ChecksumSHA256 = &v
+ return s
+}
+
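A hedged sketch (not part of this patch) of the new checksum setters in an S3 Object Lambda flow; RequestRoute and RequestToken would come from the GetObject event, and all values here are placeholders:

package main

import (
	"crypto/sha256"
	"encoding/base64"
	"fmt"
	"strings"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	body := "transformed object payload"
	sum := sha256.Sum256([]byte(body))

	input := (&s3.WriteGetObjectResponseInput{
		RequestRoute: aws.String("io-example-route"), // from the event context
		RequestToken: aws.String("example-token"),    // from the event context
		StatusCode:   aws.Int64(200),
		Body:         aws.ReadSeekCloser(strings.NewReader(body)),
	}).SetChecksumSHA256(base64.StdEncoding.EncodeToString(sum[:]))

	if _, err := svc.WriteGetObjectResponse(input); err != nil {
		fmt.Println(err)
	}
}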
// SetContentDisposition sets the ContentDisposition field's value.
func (s *WriteGetObjectResponseInput) SetContentDisposition(v string) *WriteGetObjectResponseInput {
s.ContentDisposition = &v
@@ -38748,6 +41103,42 @@ func BucketVersioningStatus_Values() []string {
}
}
+const (
+ // ChecksumAlgorithmCrc32 is a ChecksumAlgorithm enum value
+ ChecksumAlgorithmCrc32 = "CRC32"
+
+ // ChecksumAlgorithmCrc32c is a ChecksumAlgorithm enum value
+ ChecksumAlgorithmCrc32c = "CRC32C"
+
+ // ChecksumAlgorithmSha1 is a ChecksumAlgorithm enum value
+ ChecksumAlgorithmSha1 = "SHA1"
+
+ // ChecksumAlgorithmSha256 is a ChecksumAlgorithm enum value
+ ChecksumAlgorithmSha256 = "SHA256"
+)
+
+// ChecksumAlgorithm_Values returns all elements of the ChecksumAlgorithm enum
+func ChecksumAlgorithm_Values() []string {
+ return []string{
+ ChecksumAlgorithmCrc32,
+ ChecksumAlgorithmCrc32c,
+ ChecksumAlgorithmSha1,
+ ChecksumAlgorithmSha256,
+ }
+}
+
+const (
+ // ChecksumModeEnabled is a ChecksumMode enum value
+ ChecksumModeEnabled = "ENABLED"
+)
+
+// ChecksumMode_Values returns all elements of the ChecksumMode enum
+func ChecksumMode_Values() []string {
+ return []string{
+ ChecksumModeEnabled,
+ }
+}
+
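Since the v1 SDK does not compute these checksums automatically (see the generated documentation above), a hedged sketch (not part of this patch) of supplying a CRC32 checksum on PutObject, assuming the checksum members added to PutObjectInput in this release; bucket and key are placeholders:

package main

import (
	"bytes"
	"encoding/base64"
	"encoding/binary"
	"fmt"
	"hash/crc32"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))
	payload := []byte("hello world")

	// x-amz-checksum-crc32 carries the base64 encoding of the big-endian
	// 32-bit CRC of the payload.
	buf := make([]byte, 4)
	binary.BigEndian.PutUint32(buf, crc32.ChecksumIEEE(payload))

	_, err := svc.PutObject(&s3.PutObjectInput{
		Bucket:            aws.String("examplebucket"),
		Key:               aws.String("exampleobject"),
		Body:              aws.ReadSeekCloser(bytes.NewReader(payload)),
		ChecksumAlgorithm: aws.String(s3.ChecksumAlgorithmCrc32),
		ChecksumCRC32:     aws.String(base64.StdEncoding.EncodeToString(buf)),
	})
	if err != nil {
		fmt.Println(err)
	}
}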
const (
// CompressionTypeNone is a CompressionType enum value
CompressionTypeNone = "NONE"
@@ -39119,6 +41510,9 @@ const (
// InventoryOptionalFieldBucketKeyStatus is a InventoryOptionalField enum value
InventoryOptionalFieldBucketKeyStatus = "BucketKeyStatus"
+
+ // InventoryOptionalFieldChecksumAlgorithm is a InventoryOptionalField enum value
+ InventoryOptionalFieldChecksumAlgorithm = "ChecksumAlgorithm"
)
// InventoryOptionalField_Values returns all elements of the InventoryOptionalField enum
@@ -39136,6 +41530,7 @@ func InventoryOptionalField_Values() []string {
InventoryOptionalFieldObjectLockLegalHoldStatus,
InventoryOptionalFieldIntelligentTieringAccessTier,
InventoryOptionalFieldBucketKeyStatus,
+ InventoryOptionalFieldChecksumAlgorithm,
}
}
@@ -39219,6 +41614,34 @@ func MetricsStatus_Values() []string {
}
}
+const (
+ // ObjectAttributesEtag is a ObjectAttributes enum value
+ ObjectAttributesEtag = "ETag"
+
+ // ObjectAttributesChecksum is a ObjectAttributes enum value
+ ObjectAttributesChecksum = "Checksum"
+
+ // ObjectAttributesObjectParts is a ObjectAttributes enum value
+ ObjectAttributesObjectParts = "ObjectParts"
+
+ // ObjectAttributesStorageClass is a ObjectAttributes enum value
+ ObjectAttributesStorageClass = "StorageClass"
+
+ // ObjectAttributesObjectSize is a ObjectAttributes enum value
+ ObjectAttributesObjectSize = "ObjectSize"
+)
+
+// ObjectAttributes_Values returns all elements of the ObjectAttributes enum
+func ObjectAttributes_Values() []string {
+ return []string{
+ ObjectAttributesEtag,
+ ObjectAttributesChecksum,
+ ObjectAttributesObjectParts,
+ ObjectAttributesStorageClass,
+ ObjectAttributesObjectSize,
+ }
+}
+
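A hedged sketch (not part of this patch) of the ObjectAttributes enum in use with the new GetObjectAttributes API; bucket and key are placeholders:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	// Only the requested attributes are returned in the response.
	out, err := svc.GetObjectAttributes(&s3.GetObjectAttributesInput{
		Bucket: aws.String("examplebucket"),
		Key:    aws.String("exampleobject"),
		ObjectAttributes: aws.StringSlice([]string{
			s3.ObjectAttributesEtag,
			s3.ObjectAttributesChecksum,
			s3.ObjectAttributesObjectSize,
		}),
	})
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(aws.StringValue(out.ETag), aws.Int64Value(out.ObjectSize))
}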
const (
// ObjectCannedACLPrivate is a ObjectCannedACL enum value
ObjectCannedACLPrivate = "private"
@@ -39581,8 +42004,8 @@ func RequestCharged_Values() []string {
// Confirms that the requester knows that they will be charged for the request.
// Bucket owners need not specify this parameter in their requests. For information
-// about downloading objects from requester pays buckets, see Downloading Objects
-// in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
+// about downloading objects from Requester Pays buckets, see Downloading Objects
+// in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
// in the Amazon S3 User Guide.
const (
// RequestPayerRequester is a RequestPayer enum value
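A hedged sketch (not part of this patch): the requester acknowledges the charge by setting this enum value on, for example, a GetObject call; bucket and key are placeholders:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	// The requester, not the bucket owner, is billed for this request.
	out, err := svc.GetObject(&s3.GetObjectInput{
		Bucket:       aws.String("examplebucket"),
		Key:          aws.String("exampleobject"),
		RequestPayer: aws.String(s3.RequestPayerRequester),
	})
	if err != nil {
		fmt.Println(err)
		return
	}
	out.Body.Close()
}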
diff --git a/service/s3/examples_test.go b/service/s3/examples_test.go
index 99a7251630b..0c2622f4e62 100644
--- a/service/s3/examples_test.go
+++ b/service/s3/examples_test.go
@@ -128,17 +128,13 @@ func ExampleS3_CopyObject_shared00() {
fmt.Println(result)
}
-// To create a bucket in a specific region
+// To create a bucket
//
-// The following example creates a bucket. The request specifies an AWS region where
-// to create the bucket.
+// The following example creates a bucket.
func ExampleS3_CreateBucket_shared00() {
svc := s3.New(session.New())
input := &s3.CreateBucketInput{
Bucket: aws.String("examplebucket"),
- CreateBucketConfiguration: &s3.CreateBucketConfiguration{
- LocationConstraint: aws.String("eu-west-1"),
- },
}
result, err := svc.CreateBucket(input)
@@ -163,13 +159,17 @@ func ExampleS3_CreateBucket_shared00() {
fmt.Println(result)
}
-// To create a bucket
+// To create a bucket in a specific region
//
-// The following example creates a bucket.
+// The following example creates a bucket. The request specifies the AWS Region
+// in which to create the bucket.
func ExampleS3_CreateBucket_shared01() {
svc := s3.New(session.New())
input := &s3.CreateBucketInput{
Bucket: aws.String("examplebucket"),
+ CreateBucketConfiguration: &s3.CreateBucketConfiguration{
+ LocationConstraint: aws.String("eu-west-1"),
+ },
}
result, err := svc.CreateBucket(input)
@@ -934,16 +934,14 @@ func ExampleS3_GetBucketWebsite_shared00() {
fmt.Println(result)
}
-// To retrieve a byte range of an object
+// To retrieve an object
//
-// The following example retrieves an object for an S3 bucket. The request specifies
-// the range header to retrieve a specific byte range.
+// The following example retrieves an object from an S3 bucket.
func ExampleS3_GetObject_shared00() {
svc := s3.New(session.New())
input := &s3.GetObjectInput{
Bucket: aws.String("examplebucket"),
- Key: aws.String("SampleFile.txt"),
- Range: aws.String("bytes=0-9"),
+ Key: aws.String("HappyFace.jpg"),
}
result, err := svc.GetObject(input)
@@ -968,14 +966,16 @@ func ExampleS3_GetObject_shared00() {
fmt.Println(result)
}
-// To retrieve an object
+// To retrieve a byte range of an object
//
-// The following example retrieves an object for an S3 bucket.
+// The following example retrieves an object from an S3 bucket. The request specifies
+// the Range header to retrieve a specific byte range.
func ExampleS3_GetObject_shared01() {
svc := s3.New(session.New())
input := &s3.GetObjectInput{
Bucket: aws.String("examplebucket"),
- Key: aws.String("HappyFace.jpg"),
+ Key: aws.String("SampleFile.txt"),
+ Range: aws.String("bytes=0-9"),
}
result, err := svc.GetObject(input)
@@ -1030,14 +1030,16 @@ func ExampleS3_GetObjectAcl_shared00() {
fmt.Println(result)
}
-// To retrieve tag set of an object
+// To retrieve tag set of a specific object version
//
-// The following example retrieves tag set of an object.
+// The following example retrieves the tag set of an object. The request specifies
+// the object version.
func ExampleS3_GetObjectTagging_shared00() {
svc := s3.New(session.New())
input := &s3.GetObjectTaggingInput{
- Bucket: aws.String("examplebucket"),
- Key: aws.String("HappyFace.jpg"),
+ Bucket: aws.String("examplebucket"),
+ Key: aws.String("exampleobject"),
+ VersionId: aws.String("ydlaNkwWm0SfKJR.T1b1fIdPRbldTYRI"),
}
result, err := svc.GetObjectTagging(input)
@@ -1058,16 +1060,14 @@ func ExampleS3_GetObjectTagging_shared00() {
fmt.Println(result)
}
-// To retrieve tag set of a specific object version
+// To retrieve tag set of an object
//
-// The following example retrieves tag set of an object. The request specifies object
-// version.
+// The following example retrieves the tag set of an object.
func ExampleS3_GetObjectTagging_shared01() {
svc := s3.New(session.New())
input := &s3.GetObjectTaggingInput{
- Bucket: aws.String("examplebucket"),
- Key: aws.String("exampleobject"),
- VersionId: aws.String("ydlaNkwWm0SfKJR.T1b1fIdPRbldTYRI"),
+ Bucket: aws.String("examplebucket"),
+ Key: aws.String("HappyFace.jpg"),
}
result, err := svc.GetObjectTagging(input)
@@ -1201,17 +1201,13 @@ func ExampleS3_ListBuckets_shared00() {
fmt.Println(result)
}
-// List next set of multipart uploads when previous result is truncated
+// To list in-progress multipart uploads on a bucket
//
-// The following example specifies the upload-id-marker and key-marker from previous
-// truncated response to retrieve next setup of multipart uploads.
+// The following example lists in-progress multipart uploads on a specific bucket.
func ExampleS3_ListMultipartUploads_shared00() {
svc := s3.New(session.New())
input := &s3.ListMultipartUploadsInput{
- Bucket: aws.String("examplebucket"),
- KeyMarker: aws.String("nextkeyfrompreviousresponse"),
- MaxUploads: aws.Int64(2),
- UploadIdMarker: aws.String("valuefrompreviousresponse"),
+ Bucket: aws.String("examplebucket"),
}
result, err := svc.ListMultipartUploads(input)
@@ -1232,13 +1228,17 @@ func ExampleS3_ListMultipartUploads_shared00() {
fmt.Println(result)
}
-// To list in-progress multipart uploads on a bucket
+// List next set of multipart uploads when previous result is truncated
//
-// The following example lists in-progress multipart uploads on a specific bucket.
+// The following example specifies the upload-id-marker and key-marker from the
+// previous truncated response to retrieve the next set of multipart uploads.
func ExampleS3_ListMultipartUploads_shared01() {
svc := s3.New(session.New())
input := &s3.ListMultipartUploadsInput{
- Bucket: aws.String("examplebucket"),
+ Bucket: aws.String("examplebucket"),
+ KeyMarker: aws.String("nextkeyfrompreviousresponse"),
+ MaxUploads: aws.Int64(2),
+ UploadIdMarker: aws.String("valuefrompreviousresponse"),
}
result, err := svc.ListMultipartUploads(input)
@@ -1808,18 +1808,17 @@ func ExampleS3_PutBucketWebsite_shared00() {
fmt.Println(result)
}
-// To upload an object (specify optional headers)
+// To upload an object and specify optional tags
//
-// The following example uploads an object. The request specifies optional request headers
-// to directs S3 to use specific storage class and use server-side encryption.
+// The following example uploads an object. The request specifies optional object tags.
+// The bucket is versioned, therefore S3 returns version ID of the newly created object.
func ExampleS3_PutObject_shared00() {
svc := s3.New(session.New())
input := &s3.PutObjectInput{
- Body: aws.ReadSeekCloser(strings.NewReader("HappyFace.jpg")),
- Bucket: aws.String("examplebucket"),
- Key: aws.String("HappyFace.jpg"),
- ServerSideEncryption: aws.String("AES256"),
- StorageClass: aws.String("STANDARD_IA"),
+ Body: aws.ReadSeekCloser(strings.NewReader("c:\\HappyFace.jpg")),
+ Bucket: aws.String("examplebucket"),
+ Key: aws.String("HappyFace.jpg"),
+ Tagging: aws.String("key1=value1&key2=value2"),
}
result, err := svc.PutObject(input)
@@ -1840,19 +1839,18 @@ func ExampleS3_PutObject_shared00() {
fmt.Println(result)
}
-// To upload an object and specify server-side encryption and object tags
+// To upload an object and specify canned ACL.
//
-// The following example uploads and object. The request specifies the optional server-side
-// encryption option. The request also specifies optional object tags. If the bucket
-// is versioning enabled, S3 returns version ID in response.
+// The following example uploads an object. The request specifies an optional canned
+// ACL (access control list) to grant READ access to authenticated users. If the
+// bucket has versioning enabled, S3 returns the version ID in the response.
func ExampleS3_PutObject_shared01() {
svc := s3.New(session.New())
input := &s3.PutObjectInput{
- Body: aws.ReadSeekCloser(strings.NewReader("filetoupload")),
- Bucket: aws.String("examplebucket"),
- Key: aws.String("exampleobject"),
- ServerSideEncryption: aws.String("AES256"),
- Tagging: aws.String("key1=value1&key2=value2"),
+ ACL: aws.String("authenticated-read"),
+ Body: aws.ReadSeekCloser(strings.NewReader("filetoupload")),
+ Bucket: aws.String("examplebucket"),
+ Key: aws.String("exampleobject"),
}
result, err := svc.PutObject(input)
@@ -1873,16 +1871,19 @@ func ExampleS3_PutObject_shared01() {
fmt.Println(result)
}
-// To create an object.
+// To upload an object and specify server-side encryption and object tags
//
-// The following example creates an object. If the bucket is versioning enabled, S3
-// returns version ID in response.
+// The following example uploads an object. The request specifies the optional server-side
+// encryption option. The request also specifies optional object tags. If the bucket
+// has versioning enabled, S3 returns the version ID in the response.
func ExampleS3_PutObject_shared02() {
svc := s3.New(session.New())
input := &s3.PutObjectInput{
- Body: aws.ReadSeekCloser(strings.NewReader("filetoupload")),
- Bucket: aws.String("examplebucket"),
- Key: aws.String("objectkey"),
+ Body: aws.ReadSeekCloser(strings.NewReader("filetoupload")),
+ Bucket: aws.String("examplebucket"),
+ Key: aws.String("exampleobject"),
+ ServerSideEncryption: aws.String("AES256"),
+ Tagging: aws.String("key1=value1&key2=value2"),
}
result, err := svc.PutObject(input)
@@ -1903,20 +1904,16 @@ func ExampleS3_PutObject_shared02() {
fmt.Println(result)
}
-// To upload object and specify user-defined metadata
+// To create an object.
//
-// The following example creates an object. The request also specifies optional metadata.
-// If the bucket is versioning enabled, S3 returns version ID in response.
+// The following example creates an object. If the bucket has versioning enabled,
+// S3 returns the version ID in the response.
func ExampleS3_PutObject_shared03() {
svc := s3.New(session.New())
input := &s3.PutObjectInput{
Body: aws.ReadSeekCloser(strings.NewReader("filetoupload")),
Bucket: aws.String("examplebucket"),
- Key: aws.String("exampleobject"),
- Metadata: map[string]*string{
- "metadata1": aws.String("value1"),
- "metadata2": aws.String("value2"),
- },
+ Key: aws.String("objectkey"),
}
result, err := svc.PutObject(input)
@@ -1937,18 +1934,17 @@ func ExampleS3_PutObject_shared03() {
fmt.Println(result)
}
-// To upload an object and specify canned ACL.
+// To upload an object
//
-// The following example uploads and object. The request specifies optional canned ACL
-// (access control list) to all READ access to authenticated users. If the bucket is
-// versioning enabled, S3 returns version ID in response.
+// The following example uploads an object to a versioning-enabled bucket. The source
+// file is specified using Windows file syntax. S3 returns VersionId of the newly created
+// object.
func ExampleS3_PutObject_shared04() {
svc := s3.New(session.New())
input := &s3.PutObjectInput{
- ACL: aws.String("authenticated-read"),
- Body: aws.ReadSeekCloser(strings.NewReader("filetoupload")),
+ Body: aws.ReadSeekCloser(strings.NewReader("HappyFace.jpg")),
Bucket: aws.String("examplebucket"),
- Key: aws.String("exampleobject"),
+ Key: aws.String("HappyFace.jpg"),
}
result, err := svc.PutObject(input)
@@ -1969,17 +1965,18 @@ func ExampleS3_PutObject_shared04() {
fmt.Println(result)
}
-// To upload an object
+// To upload an object (specify optional headers)
//
-// The following example uploads an object to a versioning-enabled bucket. The source
-// file is specified using Windows file syntax. S3 returns VersionId of the newly created
-// object.
+// The following example uploads an object. The request specifies optional request
+// headers that direct S3 to use a specific storage class and server-side encryption.
func ExampleS3_PutObject_shared05() {
svc := s3.New(session.New())
input := &s3.PutObjectInput{
- Body: aws.ReadSeekCloser(strings.NewReader("HappyFace.jpg")),
- Bucket: aws.String("examplebucket"),
- Key: aws.String("HappyFace.jpg"),
+ Body: aws.ReadSeekCloser(strings.NewReader("HappyFace.jpg")),
+ Bucket: aws.String("examplebucket"),
+ Key: aws.String("HappyFace.jpg"),
+ ServerSideEncryption: aws.String("AES256"),
+ StorageClass: aws.String("STANDARD_IA"),
}
result, err := svc.PutObject(input)
@@ -2000,17 +1997,20 @@ func ExampleS3_PutObject_shared05() {
fmt.Println(result)
}
-// To upload an object and specify optional tags
+// To upload object and specify user-defined metadata
//
-// The following example uploads an object. The request specifies optional object tags.
-// The bucket is versioned, therefore S3 returns version ID of the newly created object.
+// The following example creates an object. The request also specifies optional metadata.
+// If the bucket is versioning enabled, S3 returns version ID in response.
func ExampleS3_PutObject_shared06() {
svc := s3.New(session.New())
input := &s3.PutObjectInput{
- Body: aws.ReadSeekCloser(strings.NewReader("c:\\HappyFace.jpg")),
- Bucket: aws.String("examplebucket"),
- Key: aws.String("HappyFace.jpg"),
- Tagging: aws.String("key1=value1&key2=value2"),
+ Body: aws.ReadSeekCloser(strings.NewReader("filetoupload")),
+ Bucket: aws.String("examplebucket"),
+ Key: aws.String("exampleobject"),
+ Metadata: map[string]*string{
+ "metadata1": aws.String("value1"),
+ "metadata2": aws.String("value2"),
+ },
}
result, err := svc.PutObject(input)
@@ -2175,19 +2175,18 @@ func ExampleS3_UploadPart_shared00() {
fmt.Println(result)
}
-// To upload a part by copying byte range from an existing object as data source
+// To upload a part by copying data from an existing object as data source
//
-// The following example uploads a part of a multipart upload by copying a specified
-// byte range from an existing object as data source.
+// The following example uploads a part of a multipart upload by copying data from an
+// existing object as data source.
func ExampleS3_UploadPartCopy_shared00() {
svc := s3.New(session.New())
input := &s3.UploadPartCopyInput{
- Bucket: aws.String("examplebucket"),
- CopySource: aws.String("/bucketname/sourceobjectkey"),
- CopySourceRange: aws.String("bytes=1-100000"),
- Key: aws.String("examplelargeobject"),
- PartNumber: aws.Int64(2),
- UploadId: aws.String("exampleuoh_10OhKhT7YukE9bjzTPRiuaCotmZM_pFngJFir9OZNrSr5cWa3cq3LZSUsfjI4FI7PkP91We7Nrw--"),
+ Bucket: aws.String("examplebucket"),
+ CopySource: aws.String("/bucketname/sourceobjectkey"),
+ Key: aws.String("examplelargeobject"),
+ PartNumber: aws.Int64(1),
+ UploadId: aws.String("exampleuoh_10OhKhT7YukE9bjzTPRiuaCotmZM_pFngJFir9OZNrSr5cWa3cq3LZSUsfjI4FI7PkP91We7Nrw--"),
}
result, err := svc.UploadPartCopy(input)
@@ -2208,18 +2207,19 @@ func ExampleS3_UploadPartCopy_shared00() {
fmt.Println(result)
}
-// To upload a part by copying data from an existing object as data source
+// To upload a part by copying byte range from an existing object as data source
//
-// The following example uploads a part of a multipart upload by copying data from an
-// existing object as data source.
+// The following example uploads a part of a multipart upload by copying a specified
+// byte range from an existing object as data source.
func ExampleS3_UploadPartCopy_shared01() {
svc := s3.New(session.New())
input := &s3.UploadPartCopyInput{
- Bucket: aws.String("examplebucket"),
- CopySource: aws.String("/bucketname/sourceobjectkey"),
- Key: aws.String("examplelargeobject"),
- PartNumber: aws.Int64(1),
- UploadId: aws.String("exampleuoh_10OhKhT7YukE9bjzTPRiuaCotmZM_pFngJFir9OZNrSr5cWa3cq3LZSUsfjI4FI7PkP91We7Nrw--"),
+ Bucket: aws.String("examplebucket"),
+ CopySource: aws.String("/bucketname/sourceobjectkey"),
+ CopySourceRange: aws.String("bytes=1-100000"),
+ Key: aws.String("examplelargeobject"),
+ PartNumber: aws.Int64(2),
+ UploadId: aws.String("exampleuoh_10OhKhT7YukE9bjzTPRiuaCotmZM_pFngJFir9OZNrSr5cWa3cq3LZSUsfjI4FI7PkP91We7Nrw--"),
}
result, err := svc.UploadPartCopy(input)
diff --git a/service/s3/s3iface/interface.go b/service/s3/s3iface/interface.go
index 1e32fb94d5a..d2eeca9afe8 100644
--- a/service/s3/s3iface/interface.go
+++ b/service/s3/s3iface/interface.go
@@ -244,6 +244,10 @@ type S3API interface {
GetObjectAclWithContext(aws.Context, *s3.GetObjectAclInput, ...request.Option) (*s3.GetObjectAclOutput, error)
GetObjectAclRequest(*s3.GetObjectAclInput) (*request.Request, *s3.GetObjectAclOutput)
+ GetObjectAttributes(*s3.GetObjectAttributesInput) (*s3.GetObjectAttributesOutput, error)
+ GetObjectAttributesWithContext(aws.Context, *s3.GetObjectAttributesInput, ...request.Option) (*s3.GetObjectAttributesOutput, error)
+ GetObjectAttributesRequest(*s3.GetObjectAttributesInput) (*request.Request, *s3.GetObjectAttributesOutput)
+
GetObjectLegalHold(*s3.GetObjectLegalHoldInput) (*s3.GetObjectLegalHoldOutput, error)
GetObjectLegalHoldWithContext(aws.Context, *s3.GetObjectLegalHoldInput, ...request.Option) (*s3.GetObjectLegalHoldOutput, error)
GetObjectLegalHoldRequest(*s3.GetObjectLegalHoldInput) (*request.Request, *s3.GetObjectLegalHoldOutput)
diff --git a/service/s3/s3manager/upload.go b/service/s3/s3manager/upload.go
index 9fa98fa2f6f..47d29bb84a6 100644
--- a/service/s3/s3manager/upload.go
+++ b/service/s3/s3manager/upload.go
@@ -124,6 +124,14 @@ func WithUploaderRequestOptions(opts ...request.Option) func(*Uploader) {
// The Uploader structure that calls Upload(). It is safe to call Upload()
// on this structure for multiple objects and across concurrent goroutines.
// Mutating the Uploader's properties is not safe to be done concurrently.
+//
+// The ContentMD5 member for pre-computed MD5 checksums will be ignored for
+// multipart uploads. For objects that will be uploaded in a single part,
+// the ContentMD5 will be used.
+//
+// The Checksum members for pre-computed checksums will be ignored for
+// multipart uploads. Objects that will be uploaded in a single part will
+// include the checksum member in the request.
type Uploader struct {
// The buffer size (in bytes) to use when buffering data into chunks and
// sending them as parts to S3. The minimum allowed part size is 5MB, and
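
To make the note above concrete, here is a minimal, hypothetical sketch of a single-part upload that supplies a pre-computed ContentMD5; the bucket and key are placeholders:

    package main

    import (
    	"bytes"
    	"crypto/md5"
    	"encoding/base64"
    	"fmt"

    	"github.com/aws/aws-sdk-go/aws"
    	"github.com/aws/aws-sdk-go/aws/session"
    	"github.com/aws/aws-sdk-go/service/s3/s3manager"
    )

    func main() {
    	payload := []byte("hello, checksummed world")

    	// Pre-compute the base64-encoded MD5 of the body. Per the comment
    	// above, this is honored only when the object fits in a single
    	// part; multipart uploads ignore ContentMD5.
    	sum := md5.Sum(payload)
    	contentMD5 := base64.StdEncoding.EncodeToString(sum[:])

    	uploader := s3manager.NewUploader(session.Must(session.NewSession()))
    	out, err := uploader.Upload(&s3manager.UploadInput{
    		Bucket:     aws.String("examplebucket"), // placeholder
    		Key:        aws.String("examplekey"),    // placeholder
    		Body:       bytes.NewReader(payload),
    		ContentMD5: aws.String(contentMD5),
    	})
    	if err != nil {
    		fmt.Println(err)
    		return
    	}
    	fmt.Println(out.Location)
    }
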
diff --git a/service/s3/s3manager/upload_input.go b/service/s3/s3manager/upload_input.go
index 45f414fa3c0..1cd115f48ce 100644
--- a/service/s3/s3manager/upload_input.go
+++ b/service/s3/s3manager/upload_input.go
@@ -11,6 +11,14 @@ import (
// to an object in an Amazon S3 bucket. This type is similar to the s3
// package's PutObjectInput with the exception that the Body member is an
// io.Reader instead of an io.ReadSeeker.
+//
+// The ContentMD5 member for pre-computed MD5 checksums will be ignored for
+// multipart uploads. For objects that will be uploaded in a single part,
+// the ContentMD5 will be used.
+//
+// The Checksum members for pre-computed checksums will be ignored for
+// multipart uploads. Objects that will be uploaded in a single part will
+// include the checksum member in the request.
type UploadInput struct {
_ struct{} `locationName:"PutObjectRequest" type:"structure" payload:"Body"`
@@ -35,9 +43,9 @@ type UploadInput struct {
// When using this action with Amazon S3 on Outposts, you must direct requests
// to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form
// AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When
- // using this action using S3 on Outposts through the Amazon Web Services SDKs,
+ // using this action with S3 on Outposts through the Amazon Web Services SDKs,
// you provide the Outposts bucket ARN in place of the bucket name. For more
- // information about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
+ // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
// in the Amazon S3 User Guide.
//
// Bucket is a required field
@@ -57,6 +65,51 @@ type UploadInput struct {
// (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9).
CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"`
+ // Indicates the algorithm used to create the checksum for the object when using
+ // the SDK. This header will not provide any additional functionality if not
+ // using the SDK. When sending this header, there must be a corresponding x-amz-checksum
+ // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with
+ // the HTTP status code 400 Bad Request. For more information, see Checking
+ // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
+ // in the Amazon S3 User Guide.
+ //
+ // If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm
+ // parameter.
+ //
+ // The AWS SDK for Go v1 does not support automatically computing the request
+ // payload checksum. That feature is available in the AWS SDK for Go v2. If a
+ // value is specified for this parameter, the matching algorithm's checksum
+ // member must be populated with the algorithm's checksum of the request payload.
+ ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"`
+
+ // This header can be used as a data integrity check to verify that the data
+ // received is the same data that was originally sent. This header specifies
+ // the base64-encoded, 32-bit CRC32 checksum of the object. For more information,
+ // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
+ // in the Amazon S3 User Guide.
+ ChecksumCRC32 *string `location:"header" locationName:"x-amz-checksum-crc32" type:"string"`
+
+ // This header can be used as a data integrity check to verify that the data
+ // received is the same data that was originally sent. This header specifies
+ // the base64-encoded, 32-bit CRC32C checksum of the object. For more information,
+ // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
+ // in the Amazon S3 User Guide.
+ ChecksumCRC32C *string `location:"header" locationName:"x-amz-checksum-crc32c" type:"string"`
+
+ // This header can be used as a data integrity check to verify that the data
+ // received is the same data that was originally sent. This header specifies
+ // the base64-encoded, 160-bit SHA-1 digest of the object. For more information,
+ // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
+ // in the Amazon S3 User Guide.
+ ChecksumSHA1 *string `location:"header" locationName:"x-amz-checksum-sha1" type:"string"`
+
+ // This header can be used as a data integrity check to verify that the data
+ // received is the same data that was originally sent. This header specifies
+ // the base64-encoded, 256-bit SHA-256 digest of the object. For more information,
+ // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
+ // in the Amazon S3 User Guide.
+ ChecksumSHA256 *string `location:"header" locationName:"x-amz-checksum-sha256" type:"string"`
+
// Specifies presentational information for the object. For more information,
// see http://www.w3.org/Protocols/rfc2616/rfc2616-sec19.html#sec19.5.1 (http://www.w3.org/Protocols/rfc2616/rfc2616-sec19.html#sec19.5.1).
ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"`
@@ -76,6 +129,9 @@ type UploadInput struct {
// it is optional, we recommend using the Content-MD5 mechanism as an end-to-end
// integrity check. For more information about REST request authentication,
// see REST Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html).
+ //
+ // If the ContentMD5 is provided for a multipart upload, it will be ignored.
+ // For objects that will be uploaded in a single part, the ContentMD5 will be
+ // used.
ContentMD5 *string `location:"header" locationName:"Content-MD5" type:"string"`
// A standard MIME type describing the format of the contents. For more information,
@@ -83,8 +139,8 @@ type UploadInput struct {
ContentType *string `location:"header" locationName:"Content-Type" type:"string"`
// The account ID of the expected bucket owner. If the bucket is owned by a
- // different account, the request will fail with an HTTP 403 (Access Denied)
- // error.
+ // different account, the request fails with the HTTP status code 403 Forbidden
+ // (access denied).
ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
// The date and time at which the object is no longer cacheable. For more information,
@@ -132,8 +188,8 @@ type UploadInput struct {
// Confirms that the requester knows that they will be charged for the request.
// Bucket owners need not specify this parameter in their requests. For information
- // about downloading objects from requester pays buckets, see Downloading Objects
- // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
+ // about downloading objects from Requester Pays buckets, see Downloading Objects
+ // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
// in the Amazon S3 User Guide.
RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
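
Because the v1 SDK will not compute these values, a caller supplying ChecksumCRC32 derives it from the payload itself. A hedged sketch, assuming the standard big-endian, base64 encoding of the 32-bit CRC described above, and subject to the same single-part caveat:

    package main

    import (
    	"bytes"
    	"encoding/base64"
    	"encoding/binary"
    	"fmt"
    	"hash/crc32"

    	"github.com/aws/aws-sdk-go/aws"
    	"github.com/aws/aws-sdk-go/aws/session"
    	"github.com/aws/aws-sdk-go/service/s3/s3manager"
    )

    func main() {
    	payload := []byte("hello, checksummed world")

    	// The x-amz-checksum-crc32 header carries the 32-bit CRC32 of the
    	// body as a big-endian value, base64-encoded.
    	crc := make([]byte, 4)
    	binary.BigEndian.PutUint32(crc, crc32.ChecksumIEEE(payload))
    	checksum := base64.StdEncoding.EncodeToString(crc)

    	uploader := s3manager.NewUploader(session.Must(session.NewSession()))
    	_, err := uploader.Upload(&s3manager.UploadInput{
    		Bucket:            aws.String("examplebucket"), // placeholder
    		Key:               aws.String("examplekey"),    // placeholder
    		Body:              bytes.NewReader(payload),
    		ChecksumAlgorithm: aws.String("CRC32"),
    		ChecksumCRC32:     aws.String(checksum),
    	})
    	if err != nil {
    		fmt.Println(err)
    	}
    }
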
diff --git a/service/s3control/api.go b/service/s3control/api.go
index a5d63e8e830..4b799b76a7a 100644
--- a/service/s3control/api.go
+++ b/service/s3control/api.go
@@ -16870,6 +16870,11 @@ type S3CopyObjectOperation struct {
CannedAccessControlList *string `type:"string" enum:"S3CannedAccessControlList"`
+ // Indicates the algorithm you want Amazon S3 to use to create the checksum.
+ // For more information see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/CheckingObjectIntegrity.xml)
+ // in the Amazon S3 User Guide.
+ ChecksumAlgorithm *string `type:"string" enum:"S3ChecksumAlgorithm"`
+
MetadataDirective *string `type:"string" enum:"S3MetadataDirective"`
ModifiedSinceConstraint *time.Time `type:"timestamp"`
@@ -16998,6 +17003,12 @@ func (s *S3CopyObjectOperation) SetCannedAccessControlList(v string) *S3CopyObje
return s
}
+// SetChecksumAlgorithm sets the ChecksumAlgorithm field's value.
+func (s *S3CopyObjectOperation) SetChecksumAlgorithm(v string) *S3CopyObjectOperation {
+ s.ChecksumAlgorithm = &v
+ return s
+}
+
// SetMetadataDirective sets the MetadataDirective field's value.
func (s *S3CopyObjectOperation) SetMetadataDirective(v string) *S3CopyObjectOperation {
s.MetadataDirective = &v
@@ -19628,6 +19639,30 @@ func S3CannedAccessControlList_Values() []string {
}
}
+const (
+ // S3ChecksumAlgorithmCrc32 is a S3ChecksumAlgorithm enum value
+ S3ChecksumAlgorithmCrc32 = "CRC32"
+
+ // S3ChecksumAlgorithmCrc32c is a S3ChecksumAlgorithm enum value
+ S3ChecksumAlgorithmCrc32c = "CRC32C"
+
+ // S3ChecksumAlgorithmSha1 is a S3ChecksumAlgorithm enum value
+ S3ChecksumAlgorithmSha1 = "SHA1"
+
+ // S3ChecksumAlgorithmSha256 is a S3ChecksumAlgorithm enum value
+ S3ChecksumAlgorithmSha256 = "SHA256"
+)
+
+// S3ChecksumAlgorithm_Values returns all elements of the S3ChecksumAlgorithm enum
+func S3ChecksumAlgorithm_Values() []string {
+ return []string{
+ S3ChecksumAlgorithmCrc32,
+ S3ChecksumAlgorithmCrc32c,
+ S3ChecksumAlgorithmSha1,
+ S3ChecksumAlgorithmSha256,
+ }
+}
+
const (
// S3GlacierJobTierBulk is a S3GlacierJobTier enum value
S3GlacierJobTierBulk = "BULK"
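
The new enum is consumed by S3 Batch Operations copy jobs; a compile-only sketch (the TargetResource ARN is a placeholder, and a real CreateJobInput needs manifest, report, role, and account fields not shown here):

    package main

    import (
    	"fmt"

    	"github.com/aws/aws-sdk-go/aws"
    	"github.com/aws/aws-sdk-go/service/s3control"
    )

    func main() {
    	// Ask S3 Batch Operations to write SHA256 checksums on copy. This
    	// is only the Operation fragment of a CreateJob request.
    	op := &s3control.JobOperation{
    		S3PutObjectCopy: (&s3control.S3CopyObjectOperation{
    			TargetResource: aws.String("arn:aws:s3:::examplebucket"), // placeholder
    		}).SetChecksumAlgorithm(s3control.S3ChecksumAlgorithmSha256),
    	}
    	fmt.Println(op)
    }
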
From f77d1c4980a6340d327944671fb6dbeb2a49d8d4 Mon Sep 17 00:00:00 2001
From: aws-sdk-go-automation
<43143561+aws-sdk-go-automation@users.noreply.github.com>
Date: Fri, 25 Feb 2022 11:19:06 -0800
Subject: [PATCH 4/4] Release v1.43.7 (2022-02-25) (#4295)
Release v1.43.7 (2022-02-25)
===
### Service Client Updates
* `service/elasticache`: Updates service documentation
* Doc only update for ElastiCache
* `service/panorama`: Updates service API and documentation
---
CHANGELOG.md | 8 +
aws/endpoints/defaults.go | 55 ++
aws/version.go | 2 +-
.../apis/elasticache/2015-02-02/docs-2.json | 2 +-
models/apis/panorama/2019-07-24/api-2.json | 807 ++++++++++--------
models/apis/panorama/2019-07-24/docs-2.json | 83 +-
models/endpoints/endpoints.json | 21 +
service/elasticache/api.go | 5 -
service/panorama/api.go | 447 +++++++---
9 files changed, 933 insertions(+), 497 deletions(-)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 7c02045d5e5..c2066850c97 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,3 +1,11 @@
+Release v1.43.7 (2022-02-25)
+===
+
+### Service Client Updates
+* `service/elasticache`: Updates service documentation
+ * Doc only update for ElastiCache
+* `service/panorama`: Updates service API and documentation
+
Release v1.43.6 (2022-02-24)
===
diff --git a/aws/endpoints/defaults.go b/aws/endpoints/defaults.go
index d81b107f167..59910aecf0c 100644
--- a/aws/endpoints/defaults.go
+++ b/aws/endpoints/defaults.go
@@ -814,6 +814,61 @@ var awsPartition = partition{
}: endpoint{},
},
},
+ "amplifyuibuilder": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
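
The new entries resolve through the SDK's shared endpoint resolver; a quick sketch using the service ID from the block above:

    package main

    import (
    	"fmt"

    	"github.com/aws/aws-sdk-go/aws/endpoints"
    )

    func main() {
    	// Resolve the freshly added amplifyuibuilder endpoint for one of
    	// the regions listed above.
    	ep, err := endpoints.DefaultResolver().EndpointFor(
    		"amplifyuibuilder", "us-east-1")
    	if err != nil {
    		fmt.Println(err)
    		return
    	}
    	fmt.Println(ep.URL)
    }
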
"api.detective": service{
Defaults: endpointDefaults{
defaultKey{}: endpoint{
diff --git a/aws/version.go b/aws/version.go
index e44a2395e9e..af67d69a7e6 100644
--- a/aws/version.go
+++ b/aws/version.go
@@ -5,4 +5,4 @@ package aws
const SDKName = "aws-sdk-go"
// SDKVersion is the version of this SDK
-const SDKVersion = "1.43.6"
+const SDKVersion = "1.43.7"
diff --git a/models/apis/elasticache/2015-02-02/docs-2.json b/models/apis/elasticache/2015-02-02/docs-2.json
index 8eb1faec1c8..b5a8b65184c 100644
--- a/models/apis/elasticache/2015-02-02/docs-2.json
+++ b/models/apis/elasticache/2015-02-02/docs-2.json
@@ -2069,7 +2069,7 @@
"CreateCacheClusterMessage$CacheClusterId": "The node group (shard) identifier. This parameter is stored as a lowercase string.
Constraints:
-
A name must contain from 1 to 50 alphanumeric characters or hyphens.
-
The first character must be a letter.
-
A name cannot end with a hyphen or contain two consecutive hyphens.
",
"CreateCacheClusterMessage$ReplicationGroupId": "The ID of the replication group to which this cluster should belong. If this parameter is specified, the cluster is added to the specified replication group as a read replica; otherwise, the cluster is a standalone primary that is not part of any replication group.
If the specified replication group is Multi-AZ enabled and the Availability Zone is not specified, the cluster is created in Availability Zones that provide the best spread of read replicas across Availability Zones.
This parameter is only valid if the Engine
parameter is redis
.
",
"CreateCacheClusterMessage$PreferredAvailabilityZone": "The EC2 Availability Zone in which the cluster is created.
All nodes belonging to this cluster are placed in the preferred Availability Zone. If you want to create your nodes across multiple Availability Zones, use PreferredAvailabilityZones
.
Default: System chosen Availability Zone.
",
- "CreateCacheClusterMessage$CacheNodeType": "The compute and memory capacity of the nodes in the node group (shard).
The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.
-
General purpose:
-
Current generation:
M6g node types (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): cache.m6g.large
, cache.m6g.xlarge
, cache.m6g.2xlarge
, cache.m6g.4xlarge
, cache.m6g.8xlarge
, cache.m6g.12xlarge
, cache.m6g.16xlarge
For region availability, see Supported Node Types
M5 node types: cache.m5.large
, cache.m5.xlarge
, cache.m5.2xlarge
, cache.m5.4xlarge
, cache.m5.12xlarge
, cache.m5.24xlarge
M4 node types: cache.m4.large
, cache.m4.xlarge
, cache.m4.2xlarge
, cache.m4.4xlarge
, cache.m4.10xlarge
T4g node types (available only for Redis engine version 6.0 onward and Memcached engine version 1.5.16 onward): cache.t4g.micro
, cache.t4g.small
, cache.t4g.medium
T3 node types: cache.t3.micro
, cache.t3.small
, cache.t3.medium
T2 node types: cache.t2.micro
, cache.t2.small
, cache.t2.medium
-
Previous generation: (not recommended)
T1 node types: cache.t1.micro
M1 node types: cache.m1.small
, cache.m1.medium
, cache.m1.large
, cache.m1.xlarge
M3 node types: cache.m3.medium
, cache.m3.large
, cache.m3.xlarge
, cache.m3.2xlarge
-
Compute optimized:
-
Previous generation: (not recommended)
C1 node types: cache.c1.xlarge
-
Memory optimized with data tiering:
-
Current generation:
R6gd node types (available only for Redis engine version 6.2 onward).
cache.r6gd.xlarge
, cache.r6gd.2xlarge
, cache.r6gd.4xlarge
, cache.r6gd.8xlarge
, cache.r6gd.12xlarge
, cache.r6gd.16xlarge
-
Memory optimized:
-
Current generation:
R6g node types (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward).
cache.r6g.large
, cache.r6g.xlarge
, cache.r6g.2xlarge
, cache.r6g.4xlarge
, cache.r6g.8xlarge
, cache.r6g.12xlarge
, cache.r6g.16xlarge
For region availability, see Supported Node Types
R5 node types: cache.r5.large
, cache.r5.xlarge
, cache.r5.2xlarge
, cache.r5.4xlarge
, cache.r5.12xlarge
, cache.r5.24xlarge
R4 node types: cache.r4.large
, cache.r4.xlarge
, cache.r4.2xlarge
, cache.r4.4xlarge
, cache.r4.8xlarge
, cache.r4.16xlarge
-
Previous generation: (not recommended)
M2 node types: cache.m2.xlarge
, cache.m2.2xlarge
, cache.m2.4xlarge
R3 node types: cache.r3.large
, cache.r3.xlarge
, cache.r3.2xlarge
, cache.r3.4xlarge
, cache.r3.8xlarge
Additional node type info
-
All current generation instance types are created in Amazon VPC by default.
-
Redis append-only files (AOF) are not supported for T1 or T2 instances.
-
Redis Multi-AZ with automatic failover is not supported on T1 instances.
-
Redis configuration variables appendonly
and appendfsync
are not supported on Redis version 2.8.22 and later.
",
+ "CreateCacheClusterMessage$CacheNodeType": "The compute and memory capacity of the nodes in the node group (shard).
The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.
-
General purpose:
-
Current generation:
M6g node types (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): cache.m6g.large
, cache.m6g.xlarge
, cache.m6g.2xlarge
, cache.m6g.4xlarge
, cache.m6g.8xlarge
, cache.m6g.12xlarge
, cache.m6g.16xlarge
For region availability, see Supported Node Types
M5 node types: cache.m5.large
, cache.m5.xlarge
, cache.m5.2xlarge
, cache.m5.4xlarge
, cache.m5.12xlarge
, cache.m5.24xlarge
M4 node types: cache.m4.large
, cache.m4.xlarge
, cache.m4.2xlarge
, cache.m4.4xlarge
, cache.m4.10xlarge
T4g node types (available only for Redis engine version 6.0 onward and Memcached engine version 1.5.16 onward): cache.t4g.micro
, cache.t4g.small
, cache.t4g.medium
T3 node types: cache.t3.micro
, cache.t3.small
, cache.t3.medium
T2 node types: cache.t2.micro
, cache.t2.small
, cache.t2.medium
-
Previous generation: (not recommended)
T1 node types: cache.t1.micro
M1 node types: cache.m1.small
, cache.m1.medium
, cache.m1.large
, cache.m1.xlarge
M3 node types: cache.m3.medium
, cache.m3.large
, cache.m3.xlarge
, cache.m3.2xlarge
-
Compute optimized:
-
Previous generation: (not recommended)
C1 node types: cache.c1.xlarge
-
Memory optimized:
-
Current generation:
R6g node types (available only for Redis engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward).
cache.r6g.large
, cache.r6g.xlarge
, cache.r6g.2xlarge
, cache.r6g.4xlarge
, cache.r6g.8xlarge
, cache.r6g.12xlarge
, cache.r6g.16xlarge
For region availability, see Supported Node Types
R5 node types: cache.r5.large
, cache.r5.xlarge
, cache.r5.2xlarge
, cache.r5.4xlarge
, cache.r5.12xlarge
, cache.r5.24xlarge
R4 node types: cache.r4.large
, cache.r4.xlarge
, cache.r4.2xlarge
, cache.r4.4xlarge
, cache.r4.8xlarge
, cache.r4.16xlarge
-
Previous generation: (not recommended)
M2 node types: cache.m2.xlarge
, cache.m2.2xlarge
, cache.m2.4xlarge
R3 node types: cache.r3.large
, cache.r3.xlarge
, cache.r3.2xlarge
, cache.r3.4xlarge
, cache.r3.8xlarge
Additional node type info
-
All current generation instance types are created in Amazon VPC by default.
-
Redis append-only files (AOF) are not supported for T1 or T2 instances.
-
Redis Multi-AZ with automatic failover is not supported on T1 instances.
-
Redis configuration variables appendonly
and appendfsync
are not supported on Redis version 2.8.22 and later.
",
"CreateCacheClusterMessage$Engine": "The name of the cache engine to be used for this cluster.
Valid values for this parameter are: memcached
| redis
",
"CreateCacheClusterMessage$EngineVersion": "The version number of the cache engine to be used for this cluster. To view the supported cache engine versions, use the DescribeCacheEngineVersions operation.
Important: You can upgrade to a newer engine version (see Selecting a Cache Engine and Version), but you cannot downgrade to an earlier engine version. If you want to use an earlier engine version, you must delete the existing cluster or replication group and create it anew with the earlier engine version.
",
"CreateCacheClusterMessage$CacheParameterGroupName": "The name of the parameter group to associate with this cluster. If this argument is omitted, the default parameter group for the specified engine is used. You cannot use any parameter group which has cluster-enabled='yes'
when creating a cluster.
",
diff --git a/models/apis/panorama/2019-07-24/api-2.json b/models/apis/panorama/2019-07-24/api-2.json
index c0d61b79178..ace59a96091 100644
--- a/models/apis/panorama/2019-07-24/api-2.json
+++ b/models/apis/panorama/2019-07-24/api-2.json
@@ -17,272 +17,290 @@
"name":"CreateApplicationInstance",
"http":{
"method":"POST",
- "requestUri":"/application-instances"
+ "requestUri":"/application-instances",
+ "responseCode":200
},
"input":{"shape":"CreateApplicationInstanceRequest"},
"output":{"shape":"CreateApplicationInstanceResponse"},
"errors":[
{"shape":"ValidationException"},
- {"shape":"InternalServerException"},
{"shape":"AccessDeniedException"},
- {"shape":"ServiceQuotaExceededException"}
+ {"shape":"ServiceQuotaExceededException"},
+ {"shape":"InternalServerException"}
]
},
"CreateJobForDevices":{
"name":"CreateJobForDevices",
"http":{
"method":"POST",
- "requestUri":"/jobs"
+ "requestUri":"/jobs",
+ "responseCode":200
},
"input":{"shape":"CreateJobForDevicesRequest"},
"output":{"shape":"CreateJobForDevicesResponse"},
"errors":[
{"shape":"ConflictException"},
{"shape":"ValidationException"},
- {"shape":"InternalServerException"},
{"shape":"AccessDeniedException"},
- {"shape":"ResourceNotFoundException"}
+ {"shape":"ResourceNotFoundException"},
+ {"shape":"InternalServerException"}
]
},
"CreateNodeFromTemplateJob":{
"name":"CreateNodeFromTemplateJob",
"http":{
"method":"POST",
- "requestUri":"/packages/template-job"
+ "requestUri":"/packages/template-job",
+ "responseCode":200
},
"input":{"shape":"CreateNodeFromTemplateJobRequest"},
"output":{"shape":"CreateNodeFromTemplateJobResponse"},
"errors":[
+ {"shape":"ConflictException"},
{"shape":"ValidationException"},
- {"shape":"InternalServerException"},
{"shape":"AccessDeniedException"},
- {"shape":"ConflictException"}
+ {"shape":"InternalServerException"}
]
},
"CreatePackage":{
"name":"CreatePackage",
"http":{
"method":"POST",
- "requestUri":"/packages"
+ "requestUri":"/packages",
+ "responseCode":200
},
"input":{"shape":"CreatePackageRequest"},
"output":{"shape":"CreatePackageResponse"},
"errors":[
+ {"shape":"ConflictException"},
{"shape":"ValidationException"},
- {"shape":"InternalServerException"},
{"shape":"AccessDeniedException"},
- {"shape":"ConflictException"}
+ {"shape":"InternalServerException"}
]
},
"CreatePackageImportJob":{
"name":"CreatePackageImportJob",
"http":{
"method":"POST",
- "requestUri":"/packages/import-jobs"
+ "requestUri":"/packages/import-jobs",
+ "responseCode":200
},
"input":{"shape":"CreatePackageImportJobRequest"},
"output":{"shape":"CreatePackageImportJobResponse"},
"errors":[
+ {"shape":"ConflictException"},
{"shape":"ValidationException"},
- {"shape":"InternalServerException"},
{"shape":"AccessDeniedException"},
- {"shape":"ConflictException"}
+ {"shape":"InternalServerException"}
]
},
"DeleteDevice":{
"name":"DeleteDevice",
"http":{
"method":"DELETE",
- "requestUri":"/devices/{DeviceId}"
+ "requestUri":"/devices/{DeviceId}",
+ "responseCode":200
},
"input":{"shape":"DeleteDeviceRequest"},
"output":{"shape":"DeleteDeviceResponse"},
"errors":[
{"shape":"ConflictException"},
{"shape":"ValidationException"},
- {"shape":"InternalServerException"},
{"shape":"AccessDeniedException"},
- {"shape":"ResourceNotFoundException"}
+ {"shape":"ResourceNotFoundException"},
+ {"shape":"InternalServerException"}
]
},
"DeletePackage":{
"name":"DeletePackage",
"http":{
"method":"DELETE",
- "requestUri":"/packages/{PackageId}"
+ "requestUri":"/packages/{PackageId}",
+ "responseCode":200
},
"input":{"shape":"DeletePackageRequest"},
"output":{"shape":"DeletePackageResponse"},
"errors":[
+ {"shape":"ConflictException"},
{"shape":"ValidationException"},
- {"shape":"InternalServerException"},
{"shape":"AccessDeniedException"},
- {"shape":"ConflictException"},
- {"shape":"ResourceNotFoundException"}
+ {"shape":"ResourceNotFoundException"},
+ {"shape":"InternalServerException"}
]
},
"DeregisterPackageVersion":{
"name":"DeregisterPackageVersion",
"http":{
"method":"DELETE",
- "requestUri":"/packages/{PackageId}/versions/{PackageVersion}/patch/{PatchVersion}"
+ "requestUri":"/packages/{PackageId}/versions/{PackageVersion}/patch/{PatchVersion}",
+ "responseCode":200
},
"input":{"shape":"DeregisterPackageVersionRequest"},
"output":{"shape":"DeregisterPackageVersionResponse"},
"errors":[
+ {"shape":"ConflictException"},
{"shape":"ValidationException"},
- {"shape":"InternalServerException"},
{"shape":"AccessDeniedException"},
- {"shape":"ConflictException"},
- {"shape":"ResourceNotFoundException"}
+ {"shape":"ResourceNotFoundException"},
+ {"shape":"InternalServerException"}
]
},
"DescribeApplicationInstance":{
"name":"DescribeApplicationInstance",
"http":{
"method":"GET",
- "requestUri":"/application-instances/{applicationInstanceId}"
+ "requestUri":"/application-instances/{ApplicationInstanceId}",
+ "responseCode":200
},
"input":{"shape":"DescribeApplicationInstanceRequest"},
"output":{"shape":"DescribeApplicationInstanceResponse"},
"errors":[
{"shape":"ConflictException"},
{"shape":"ValidationException"},
- {"shape":"InternalServerException"},
{"shape":"AccessDeniedException"},
- {"shape":"ResourceNotFoundException"}
+ {"shape":"ResourceNotFoundException"},
+ {"shape":"InternalServerException"}
]
},
"DescribeApplicationInstanceDetails":{
"name":"DescribeApplicationInstanceDetails",
"http":{
"method":"GET",
- "requestUri":"/application-instances/{applicationInstanceId}/details"
+ "requestUri":"/application-instances/{ApplicationInstanceId}/details",
+ "responseCode":200
},
"input":{"shape":"DescribeApplicationInstanceDetailsRequest"},
"output":{"shape":"DescribeApplicationInstanceDetailsResponse"},
"errors":[
{"shape":"ConflictException"},
{"shape":"ValidationException"},
- {"shape":"InternalServerException"},
{"shape":"AccessDeniedException"},
- {"shape":"ResourceNotFoundException"}
+ {"shape":"ResourceNotFoundException"},
+ {"shape":"InternalServerException"}
]
},
"DescribeDevice":{
"name":"DescribeDevice",
"http":{
"method":"GET",
- "requestUri":"/devices/{DeviceId}"
+ "requestUri":"/devices/{DeviceId}",
+ "responseCode":200
},
"input":{"shape":"DescribeDeviceRequest"},
"output":{"shape":"DescribeDeviceResponse"},
"errors":[
{"shape":"ValidationException"},
- {"shape":"InternalServerException"},
{"shape":"AccessDeniedException"},
- {"shape":"ResourceNotFoundException"}
+ {"shape":"ResourceNotFoundException"},
+ {"shape":"InternalServerException"}
]
},
"DescribeDeviceJob":{
"name":"DescribeDeviceJob",
"http":{
"method":"GET",
- "requestUri":"/jobs/{JobId}"
+ "requestUri":"/jobs/{JobId}",
+ "responseCode":200
},
"input":{"shape":"DescribeDeviceJobRequest"},
"output":{"shape":"DescribeDeviceJobResponse"},
"errors":[
{"shape":"ConflictException"},
{"shape":"ValidationException"},
- {"shape":"InternalServerException"},
{"shape":"AccessDeniedException"},
- {"shape":"ResourceNotFoundException"}
+ {"shape":"ResourceNotFoundException"},
+ {"shape":"InternalServerException"}
]
},
"DescribeNode":{
"name":"DescribeNode",
"http":{
"method":"GET",
- "requestUri":"/nodes/{NodeId}"
+ "requestUri":"/nodes/{NodeId}",
+ "responseCode":200
},
"input":{"shape":"DescribeNodeRequest"},
"output":{"shape":"DescribeNodeResponse"},
"errors":[
{"shape":"ConflictException"},
{"shape":"ValidationException"},
- {"shape":"InternalServerException"},
{"shape":"AccessDeniedException"},
- {"shape":"ResourceNotFoundException"}
+ {"shape":"ResourceNotFoundException"},
+ {"shape":"InternalServerException"}
]
},
"DescribeNodeFromTemplateJob":{
"name":"DescribeNodeFromTemplateJob",
"http":{
"method":"GET",
- "requestUri":"/packages/template-job/{JobId}"
+ "requestUri":"/packages/template-job/{JobId}",
+ "responseCode":200
},
"input":{"shape":"DescribeNodeFromTemplateJobRequest"},
"output":{"shape":"DescribeNodeFromTemplateJobResponse"},
"errors":[
+ {"shape":"ConflictException"},
{"shape":"ValidationException"},
- {"shape":"InternalServerException"},
{"shape":"AccessDeniedException"},
- {"shape":"ConflictException"}
+ {"shape":"InternalServerException"}
]
},
"DescribePackage":{
"name":"DescribePackage",
"http":{
"method":"GET",
- "requestUri":"/packages/metadata/{PackageId}"
+ "requestUri":"/packages/metadata/{PackageId}",
+ "responseCode":200
},
"input":{"shape":"DescribePackageRequest"},
"output":{"shape":"DescribePackageResponse"},
"errors":[
+ {"shape":"ConflictException"},
{"shape":"ValidationException"},
- {"shape":"InternalServerException"},
{"shape":"AccessDeniedException"},
- {"shape":"ConflictException"},
- {"shape":"ResourceNotFoundException"}
+ {"shape":"ResourceNotFoundException"},
+ {"shape":"InternalServerException"}
]
},
"DescribePackageImportJob":{
"name":"DescribePackageImportJob",
"http":{
"method":"GET",
- "requestUri":"/packages/import-jobs/{JobId}"
+ "requestUri":"/packages/import-jobs/{JobId}",
+ "responseCode":200
},
"input":{"shape":"DescribePackageImportJobRequest"},
"output":{"shape":"DescribePackageImportJobResponse"},
"errors":[
+ {"shape":"ConflictException"},
{"shape":"ValidationException"},
- {"shape":"InternalServerException"},
{"shape":"AccessDeniedException"},
- {"shape":"ConflictException"}
+ {"shape":"InternalServerException"}
]
},
"DescribePackageVersion":{
"name":"DescribePackageVersion",
"http":{
"method":"GET",
- "requestUri":"/packages/metadata/{PackageId}/versions/{PackageVersion}"
+ "requestUri":"/packages/metadata/{PackageId}/versions/{PackageVersion}",
+ "responseCode":200
},
"input":{"shape":"DescribePackageVersionRequest"},
"output":{"shape":"DescribePackageVersionResponse"},
"errors":[
+ {"shape":"ConflictException"},
{"shape":"ValidationException"},
- {"shape":"InternalServerException"},
{"shape":"AccessDeniedException"},
- {"shape":"ConflictException"},
- {"shape":"ResourceNotFoundException"}
+ {"shape":"ResourceNotFoundException"},
+ {"shape":"InternalServerException"}
]
},
"ListApplicationInstanceDependencies":{
"name":"ListApplicationInstanceDependencies",
"http":{
"method":"GET",
- "requestUri":"/application-instances/{applicationInstanceId}/package-dependencies"
+ "requestUri":"/application-instances/{ApplicationInstanceId}/package-dependencies",
+ "responseCode":200
},
"input":{"shape":"ListApplicationInstanceDependenciesRequest"},
"output":{"shape":"ListApplicationInstanceDependenciesResponse"},
@@ -295,7 +313,8 @@
"name":"ListApplicationInstanceNodeInstances",
"http":{
"method":"GET",
- "requestUri":"/application-instances/{applicationInstanceId}/node-instances"
+ "requestUri":"/application-instances/{ApplicationInstanceId}/node-instances",
+ "responseCode":200
},
"input":{"shape":"ListApplicationInstanceNodeInstancesRequest"},
"output":{"shape":"ListApplicationInstanceNodeInstancesResponse"},
@@ -308,7 +327,8 @@
"name":"ListApplicationInstances",
"http":{
"method":"GET",
- "requestUri":"/application-instances"
+ "requestUri":"/application-instances",
+ "responseCode":200
},
"input":{"shape":"ListApplicationInstancesRequest"},
"output":{"shape":"ListApplicationInstancesResponse"},
@@ -321,53 +341,57 @@
"name":"ListDevices",
"http":{
"method":"GET",
- "requestUri":"/devices"
+ "requestUri":"/devices",
+ "responseCode":200
},
"input":{"shape":"ListDevicesRequest"},
"output":{"shape":"ListDevicesResponse"},
"errors":[
{"shape":"ConflictException"},
{"shape":"ValidationException"},
- {"shape":"InternalServerException"},
- {"shape":"AccessDeniedException"}
+ {"shape":"AccessDeniedException"},
+ {"shape":"InternalServerException"}
]
},
"ListDevicesJobs":{
"name":"ListDevicesJobs",
"http":{
"method":"GET",
- "requestUri":"/jobs"
+ "requestUri":"/jobs",
+ "responseCode":200
},
"input":{"shape":"ListDevicesJobsRequest"},
"output":{"shape":"ListDevicesJobsResponse"},
"errors":[
{"shape":"ConflictException"},
{"shape":"ValidationException"},
- {"shape":"InternalServerException"},
{"shape":"AccessDeniedException"},
- {"shape":"ResourceNotFoundException"}
+ {"shape":"ResourceNotFoundException"},
+ {"shape":"InternalServerException"}
]
},
"ListNodeFromTemplateJobs":{
"name":"ListNodeFromTemplateJobs",
"http":{
"method":"GET",
- "requestUri":"/packages/template-job"
+ "requestUri":"/packages/template-job",
+ "responseCode":200
},
"input":{"shape":"ListNodeFromTemplateJobsRequest"},
"output":{"shape":"ListNodeFromTemplateJobsResponse"},
"errors":[
+ {"shape":"ConflictException"},
{"shape":"ValidationException"},
- {"shape":"InternalServerException"},
{"shape":"AccessDeniedException"},
- {"shape":"ConflictException"}
+ {"shape":"InternalServerException"}
]
},
"ListNodes":{
"name":"ListNodes",
"http":{
"method":"GET",
- "requestUri":"/nodes"
+ "requestUri":"/nodes",
+ "responseCode":200
},
"input":{"shape":"ListNodesRequest"},
"output":{"shape":"ListNodesResponse"},
@@ -381,44 +405,47 @@
"name":"ListPackageImportJobs",
"http":{
"method":"GET",
- "requestUri":"/packages/import-jobs"
+ "requestUri":"/packages/import-jobs",
+ "responseCode":200
},
"input":{"shape":"ListPackageImportJobsRequest"},
"output":{"shape":"ListPackageImportJobsResponse"},
"errors":[
+ {"shape":"ConflictException"},
{"shape":"ValidationException"},
- {"shape":"InternalServerException"},
{"shape":"AccessDeniedException"},
- {"shape":"ConflictException"}
+ {"shape":"InternalServerException"}
]
},
"ListPackages":{
"name":"ListPackages",
"http":{
"method":"GET",
- "requestUri":"/packages"
+ "requestUri":"/packages",
+ "responseCode":200
},
"input":{"shape":"ListPackagesRequest"},
"output":{"shape":"ListPackagesResponse"},
"errors":[
+ {"shape":"ConflictException"},
{"shape":"ValidationException"},
- {"shape":"InternalServerException"},
{"shape":"AccessDeniedException"},
- {"shape":"ConflictException"},
- {"shape":"ResourceNotFoundException"}
+ {"shape":"ResourceNotFoundException"},
+ {"shape":"InternalServerException"}
]
},
"ListTagsForResource":{
"name":"ListTagsForResource",
"http":{
"method":"GET",
- "requestUri":"/tags/{ResourceArn}"
+ "requestUri":"/tags/{ResourceArn}",
+ "responseCode":200
},
"input":{"shape":"ListTagsForResourceRequest"},
"output":{"shape":"ListTagsForResourceResponse"},
"errors":[
- {"shape":"ResourceNotFoundException"},
{"shape":"ValidationException"},
+ {"shape":"ResourceNotFoundException"},
{"shape":"InternalServerException"}
]
},
@@ -426,60 +453,64 @@
"name":"ProvisionDevice",
"http":{
"method":"POST",
- "requestUri":"/devices"
+ "requestUri":"/devices",
+ "responseCode":200
},
"input":{"shape":"ProvisionDeviceRequest"},
"output":{"shape":"ProvisionDeviceResponse"},
"errors":[
{"shape":"ConflictException"},
{"shape":"ValidationException"},
- {"shape":"InternalServerException"},
{"shape":"AccessDeniedException"},
- {"shape":"ServiceQuotaExceededException"}
+ {"shape":"ServiceQuotaExceededException"},
+ {"shape":"InternalServerException"}
]
},
"RegisterPackageVersion":{
"name":"RegisterPackageVersion",
"http":{
"method":"PUT",
- "requestUri":"/packages/{PackageId}/versions/{PackageVersion}/patch/{PatchVersion}"
+ "requestUri":"/packages/{PackageId}/versions/{PackageVersion}/patch/{PatchVersion}",
+ "responseCode":200
},
"input":{"shape":"RegisterPackageVersionRequest"},
"output":{"shape":"RegisterPackageVersionResponse"},
"errors":[
+ {"shape":"ConflictException"},
{"shape":"ValidationException"},
- {"shape":"InternalServerException"},
{"shape":"AccessDeniedException"},
- {"shape":"ConflictException"}
+ {"shape":"InternalServerException"}
]
},
"RemoveApplicationInstance":{
"name":"RemoveApplicationInstance",
"http":{
"method":"DELETE",
- "requestUri":"/application-instances/{applicationInstanceId}"
+ "requestUri":"/application-instances/{ApplicationInstanceId}",
+ "responseCode":200
},
"input":{"shape":"RemoveApplicationInstanceRequest"},
"output":{"shape":"RemoveApplicationInstanceResponse"},
"errors":[
{"shape":"ConflictException"},
{"shape":"ValidationException"},
- {"shape":"InternalServerException"},
{"shape":"AccessDeniedException"},
- {"shape":"ResourceNotFoundException"}
+ {"shape":"ResourceNotFoundException"},
+ {"shape":"InternalServerException"}
]
},
"TagResource":{
"name":"TagResource",
"http":{
"method":"POST",
- "requestUri":"/tags/{ResourceArn}"
+ "requestUri":"/tags/{ResourceArn}",
+ "responseCode":200
},
"input":{"shape":"TagResourceRequest"},
"output":{"shape":"TagResourceResponse"},
"errors":[
- {"shape":"ResourceNotFoundException"},
{"shape":"ValidationException"},
+ {"shape":"ResourceNotFoundException"},
{"shape":"InternalServerException"}
]
},
@@ -487,13 +518,14 @@
"name":"UntagResource",
"http":{
"method":"DELETE",
- "requestUri":"/tags/{ResourceArn}"
+ "requestUri":"/tags/{ResourceArn}",
+ "responseCode":200
},
"input":{"shape":"UntagResourceRequest"},
"output":{"shape":"UntagResourceResponse"},
"errors":[
- {"shape":"ResourceNotFoundException"},
{"shape":"ValidationException"},
+ {"shape":"ResourceNotFoundException"},
{"shape":"InternalServerException"}
]
},
@@ -501,16 +533,17 @@
"name":"UpdateDeviceMetadata",
"http":{
"method":"PUT",
- "requestUri":"/devices/{DeviceId}"
+ "requestUri":"/devices/{DeviceId}",
+ "responseCode":200
},
"input":{"shape":"UpdateDeviceMetadataRequest"},
"output":{"shape":"UpdateDeviceMetadataResponse"},
"errors":[
{"shape":"ConflictException"},
{"shape":"ValidationException"},
- {"shape":"InternalServerException"},
{"shape":"AccessDeniedException"},
- {"shape":"ResourceNotFoundException"}
+ {"shape":"ResourceNotFoundException"},
+ {"shape":"InternalServerException"}
]
}
},
@@ -521,22 +554,35 @@
"members":{
"Message":{"shape":"String"}
},
- "error":{"httpStatusCode":403},
+ "error":{
+ "httpStatusCode":403,
+ "senderFault":true
+ },
"exception":true
},
+ "AlternateSoftwareMetadata":{
+ "type":"structure",
+ "members":{
+ "Version":{"shape":"Version"}
+ }
+ },
+ "AlternateSoftwares":{
+ "type":"list",
+ "member":{"shape":"AlternateSoftwareMetadata"}
+ },
"ApplicationInstance":{
"type":"structure",
"members":{
- "Name":{"shape":"ApplicationInstanceName"},
"ApplicationInstanceId":{"shape":"ApplicationInstanceId"},
+ "Arn":{"shape":"ApplicationInstanceArn"},
+ "CreatedTime":{"shape":"TimeStamp"},
"DefaultRuntimeContextDevice":{"shape":"DefaultRuntimeContextDevice"},
"DefaultRuntimeContextDeviceName":{"shape":"DeviceName"},
"Description":{"shape":"Description"},
- "Status":{"shape":"ApplicationInstanceStatus"},
"HealthStatus":{"shape":"ApplicationInstanceHealthStatus"},
+ "Name":{"shape":"ApplicationInstanceName"},
+ "Status":{"shape":"ApplicationInstanceStatus"},
"StatusDescription":{"shape":"ApplicationInstanceStatusDescription"},
- "CreatedTime":{"shape":"TimeStamp"},
- "Arn":{"shape":"ApplicationInstanceArn"},
"Tags":{"shape":"TagMap"}
}
},
@@ -612,13 +658,16 @@
"ResourceType"
],
"members":{
+ "ErrorArguments":{"shape":"ConflictExceptionErrorArgumentList"},
+ "ErrorId":{"shape":"String"},
"Message":{"shape":"String"},
"ResourceId":{"shape":"String"},
- "ResourceType":{"shape":"String"},
- "ErrorId":{"shape":"String"},
- "ErrorArguments":{"shape":"ConflictExceptionErrorArgumentList"}
+ "ResourceType":{"shape":"String"}
+ },
+ "error":{
+ "httpStatusCode":409,
+ "senderFault":true
},
- "error":{"httpStatusCode":409},
"exception":true
},
"ConflictExceptionErrorArgument":{
@@ -646,17 +695,17 @@
"CreateApplicationInstanceRequest":{
"type":"structure",
"required":[
- "ManifestPayload",
- "DefaultRuntimeContextDevice"
+ "DefaultRuntimeContextDevice",
+ "ManifestPayload"
],
"members":{
- "Name":{"shape":"ApplicationInstanceName"},
+ "ApplicationInstanceIdToReplace":{"shape":"ApplicationInstanceId"},
+ "DefaultRuntimeContextDevice":{"shape":"DefaultRuntimeContextDevice"},
"Description":{"shape":"Description"},
- "ManifestPayload":{"shape":"ManifestPayload"},
"ManifestOverridesPayload":{"shape":"ManifestOverridesPayload"},
- "ApplicationInstanceIdToReplace":{"shape":"ApplicationInstanceId"},
+ "ManifestPayload":{"shape":"ManifestPayload"},
+ "Name":{"shape":"ApplicationInstanceName"},
"RuntimeRoleArn":{"shape":"RuntimeRoleArn"},
- "DefaultRuntimeContextDevice":{"shape":"DefaultRuntimeContextDevice"},
"Tags":{"shape":"TagMap"}
}
},
@@ -690,20 +739,20 @@
"CreateNodeFromTemplateJobRequest":{
"type":"structure",
"required":[
- "TemplateType",
+ "NodeName",
"OutputPackageName",
"OutputPackageVersion",
- "NodeName",
- "TemplateParameters"
+ "TemplateParameters",
+ "TemplateType"
],
"members":{
- "TemplateType":{"shape":"TemplateType"},
+ "JobTags":{"shape":"JobTagsList"},
+ "NodeDescription":{"shape":"Description"},
+ "NodeName":{"shape":"NodeName"},
"OutputPackageName":{"shape":"NodePackageName"},
"OutputPackageVersion":{"shape":"NodePackageVersion"},
- "NodeName":{"shape":"NodeName"},
- "NodeDescription":{"shape":"Description"},
"TemplateParameters":{"shape":"TemplateParametersMap"},
- "JobTags":{"shape":"JobTagsList"}
+ "TemplateType":{"shape":"TemplateType"}
}
},
"CreateNodeFromTemplateJobResponse":{
@@ -716,17 +765,17 @@
"CreatePackageImportJobRequest":{
"type":"structure",
"required":[
- "JobType",
+ "ClientToken",
"InputConfig",
- "OutputConfig",
- "ClientToken"
+ "JobType",
+ "OutputConfig"
],
"members":{
- "JobType":{"shape":"PackageImportJobType"},
- "InputConfig":{"shape":"PackageImportJobInputConfig"},
- "OutputConfig":{"shape":"PackageImportJobOutputConfig"},
"ClientToken":{"shape":"ClientToken"},
- "JobTags":{"shape":"JobTagsList"}
+ "InputConfig":{"shape":"PackageImportJobInputConfig"},
+ "JobTags":{"shape":"JobTagsList"},
+ "JobType":{"shape":"PackageImportJobType"},
+ "OutputConfig":{"shape":"PackageImportJobOutputConfig"}
}
},
"CreatePackageImportJobResponse":{
@@ -748,8 +797,8 @@
"type":"structure",
"required":["StorageLocation"],
"members":{
- "PackageId":{"shape":"NodePackageId"},
"Arn":{"shape":"NodePackageArn"},
+ "PackageId":{"shape":"NodePackageId"},
"StorageLocation":{"shape":"StorageLocation"}
}
},
@@ -792,15 +841,15 @@
"type":"structure",
"required":["PackageId"],
"members":{
- "PackageId":{
- "shape":"NodePackageId",
- "location":"uri",
- "locationName":"PackageId"
- },
"ForceDelete":{
"shape":"Boolean",
"location":"querystring",
"locationName":"ForceDelete"
+ },
+ "PackageId":{
+ "shape":"NodePackageId",
+ "location":"uri",
+ "locationName":"PackageId"
}
}
},
@@ -856,21 +905,21 @@
"ApplicationInstanceId":{
"shape":"ApplicationInstanceId",
"location":"uri",
- "locationName":"applicationInstanceId"
+ "locationName":"ApplicationInstanceId"
}
}
},
"DescribeApplicationInstanceDetailsResponse":{
"type":"structure",
"members":{
- "Name":{"shape":"ApplicationInstanceName"},
- "Description":{"shape":"Description"},
- "DefaultRuntimeContextDevice":{"shape":"DefaultRuntimeContextDevice"},
- "ManifestPayload":{"shape":"ManifestPayload"},
- "ManifestOverridesPayload":{"shape":"ManifestOverridesPayload"},
+ "ApplicationInstanceId":{"shape":"ApplicationInstanceId"},
"ApplicationInstanceIdToReplace":{"shape":"ApplicationInstanceId"},
"CreatedTime":{"shape":"TimeStamp"},
- "ApplicationInstanceId":{"shape":"ApplicationInstanceId"}
+ "DefaultRuntimeContextDevice":{"shape":"DefaultRuntimeContextDevice"},
+ "Description":{"shape":"Description"},
+ "ManifestOverridesPayload":{"shape":"ManifestOverridesPayload"},
+ "ManifestPayload":{"shape":"ManifestPayload"},
+ "Name":{"shape":"ApplicationInstanceName"}
}
},
"DescribeApplicationInstanceRequest":{
@@ -880,26 +929,26 @@
"ApplicationInstanceId":{
"shape":"ApplicationInstanceId",
"location":"uri",
- "locationName":"applicationInstanceId"
+ "locationName":"ApplicationInstanceId"
}
}
},
"DescribeApplicationInstanceResponse":{
"type":"structure",
"members":{
- "Name":{"shape":"ApplicationInstanceName"},
- "Description":{"shape":"Description"},
+ "ApplicationInstanceId":{"shape":"ApplicationInstanceId"},
+ "ApplicationInstanceIdToReplace":{"shape":"ApplicationInstanceId"},
+ "Arn":{"shape":"ApplicationInstanceArn"},
+ "CreatedTime":{"shape":"TimeStamp"},
"DefaultRuntimeContextDevice":{"shape":"DefaultRuntimeContextDevice"},
"DefaultRuntimeContextDeviceName":{"shape":"DeviceName"},
- "ApplicationInstanceIdToReplace":{"shape":"ApplicationInstanceId"},
+ "Description":{"shape":"Description"},
+ "HealthStatus":{"shape":"ApplicationInstanceHealthStatus"},
+ "LastUpdatedTime":{"shape":"TimeStamp"},
+ "Name":{"shape":"ApplicationInstanceName"},
"RuntimeRoleArn":{"shape":"RuntimeRoleArn"},
"Status":{"shape":"ApplicationInstanceStatus"},
- "HealthStatus":{"shape":"ApplicationInstanceHealthStatus"},
"StatusDescription":{"shape":"ApplicationInstanceStatusDescription"},
- "CreatedTime":{"shape":"TimeStamp"},
- "LastUpdatedTime":{"shape":"TimeStamp"},
- "ApplicationInstanceId":{"shape":"ApplicationInstanceId"},
- "Arn":{"shape":"ApplicationInstanceArn"},
"Tags":{"shape":"TagMap"}
}
},
@@ -917,14 +966,14 @@
"DescribeDeviceJobResponse":{
"type":"structure",
"members":{
- "JobId":{"shape":"JobId"},
- "DeviceId":{"shape":"DeviceId"},
+ "CreatedTime":{"shape":"UpdateCreatedTime"},
"DeviceArn":{"shape":"DeviceArn"},
+ "DeviceId":{"shape":"DeviceId"},
"DeviceName":{"shape":"DeviceName"},
"DeviceType":{"shape":"DeviceType"},
"ImageVersion":{"shape":"ImageVersion"},
- "Status":{"shape":"UpdateProgress"},
- "CreatedTime":{"shape":"UpdateCreatedTime"}
+ "JobId":{"shape":"JobId"},
+ "Status":{"shape":"UpdateProgress"}
}
},
"DescribeDeviceRequest":{
@@ -941,21 +990,23 @@
"DescribeDeviceResponse":{
"type":"structure",
"members":{
- "DeviceId":{"shape":"DeviceId"},
- "Name":{"shape":"DeviceName"},
+ "AlternateSoftwares":{"shape":"AlternateSoftwares"},
"Arn":{"shape":"DeviceArn"},
+ "CreatedTime":{"shape":"CreatedTime"},
+ "CurrentNetworkingStatus":{"shape":"NetworkStatus"},
+ "CurrentSoftware":{"shape":"CurrentSoftware"},
"Description":{"shape":"Description"},
- "Type":{"shape":"DeviceType"},
"DeviceConnectionStatus":{"shape":"DeviceConnectionStatus"},
- "CreatedTime":{"shape":"CreatedTime"},
- "ProvisioningStatus":{"shape":"DeviceStatus"},
+ "DeviceId":{"shape":"DeviceId"},
+ "LatestAlternateSoftware":{"shape":"LatestAlternateSoftware"},
"LatestSoftware":{"shape":"LatestSoftware"},
- "CurrentSoftware":{"shape":"CurrentSoftware"},
+ "LeaseExpirationTime":{"shape":"LeaseExpirationTime"},
+ "Name":{"shape":"DeviceName"},
+ "NetworkingConfiguration":{"shape":"NetworkPayload"},
+ "ProvisioningStatus":{"shape":"DeviceStatus"},
"SerialNumber":{"shape":"DeviceSerialNumber"},
"Tags":{"shape":"TagMap"},
- "NetworkingConfiguration":{"shape":"NetworkPayload"},
- "CurrentNetworkingStatus":{"shape":"NetworkStatus"},
- "LeaseExpirationTime":{"shape":"LeaseExpirationTime"}
+ "Type":{"shape":"DeviceType"}
}
},
"DescribeNodeFromTemplateJobRequest":{
@@ -972,30 +1023,30 @@
"DescribeNodeFromTemplateJobResponse":{
"type":"structure",
"required":[
- "JobId",
- "Status",
- "StatusMessage",
"CreatedTime",
+ "JobId",
"LastUpdatedTime",
+ "NodeName",
"OutputPackageName",
"OutputPackageVersion",
- "NodeName",
- "TemplateType",
- "TemplateParameters"
+ "Status",
+ "StatusMessage",
+ "TemplateParameters",
+ "TemplateType"
],
"members":{
- "JobId":{"shape":"JobId"},
- "Status":{"shape":"NodeFromTemplateJobStatus"},
- "StatusMessage":{"shape":"NodeFromTemplateJobStatusMessage"},
"CreatedTime":{"shape":"CreatedTime"},
+ "JobId":{"shape":"JobId"},
+ "JobTags":{"shape":"JobTagsList"},
"LastUpdatedTime":{"shape":"LastUpdatedTime"},
+ "NodeDescription":{"shape":"Description"},
+ "NodeName":{"shape":"NodeName"},
"OutputPackageName":{"shape":"NodePackageName"},
"OutputPackageVersion":{"shape":"NodePackageVersion"},
- "NodeName":{"shape":"NodeName"},
- "NodeDescription":{"shape":"Description"},
- "TemplateType":{"shape":"TemplateType"},
+ "Status":{"shape":"NodeFromTemplateJobStatus"},
+ "StatusMessage":{"shape":"NodeFromTemplateJobStatusMessage"},
"TemplateParameters":{"shape":"TemplateParametersMap"},
- "JobTags":{"shape":"JobTagsList"}
+ "TemplateType":{"shape":"TemplateType"}
}
},
"DescribeNodeRequest":{
@@ -1017,34 +1068,34 @@
"DescribeNodeResponse":{
"type":"structure",
"required":[
- "NodeId",
- "Name",
"Category",
+ "CreatedTime",
+ "Description",
+ "LastUpdatedTime",
+ "Name",
+ "NodeId",
+ "NodeInterface",
"OwnerAccount",
- "PackageName",
"PackageId",
+ "PackageName",
"PackageVersion",
- "PatchVersion",
- "NodeInterface",
- "Description",
- "CreatedTime",
- "LastUpdatedTime"
+ "PatchVersion"
],
"members":{
- "NodeId":{"shape":"NodeId"},
- "Name":{"shape":"NodeName"},
+ "AssetName":{"shape":"NodeAssetName"},
"Category":{"shape":"NodeCategory"},
+ "CreatedTime":{"shape":"TimeStamp"},
+ "Description":{"shape":"Description"},
+ "LastUpdatedTime":{"shape":"TimeStamp"},
+ "Name":{"shape":"NodeName"},
+ "NodeId":{"shape":"NodeId"},
+ "NodeInterface":{"shape":"NodeInterface"},
"OwnerAccount":{"shape":"PackageOwnerAccount"},
- "PackageName":{"shape":"NodePackageName"},
- "PackageId":{"shape":"NodePackageId"},
"PackageArn":{"shape":"NodePackageArn"},
+ "PackageId":{"shape":"NodePackageId"},
+ "PackageName":{"shape":"NodePackageName"},
"PackageVersion":{"shape":"NodePackageVersion"},
- "PatchVersion":{"shape":"NodePackagePatchVersion"},
- "NodeInterface":{"shape":"NodeInterface"},
- "AssetName":{"shape":"NodeAssetName"},
- "Description":{"shape":"Description"},
- "CreatedTime":{"shape":"TimeStamp"},
- "LastUpdatedTime":{"shape":"TimeStamp"}
+ "PatchVersion":{"shape":"NodePackagePatchVersion"}
}
},
"DescribePackageImportJobRequest":{
@@ -1061,28 +1112,28 @@
"DescribePackageImportJobResponse":{
"type":"structure",
"required":[
+ "CreatedTime",
+ "InputConfig",
"JobId",
"JobType",
- "InputConfig",
- "OutputConfig",
- "Output",
- "CreatedTime",
"LastUpdatedTime",
+ "Output",
+ "OutputConfig",
"Status",
"StatusMessage"
],
"members":{
- "JobId":{"shape":"JobId"},
"ClientToken":{"shape":"ClientToken"},
- "JobType":{"shape":"PackageImportJobType"},
- "InputConfig":{"shape":"PackageImportJobInputConfig"},
- "OutputConfig":{"shape":"PackageImportJobOutputConfig"},
- "Output":{"shape":"PackageImportJobOutput"},
"CreatedTime":{"shape":"CreatedTime"},
+ "InputConfig":{"shape":"PackageImportJobInputConfig"},
+ "JobId":{"shape":"JobId"},
+ "JobTags":{"shape":"JobTagsList"},
+ "JobType":{"shape":"PackageImportJobType"},
"LastUpdatedTime":{"shape":"LastUpdatedTime"},
+ "Output":{"shape":"PackageImportJobOutput"},
+ "OutputConfig":{"shape":"PackageImportJobOutputConfig"},
"Status":{"shape":"PackageImportJobStatus"},
- "StatusMessage":{"shape":"PackageImportJobStatusMessage"},
- "JobTags":{"shape":"JobTagsList"}
+ "StatusMessage":{"shape":"PackageImportJobStatusMessage"}
}
},
"DescribePackageRequest":{
@@ -1099,22 +1150,22 @@
"DescribePackageResponse":{
"type":"structure",
"required":[
+ "Arn",
+ "CreatedTime",
"PackageId",
"PackageName",
- "Arn",
"StorageLocation",
- "CreatedTime",
"Tags"
],
"members":{
+ "Arn":{"shape":"NodePackageArn"},
+ "CreatedTime":{"shape":"TimeStamp"},
"PackageId":{"shape":"NodePackageId"},
"PackageName":{"shape":"NodePackageName"},
- "Arn":{"shape":"NodePackageArn"},
- "StorageLocation":{"shape":"StorageLocation"},
"ReadAccessPrincipalArns":{"shape":"PrincipalArnsList"},
- "WriteAccessPrincipalArns":{"shape":"PrincipalArnsList"},
- "CreatedTime":{"shape":"TimeStamp"},
- "Tags":{"shape":"TagMap"}
+ "StorageLocation":{"shape":"StorageLocation"},
+ "Tags":{"shape":"TagMap"},
+ "WriteAccessPrincipalArns":{"shape":"PrincipalArnsList"}
}
},
"DescribePackageVersionRequest":{
@@ -1149,24 +1200,24 @@
"DescribePackageVersionResponse":{
"type":"structure",
"required":[
+ "IsLatestPatch",
"PackageId",
"PackageName",
"PackageVersion",
"PatchVersion",
- "IsLatestPatch",
"Status"
],
"members":{
+ "IsLatestPatch":{"shape":"Boolean"},
"OwnerAccount":{"shape":"PackageOwnerAccount"},
- "PackageId":{"shape":"NodePackageId"},
"PackageArn":{"shape":"NodePackageArn"},
+ "PackageId":{"shape":"NodePackageId"},
"PackageName":{"shape":"NodePackageName"},
"PackageVersion":{"shape":"NodePackageVersion"},
"PatchVersion":{"shape":"NodePackagePatchVersion"},
- "IsLatestPatch":{"shape":"Boolean"},
+ "RegisteredTime":{"shape":"TimeStamp"},
"Status":{"shape":"PackageVersionStatus"},
- "StatusDescription":{"shape":"PackageVersionStatusDescription"},
- "RegisteredTime":{"shape":"TimeStamp"}
+ "StatusDescription":{"shape":"PackageVersionStatusDescription"}
}
},
"Description":{
@@ -1178,12 +1229,12 @@
"Device":{
"type":"structure",
"members":{
- "DeviceId":{"shape":"DeviceId"},
- "Name":{"shape":"DeviceName"},
"CreatedTime":{"shape":"CreatedTime"},
- "ProvisioningStatus":{"shape":"DeviceStatus"},
+ "DeviceId":{"shape":"DeviceId"},
"LastUpdatedTime":{"shape":"LastUpdatedTime"},
- "LeaseExpirationTime":{"shape":"LeaseExpirationTime"}
+ "LeaseExpirationTime":{"shape":"LeaseExpirationTime"},
+ "Name":{"shape":"DeviceName"},
+ "ProvisioningStatus":{"shape":"DeviceStatus"}
}
},
"DeviceArn":{
@@ -1216,10 +1267,10 @@
"DeviceJob":{
"type":"structure",
"members":{
- "DeviceName":{"shape":"DeviceName"},
+ "CreatedTime":{"shape":"CreatedTime"},
"DeviceId":{"shape":"DeviceId"},
- "JobId":{"shape":"JobId"},
- "CreatedTime":{"shape":"CreatedTime"}
+ "DeviceName":{"shape":"DeviceName"},
+ "JobId":{"shape":"JobId"}
}
},
"DeviceJobConfig":{
@@ -1285,9 +1336,9 @@
"EthernetStatus":{
"type":"structure",
"members":{
- "IpAddress":{"shape":"IpAddress"},
"ConnectionStatus":{"shape":"NetworkConnectionStatus"},
- "HwAddress":{"shape":"HwAddress"}
+ "HwAddress":{"shape":"HwAddress"},
+ "IpAddress":{"shape":"IpAddress"}
}
},
"HwAddress":{
@@ -1331,11 +1382,17 @@
"min":1,
"pattern":"^((25[0-5]|2[0-4]\\d|1\\d\\d|[1-9]?\\d)\\.(25[0-5]|2[0-4]\\d|1\\d\\d|[1-9]?\\d)\\.(25[0-5]|2[0-4]\\d|1\\d\\d|[1-9]?\\d)\\.(25[0-5]|2[0-4]\\d|1\\d\\d|[1-9]?\\d))(:(6553[0-5]|655[0-2]\\d|65[0-4]\\d{2}|6[0-4]\\d{3}|[1-5]\\d{4}|[1-9]\\d{0,3}))?$"
},
+ "IpAddressOrServerName":{
+ "type":"string",
+ "max":255,
+ "min":1,
+ "pattern":"(^([a-z0-9]+(-[a-z0-9]+)*\\.)+[a-z]{2,}$)|(^((25[0-5]|2[0-4]\\d|1\\d\\d|[1-9]?\\d)\\.(25[0-5]|2[0-4]\\d|1\\d\\d|[1-9]?\\d)\\.(25[0-5]|2[0-4]\\d|1\\d\\d|[1-9]?\\d)\\.(25[0-5]|2[0-4]\\d|1\\d\\d|[1-9]?\\d))(:(6553[0-5]|655[0-2]\\d|65[0-4]\\d{2}|6[0-4]\\d{3}|[1-5]\\d{4}|[1-9]\\d{0,3}))?$)"
+ },
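
The new IpAddressOrServerName shape accepts either a lowercase DNS name or an IPv4 address with an optional port. A quick Go check of candidate NTP servers against the model's pattern (copied verbatim from the entry above, with JSON escaping removed):

    package main

    import (
    	"fmt"
    	"regexp"
    )

    func main() {
    	// Pattern from the IpAddressOrServerName shape added above.
    	serverName := regexp.MustCompile(`(^([a-z0-9]+(-[a-z0-9]+)*\.)+[a-z]{2,}$)|(^((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d))(:(6553[0-5]|655[0-2]\d|65[0-4]\d{2}|6[0-4]\d{3}|[1-5]\d{4}|[1-9]\d{0,3}))?$)`)

    	for _, s := range []string{"pool.ntp.org", "192.168.1.10:123", "Bad_Name"} {
    		fmt.Println(s, serverName.MatchString(s))
    	}
    }
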
"Job":{
"type":"structure",
"members":{
- "JobId":{"shape":"JobId"},
- "DeviceId":{"shape":"DeviceId"}
+ "DeviceId":{"shape":"DeviceId"},
+ "JobId":{"shape":"JobId"}
}
},
"JobId":{
@@ -1372,6 +1429,11 @@
"enum":["OTA"]
},
"LastUpdatedTime":{"type":"timestamp"},
+ "LatestAlternateSoftware":{
+ "type":"string",
+ "max":255,
+ "min":1
+ },
"LatestSoftware":{
"type":"string",
"max":255,
@@ -1385,7 +1447,7 @@
"ApplicationInstanceId":{
"shape":"ApplicationInstanceId",
"location":"uri",
- "locationName":"applicationInstanceId"
+ "locationName":"ApplicationInstanceId"
},
"MaxResults":{
"shape":"MaxSize25",
@@ -1402,8 +1464,8 @@
"ListApplicationInstanceDependenciesResponse":{
"type":"structure",
"members":{
- "PackageObjects":{"shape":"PackageObjects"},
- "NextToken":{"shape":"NextToken"}
+ "NextToken":{"shape":"NextToken"},
+ "PackageObjects":{"shape":"PackageObjects"}
}
},
"ListApplicationInstanceNodeInstancesRequest":{
@@ -1413,7 +1475,7 @@
"ApplicationInstanceId":{
"shape":"ApplicationInstanceId",
"location":"uri",
- "locationName":"applicationInstanceId"
+ "locationName":"ApplicationInstanceId"
},
"MaxResults":{
"shape":"MaxSize25",
@@ -1430,8 +1492,8 @@
"ListApplicationInstanceNodeInstancesResponse":{
"type":"structure",
"members":{
- "NodeInstances":{"shape":"NodeInstances"},
- "NextToken":{"shape":"NextToken"}
+ "NextToken":{"shape":"NextToken"},
+ "NodeInstances":{"shape":"NodeInstances"}
}
},
"ListApplicationInstancesRequest":{
@@ -1442,11 +1504,6 @@
"location":"querystring",
"locationName":"deviceId"
},
- "StatusFilter":{
- "shape":"StatusFilter",
- "location":"querystring",
- "locationName":"statusFilter"
- },
"MaxResults":{
"shape":"MaxSize25",
"location":"querystring",
@@ -1456,6 +1513,11 @@
"shape":"NextToken",
"location":"querystring",
"locationName":"nextToken"
+ },
+ "StatusFilter":{
+ "shape":"StatusFilter",
+ "location":"querystring",
+ "locationName":"statusFilter"
}
}
},
@@ -1474,15 +1536,15 @@
"location":"querystring",
"locationName":"DeviceId"
},
- "NextToken":{
- "shape":"NextToken",
- "location":"querystring",
- "locationName":"NextToken"
- },
"MaxResults":{
"shape":"MaxSize25",
"location":"querystring",
"locationName":"MaxResults"
+ },
+ "NextToken":{
+ "shape":"NextToken",
+ "location":"querystring",
+ "locationName":"NextToken"
}
}
},
@@ -1496,15 +1558,15 @@
"ListDevicesRequest":{
"type":"structure",
"members":{
- "NextToken":{
- "shape":"NextToken",
- "location":"querystring",
- "locationName":"NextToken"
- },
"MaxResults":{
"shape":"MaxSize25",
"location":"querystring",
"locationName":"MaxResults"
+ },
+ "NextToken":{
+ "shape":"NextToken",
+ "location":"querystring",
+ "locationName":"NextToken"
}
}
},
@@ -1519,15 +1581,15 @@
"ListNodeFromTemplateJobsRequest":{
"type":"structure",
"members":{
- "NextToken":{
- "shape":"NextToken",
- "location":"querystring",
- "locationName":"NextToken"
- },
"MaxResults":{
"shape":"MaxSize25",
"location":"querystring",
"locationName":"MaxResults"
+ },
+ "NextToken":{
+ "shape":"NextToken",
+ "location":"querystring",
+ "locationName":"NextToken"
}
}
},
@@ -1535,8 +1597,8 @@
"type":"structure",
"required":["NodeFromTemplateJobs"],
"members":{
- "NodeFromTemplateJobs":{"shape":"NodeFromTemplateJobList"},
- "NextToken":{"shape":"NextToken"}
+ "NextToken":{"shape":"NextToken"},
+ "NodeFromTemplateJobs":{"shape":"NodeFromTemplateJobList"}
}
},
"ListNodesRequest":{
@@ -1547,6 +1609,16 @@
"location":"querystring",
"locationName":"category"
},
+ "MaxResults":{
+ "shape":"MaxSize25",
+ "location":"querystring",
+ "locationName":"maxResults"
+ },
+ "NextToken":{
+ "shape":"Token",
+ "location":"querystring",
+ "locationName":"nextToken"
+ },
"OwnerAccount":{
"shape":"PackageOwnerAccount",
"location":"querystring",
@@ -1566,38 +1638,28 @@
"shape":"NodePackagePatchVersion",
"location":"querystring",
"locationName":"patchVersion"
- },
- "NextToken":{
- "shape":"Token",
- "location":"querystring",
- "locationName":"nextToken"
- },
- "MaxResults":{
- "shape":"MaxSize25",
- "location":"querystring",
- "locationName":"maxResults"
}
}
},
"ListNodesResponse":{
"type":"structure",
"members":{
- "Nodes":{"shape":"NodesList"},
- "NextToken":{"shape":"Token"}
+ "NextToken":{"shape":"Token"},
+ "Nodes":{"shape":"NodesList"}
}
},
"ListPackageImportJobsRequest":{
"type":"structure",
"members":{
- "NextToken":{
- "shape":"NextToken",
- "location":"querystring",
- "locationName":"NextToken"
- },
"MaxResults":{
"shape":"MaxSize25",
"location":"querystring",
"locationName":"MaxResults"
+ },
+ "NextToken":{
+ "shape":"NextToken",
+ "location":"querystring",
+ "locationName":"NextToken"
}
}
},
@@ -1605,8 +1667,8 @@
"type":"structure",
"required":["PackageImportJobs"],
"members":{
- "PackageImportJobs":{"shape":"PackageImportJobList"},
- "NextToken":{"shape":"NextToken"}
+ "NextToken":{"shape":"NextToken"},
+ "PackageImportJobs":{"shape":"PackageImportJobList"}
}
},
"ListPackagesRequest":{
@@ -1627,8 +1689,8 @@
"ListPackagesResponse":{
"type":"structure",
"members":{
- "Packages":{"shape":"PackageList"},
- "NextToken":{"shape":"NextToken"}
+ "NextToken":{"shape":"NextToken"},
+ "Packages":{"shape":"PackageList"}
}
},
"ListTagsForResourceRequest":{
@@ -1691,21 +1753,25 @@
"type":"string",
"enum":[
"CONNECTED",
- "NOT_CONNECTED"
+ "NOT_CONNECTED",
+ "CONNECTING"
]
},
"NetworkPayload":{
"type":"structure",
"members":{
"Ethernet0":{"shape":"EthernetPayload"},
- "Ethernet1":{"shape":"EthernetPayload"}
+ "Ethernet1":{"shape":"EthernetPayload"},
+ "Ntp":{"shape":"NtpPayload"}
}
},
"NetworkStatus":{
"type":"structure",
"members":{
"Ethernet0Status":{"shape":"EthernetStatus"},
- "Ethernet1Status":{"shape":"EthernetStatus"}
+ "Ethernet1Status":{"shape":"EthernetStatus"},
+ "LastUpdatedTime":{"shape":"LastUpdatedTime"},
+ "NtpStatus":{"shape":"NtpStatus"}
}
},
"NextToken":{
@@ -1717,27 +1783,27 @@
"Node":{
"type":"structure",
"required":[
- "NodeId",
- "Name",
"Category",
- "PackageName",
+ "CreatedTime",
+ "Name",
+ "NodeId",
"PackageId",
+ "PackageName",
"PackageVersion",
- "PatchVersion",
- "CreatedTime"
+ "PatchVersion"
],
"members":{
- "NodeId":{"shape":"NodeId"},
- "Name":{"shape":"NodeName"},
"Category":{"shape":"NodeCategory"},
+ "CreatedTime":{"shape":"TimeStamp"},
+ "Description":{"shape":"Description"},
+ "Name":{"shape":"NodeName"},
+ "NodeId":{"shape":"NodeId"},
"OwnerAccount":{"shape":"PackageOwnerAccount"},
- "PackageName":{"shape":"NodePackageName"},
- "PackageId":{"shape":"NodePackageId"},
"PackageArn":{"shape":"NodePackageArn"},
+ "PackageId":{"shape":"NodePackageId"},
+ "PackageName":{"shape":"NodePackageName"},
"PackageVersion":{"shape":"NodePackageVersion"},
- "PatchVersion":{"shape":"NodePackagePatchVersion"},
- "Description":{"shape":"Description"},
- "CreatedTime":{"shape":"TimeStamp"}
+ "PatchVersion":{"shape":"NodePackagePatchVersion"}
}
},
"NodeAssetName":{
@@ -1758,12 +1824,12 @@
"NodeFromTemplateJob":{
"type":"structure",
"members":{
+ "CreatedTime":{"shape":"CreatedTime"},
"JobId":{"shape":"JobId"},
- "TemplateType":{"shape":"TemplateType"},
+ "NodeName":{"shape":"NodeName"},
"Status":{"shape":"NodeFromTemplateJobStatus"},
"StatusMessage":{"shape":"NodeFromTemplateJobStatusMessage"},
- "CreatedTime":{"shape":"CreatedTime"},
- "NodeName":{"shape":"NodeName"}
+ "TemplateType":{"shape":"TemplateType"}
}
},
"NodeFromTemplateJobList":{
@@ -1788,27 +1854,27 @@
"NodeInputPort":{
"type":"structure",
"members":{
- "Name":{"shape":"PortName"},
- "Description":{"shape":"Description"},
- "Type":{"shape":"PortType"},
"DefaultValue":{"shape":"PortDefaultValue"},
- "MaxConnections":{"shape":"MaxConnections"}
+ "Description":{"shape":"Description"},
+ "MaxConnections":{"shape":"MaxConnections"},
+ "Name":{"shape":"PortName"},
+ "Type":{"shape":"PortType"}
}
},
"NodeInstance":{
"type":"structure",
"required":[
- "NodeInstanceId",
- "CurrentStatus"
+ "CurrentStatus",
+ "NodeInstanceId"
],
"members":{
- "NodeInstanceId":{"shape":"NodeInstanceId"},
+ "CurrentStatus":{"shape":"NodeInstanceStatus"},
"NodeId":{"shape":"NodeId"},
+ "NodeInstanceId":{"shape":"NodeInstanceId"},
+ "NodeName":{"shape":"NodeName"},
"PackageName":{"shape":"NodePackageName"},
- "PackageVersion":{"shape":"NodePackageVersion"},
"PackagePatchVersion":{"shape":"NodePackagePatchVersion"},
- "NodeName":{"shape":"NodeName"},
- "CurrentStatus":{"shape":"NodeInstanceStatus"}
+ "PackageVersion":{"shape":"NodePackageVersion"}
}
},
"NodeInstanceId":{
@@ -1849,8 +1915,8 @@
"NodeOutputPort":{
"type":"structure",
"members":{
- "Name":{"shape":"PortName"},
"Description":{"shape":"Description"},
+ "Name":{"shape":"PortName"},
"Type":{"shape":"PortType"}
}
},
@@ -1887,6 +1953,32 @@
"type":"list",
"member":{"shape":"Node"}
},
+ "NtpPayload":{
+ "type":"structure",
+ "required":["NtpServers"],
+ "members":{
+ "NtpServers":{"shape":"NtpServerList"}
+ }
+ },
+ "NtpServerList":{
+ "type":"list",
+ "member":{"shape":"IpAddressOrServerName"},
+ "max":5,
+ "min":0
+ },
+ "NtpServerName":{
+ "type":"string",
+ "max":255,
+ "min":1
+ },
+ "NtpStatus":{
+ "type":"structure",
+ "members":{
+ "ConnectionStatus":{"shape":"NetworkConnectionStatus"},
+ "IpAddress":{"shape":"IpAddress"},
+ "NtpServerName":{"shape":"NtpServerName"}
+ }
+ },
"OTAJobConfig":{
"type":"structure",
"required":["ImageVersion"],
@@ -1919,12 +2011,12 @@
"PackageImportJob":{
"type":"structure",
"members":{
+ "CreatedTime":{"shape":"CreatedTime"},
"JobId":{"shape":"JobId"},
"JobType":{"shape":"PackageImportJobType"},
+ "LastUpdatedTime":{"shape":"LastUpdatedTime"},
"Status":{"shape":"PackageImportJobStatus"},
- "StatusMessage":{"shape":"PackageImportJobStatusMessage"},
- "CreatedTime":{"shape":"CreatedTime"},
- "LastUpdatedTime":{"shape":"LastUpdatedTime"}
+ "StatusMessage":{"shape":"PackageImportJobStatusMessage"}
}
},
"PackageImportJobInputConfig":{
@@ -1940,16 +2032,16 @@
"PackageImportJobOutput":{
"type":"structure",
"required":[
+ "OutputS3Location",
"PackageId",
"PackageVersion",
- "PatchVersion",
- "OutputS3Location"
+ "PatchVersion"
],
"members":{
+ "OutputS3Location":{"shape":"OutPutS3Location"},
"PackageId":{"shape":"NodePackageId"},
"PackageVersion":{"shape":"NodePackageVersion"},
- "PatchVersion":{"shape":"NodePackagePatchVersion"},
- "OutputS3Location":{"shape":"OutPutS3Location"}
+ "PatchVersion":{"shape":"NodePackagePatchVersion"}
}
},
"PackageImportJobOutputConfig":{
@@ -1969,7 +2061,10 @@
"PackageImportJobStatusMessage":{"type":"string"},
"PackageImportJobType":{
"type":"string",
- "enum":["NODE_PACKAGE_VERSION"]
+ "enum":[
+ "NODE_PACKAGE_VERSION",
+ "MARKETPLACE_NODE_PACKAGE_VERSION"
+ ]
},
"PackageList":{
"type":"list",
@@ -1978,10 +2073,10 @@
"PackageListItem":{
"type":"structure",
"members":{
- "PackageId":{"shape":"NodePackageId"},
- "PackageName":{"shape":"NodePackageName"},
"Arn":{"shape":"NodePackageArn"},
"CreatedTime":{"shape":"TimeStamp"},
+ "PackageId":{"shape":"NodePackageId"},
+ "PackageName":{"shape":"NodePackageName"},
"Tags":{"shape":"TagMap"}
}
},
@@ -2022,9 +2117,9 @@
"PackageVersion"
],
"members":{
+ "MarkLatest":{"shape":"MarkLatestPatch"},
"PackageName":{"shape":"NodePackageName"},
- "PackageVersion":{"shape":"NodePackageVersion"},
- "MarkLatest":{"shape":"MarkLatestPatch"}
+ "PackageVersion":{"shape":"NodePackageVersion"}
}
},
"PackageVersionStatus":{
@@ -2076,10 +2171,10 @@
"type":"structure",
"required":["Name"],
"members":{
- "Name":{"shape":"DeviceName"},
"Description":{"shape":"Description"},
- "Tags":{"shape":"TagMap"},
- "NetworkingConfiguration":{"shape":"NetworkPayload"}
+ "Name":{"shape":"DeviceName"},
+ "NetworkingConfiguration":{"shape":"NetworkPayload"},
+ "Tags":{"shape":"TagMap"}
}
},
"ProvisionDeviceResponse":{
@@ -2089,11 +2184,11 @@
"Status"
],
"members":{
- "DeviceId":{"shape":"DeviceId"},
"Arn":{"shape":"DeviceArn"},
- "Status":{"shape":"DeviceStatus"},
"Certificates":{"shape":"Certificates"},
- "IotThingName":{"shape":"IotThingName"}
+ "DeviceId":{"shape":"DeviceId"},
+ "IotThingName":{"shape":"IotThingName"},
+ "Status":{"shape":"DeviceStatus"}
}
},
"Region":{
@@ -2110,6 +2205,7 @@
"PatchVersion"
],
"members":{
+ "MarkLatest":{"shape":"MarkLatestPatch"},
"OwnerAccount":{"shape":"PackageOwnerAccount"},
"PackageId":{
"shape":"NodePackageId",
@@ -2125,8 +2221,7 @@
"shape":"NodePackagePatchVersion",
"location":"uri",
"locationName":"PatchVersion"
- },
- "MarkLatest":{"shape":"MarkLatestPatch"}
+ }
}
},
"RegisterPackageVersionResponse":{
@@ -2141,7 +2236,7 @@
"ApplicationInstanceId":{
"shape":"ApplicationInstanceId",
"location":"uri",
- "locationName":"applicationInstanceId"
+ "locationName":"ApplicationInstanceId"
}
}
},
@@ -2168,7 +2263,10 @@
"ResourceId":{"shape":"String"},
"ResourceType":{"shape":"String"}
},
- "error":{"httpStatusCode":404},
+ "error":{
+ "httpStatusCode":404,
+ "senderFault":true
+ },
"exception":true
},
"RetryAfterSeconds":{"type":"integer"},
@@ -2185,9 +2283,9 @@
"ObjectKey"
],
"members":{
- "Region":{"shape":"Region"},
"BucketName":{"shape":"BucketName"},
- "ObjectKey":{"shape":"ObjectKey"}
+ "ObjectKey":{"shape":"ObjectKey"},
+ "Region":{"shape":"Region"}
}
},
"ServiceQuotaExceededException":{
@@ -2199,27 +2297,30 @@
],
"members":{
"Message":{"shape":"String"},
+ "QuotaCode":{"shape":"String"},
"ResourceId":{"shape":"String"},
"ResourceType":{"shape":"String"},
- "QuotaCode":{"shape":"String"},
"ServiceCode":{"shape":"String"}
},
- "error":{"httpStatusCode":402},
+ "error":{
+ "httpStatusCode":402,
+ "senderFault":true
+ },
"exception":true
},
"StaticIpConnectionInfo":{
"type":"structure",
"required":[
- "IpAddress",
- "Mask",
+ "DefaultGateway",
"Dns",
- "DefaultGateway"
+ "IpAddress",
+ "Mask"
],
"members":{
- "IpAddress":{"shape":"IpAddress"},
- "Mask":{"shape":"Mask"},
+ "DefaultGateway":{"shape":"DefaultGateway"},
"Dns":{"shape":"DnsList"},
- "DefaultGateway":{"shape":"DefaultGateway"}
+ "IpAddress":{"shape":"IpAddress"},
+ "Mask":{"shape":"Mask"}
}
},
"StatusFilter":{
@@ -2236,18 +2337,18 @@
"StorageLocation":{
"type":"structure",
"required":[
+ "BinaryPrefixLocation",
"Bucket",
- "RepoPrefixLocation",
"GeneratedPrefixLocation",
- "BinaryPrefixLocation",
- "ManifestPrefixLocation"
+ "ManifestPrefixLocation",
+ "RepoPrefixLocation"
],
"members":{
+ "BinaryPrefixLocation":{"shape":"Object"},
"Bucket":{"shape":"Bucket"},
- "RepoPrefixLocation":{"shape":"Object"},
"GeneratedPrefixLocation":{"shape":"Object"},
- "BinaryPrefixLocation":{"shape":"Object"},
- "ManifestPrefixLocation":{"shape":"Object"}
+ "ManifestPrefixLocation":{"shape":"Object"},
+ "RepoPrefixLocation":{"shape":"Object"}
}
},
"String":{"type":"string"},
@@ -2354,12 +2455,12 @@
"type":"structure",
"required":["DeviceId"],
"members":{
+ "Description":{"shape":"Description"},
"DeviceId":{
"shape":"DeviceId",
"location":"uri",
"locationName":"DeviceId"
- },
- "Description":{"shape":"Description"}
+ }
}
},
"UpdateDeviceMetadataResponse":{
@@ -2384,13 +2485,16 @@
"type":"structure",
"required":["Message"],
"members":{
- "Message":{"shape":"String"},
- "Reason":{"shape":"ValidationExceptionReason"},
- "ErrorId":{"shape":"String"},
"ErrorArguments":{"shape":"ValidationExceptionErrorArgumentList"},
- "Fields":{"shape":"ValidationExceptionFieldList"}
+ "ErrorId":{"shape":"String"},
+ "Fields":{"shape":"ValidationExceptionFieldList"},
+ "Message":{"shape":"String"},
+ "Reason":{"shape":"ValidationExceptionReason"}
+ },
+ "error":{
+ "httpStatusCode":400,
+ "senderFault":true
},
- "error":{"httpStatusCode":400},
"exception":true
},
"ValidationExceptionErrorArgument":{
@@ -2411,12 +2515,12 @@
"ValidationExceptionField":{
"type":"structure",
"required":[
- "Name",
- "Message"
+ "Message",
+ "Name"
],
"members":{
- "Name":{"shape":"String"},
- "Message":{"shape":"String"}
+ "Message":{"shape":"String"},
+ "Name":{"shape":"String"}
}
},
"ValidationExceptionFieldList":{
@@ -2431,6 +2535,11 @@
"FIELD_VALIDATION_FAILED",
"OTHER"
]
+ },
+ "Version":{
+ "type":"string",
+ "max":255,
+ "min":1
}
}
}
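
The hunks above add NTP configuration to the provisioning model: NtpPayload carries an NtpServerList of up to five entries, and each entry is an IpAddressOrServerName whose pattern accepts either a DNS hostname or an IPv4 address with an optional port. A minimal Go sketch of passing these settings through ProvisionDevice; the region, device name, and server addresses are hypothetical placeholders:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/panorama"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	client := panorama.New(sess)

	// NtpServers takes at most five entries, in order of preference; each may
	// be a hostname or an IPv4 address with an optional port.
	out, err := client.ProvisionDevice(&panorama.ProvisionDeviceInput{
		Name: aws.String("my-appliance"), // hypothetical device name
		NetworkingConfiguration: &panorama.NetworkPayload{
			Ntp: &panorama.NtpPayload{
				NtpServers: aws.StringSlice([]string{"time.example.com", "192.0.2.10:123"}),
			},
		},
	})
	if err != nil {
		fmt.Println("provision failed:", err)
		return
	}
	fmt.Println("device", aws.StringValue(out.DeviceId), "status", aws.StringValue(out.Status))
}
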
diff --git a/models/apis/panorama/2019-07-24/docs-2.json b/models/apis/panorama/2019-07-24/docs-2.json
index d54a44dab75..6a91023ff26 100644
--- a/models/apis/panorama/2019-07-24/docs-2.json
+++ b/models/apis/panorama/2019-07-24/docs-2.json
@@ -1,6 +1,6 @@
{
"version": "2.0",
- "service": "AWS Panorama Overview
This is the AWS Panorama API Reference. For an introduction to the service, see What is AWS Panorama? in the AWS Panorama Developer Guide.
",
+ "service": "AWS Panorama
Overview
This is the AWS Panorama API Reference. For an introduction to the service, see What is AWS Panorama? in the AWS Panorama Developer Guide.
",
"operations": {
"CreateApplicationInstance": "Creates an application instance and deploys it to a device.
",
"CreateJobForDevices": "Creates a job to run on one or more devices.
",
@@ -8,7 +8,7 @@
"CreatePackage": "Creates a package and storage location in an Amazon S3 access point.
",
"CreatePackageImportJob": "Imports a node package.
",
"DeleteDevice": "Deletes a device.
",
- "DeletePackage": "Deletes a package.
",
+ "DeletePackage": "Deletes a package.
To delete a package, you need permission to call s3:DeleteObject
in addition to permissions for the AWS Panorama API.
",
"DeregisterPackageVersion": "Deregisters a package version.
",
"DescribeApplicationInstance": "Returns information about an application instance on a device.
",
"DescribeApplicationInstanceDetails": "Returns information about an application instance's configuration manifest.
",
@@ -42,6 +42,18 @@
"refs": {
}
},
+ "AlternateSoftwareMetadata": {
+ "base": "Details about a beta appliance software update.
",
+ "refs": {
+ "AlternateSoftwares$member": null
+ }
+ },
+ "AlternateSoftwares": {
+ "base": null,
+ "refs": {
+ "DescribeDeviceResponse$AlternateSoftwares": "Beta software releases available for the device.
"
+ }
+ },
"ApplicationInstance": {
"base": "An application instance on a device.
",
"refs": {
@@ -69,11 +81,11 @@
"CreateApplicationInstanceRequest$ApplicationInstanceIdToReplace": "The ID of an application instance to replace with the new instance.
",
"CreateApplicationInstanceResponse$ApplicationInstanceId": "The application instance's ID.
",
"DescribeApplicationInstanceDetailsRequest$ApplicationInstanceId": "The application instance's ID.
",
- "DescribeApplicationInstanceDetailsResponse$ApplicationInstanceIdToReplace": "The ID of the application instance that this instance replaced.
",
"DescribeApplicationInstanceDetailsResponse$ApplicationInstanceId": "The application instance's ID.
",
+ "DescribeApplicationInstanceDetailsResponse$ApplicationInstanceIdToReplace": "The ID of the application instance that this instance replaced.
",
"DescribeApplicationInstanceRequest$ApplicationInstanceId": "The application instance's ID.
",
- "DescribeApplicationInstanceResponse$ApplicationInstanceIdToReplace": "The ID of the application instance that this instance replaced.
",
"DescribeApplicationInstanceResponse$ApplicationInstanceId": "The application instance's ID.
",
+ "DescribeApplicationInstanceResponse$ApplicationInstanceIdToReplace": "The ID of the application instance that this instance replaced.
",
"ListApplicationInstanceDependenciesRequest$ApplicationInstanceId": "The application instance's ID.
",
"ListApplicationInstanceNodeInstancesRequest$ApplicationInstanceId": "The node instances' application instance ID.
",
"RemoveApplicationInstanceRequest$ApplicationInstanceId": "An application instance ID.
"
@@ -547,9 +559,16 @@
"base": null,
"refs": {
"EthernetStatus$IpAddress": "The device's IP address.
",
+ "NtpStatus$IpAddress": "The IP address of the server.
",
"StaticIpConnectionInfo$IpAddress": "The connection's IP address.
"
}
},
+ "IpAddressOrServerName": {
+ "base": null,
+ "refs": {
+ "NtpServerList$member": null
+ }
+ },
"Job": {
"base": "A job for a device.
",
"refs": {
@@ -612,9 +631,16 @@
"DescribeNodeFromTemplateJobResponse$LastUpdatedTime": "When the job was updated.
",
"DescribePackageImportJobResponse$LastUpdatedTime": "When the job was updated.
",
"Device$LastUpdatedTime": "When the device was updated.
",
+ "NetworkStatus$LastUpdatedTime": "When the network status changed.
",
"PackageImportJob$LastUpdatedTime": "When the job was updated.
"
}
},
+ "LatestAlternateSoftware": {
+ "base": null,
+ "refs": {
+ "DescribeDeviceResponse$LatestAlternateSoftware": "The most recent beta software release.
"
+ }
+ },
"LatestSoftware": {
"base": null,
"refs": {
@@ -790,7 +816,8 @@
"NetworkConnectionStatus": {
"base": null,
"refs": {
- "EthernetStatus$ConnectionStatus": "The device's connection status.
"
+ "EthernetStatus$ConnectionStatus": "The device's connection status.
",
+ "NtpStatus$ConnectionStatus": "The connection's status.
"
}
},
"NetworkPayload": {
@@ -1019,6 +1046,30 @@
"ListNodesResponse$Nodes": "A list of nodes.
"
}
},
+ "NtpPayload": {
+ "base": "Network time protocol (NTP) server settings. Use this option to connect to local NTP servers instead of pool.ntp.org
.
",
+ "refs": {
+ "NetworkPayload$Ntp": "Network time protocol (NTP) server settings.
"
+ }
+ },
+ "NtpServerList": {
+ "base": null,
+ "refs": {
+ "NtpPayload$NtpServers": "NTP servers to use, in order of preference.
"
+ }
+ },
+ "NtpServerName": {
+ "base": null,
+ "refs": {
+ "NtpStatus$NtpServerName": "The domain name of the server.
"
+ }
+ },
+ "NtpStatus": {
+ "base": "Details about an NTP server connection.
",
+ "refs": {
+ "NetworkStatus$NtpStatus": "Details about a network time protocol (NTP) server connection.
"
+ }
+ },
"OTAJobConfig": {
"base": "An over-the-air update (OTA) job configuration.
",
"refs": {
@@ -1028,10 +1079,10 @@
"Object": {
"base": null,
"refs": {
- "StorageLocation$RepoPrefixLocation": "The location's repo prefix.
",
- "StorageLocation$GeneratedPrefixLocation": "The location's generated prefix.
",
"StorageLocation$BinaryPrefixLocation": "The location's binary prefix.
",
- "StorageLocation$ManifestPrefixLocation": "The location's manifest prefix.
"
+ "StorageLocation$GeneratedPrefixLocation": "The location's generated prefix.
",
+ "StorageLocation$ManifestPrefixLocation": "The location's manifest prefix.
",
+ "StorageLocation$RepoPrefixLocation": "The location's repo prefix.
"
}
},
"ObjectKey": {
@@ -1297,10 +1348,10 @@
"base": null,
"refs": {
"AccessDeniedException$Message": null,
+ "ConflictException$ErrorId": "A unique ID for the error.
",
"ConflictException$Message": null,
"ConflictException$ResourceId": "The resource's ID.
",
"ConflictException$ResourceType": "The resource's type.
",
- "ConflictException$ErrorId": "A unique ID for the error.
",
"ConflictExceptionErrorArgument$Name": "The error argument's name.
",
"ConflictExceptionErrorArgument$Value": "The error argument's value.
",
"InternalServerException$Message": null,
@@ -1308,16 +1359,16 @@
"ResourceNotFoundException$ResourceId": "The resource's ID.
",
"ResourceNotFoundException$ResourceType": "The resource's type.
",
"ServiceQuotaExceededException$Message": null,
+ "ServiceQuotaExceededException$QuotaCode": "The name of the limit.
",
"ServiceQuotaExceededException$ResourceId": "The target resource's ID.
",
"ServiceQuotaExceededException$ResourceType": "The target resource's type.
",
- "ServiceQuotaExceededException$QuotaCode": "The name of the limit.
",
"ServiceQuotaExceededException$ServiceCode": "The name of the service.
",
- "ValidationException$Message": null,
"ValidationException$ErrorId": "A unique ID for the error.
",
+ "ValidationException$Message": null,
"ValidationExceptionErrorArgument$Name": "The argument's name.
",
"ValidationExceptionErrorArgument$Value": "The argument's value.
",
- "ValidationExceptionField$Name": "The field's name.
",
- "ValidationExceptionField$Message": "The field's message.
"
+ "ValidationExceptionField$Message": "The field's message.
",
+ "ValidationExceptionField$Name": "The field's name.
"
}
},
"TagKey": {
@@ -1481,6 +1532,12 @@
"refs": {
"ValidationException$Reason": "The reason that validation failed.
"
}
+ },
+ "Version": {
+ "base": null,
+ "refs": {
+ "AlternateSoftwareMetadata$Version": "The appliance software version.
"
+ }
}
}
}
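
The documentation entries above describe two new DescribeDevice response fields. A short sketch of reading them (imports as in the previous sketch; the device ID is a placeholder supplied by the caller):

func printBetaSoftware(client *panorama.Panorama, deviceID string) error {
	out, err := client.DescribeDevice(&panorama.DescribeDeviceInput{
		DeviceId: aws.String(deviceID),
	})
	if err != nil {
		return err
	}
	// LatestAlternateSoftware names the most recent beta release; the
	// AlternateSoftwares list enumerates every available beta version.
	fmt.Println("latest beta release:", aws.StringValue(out.LatestAlternateSoftware))
	for _, alt := range out.AlternateSoftwares {
		fmt.Println("available beta version:", aws.StringValue(alt.Version))
	}
	return nil
}
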
diff --git a/models/endpoints/endpoints.json b/models/endpoints/endpoints.json
index cfae92934a4..32ccdb69a08 100644
--- a/models/endpoints/endpoints.json
+++ b/models/endpoints/endpoints.json
@@ -426,6 +426,27 @@
"us-west-2" : { }
}
},
+ "amplifyuibuilder" : {
+ "endpoints" : {
+ "ap-northeast-1" : { },
+ "ap-northeast-2" : { },
+ "ap-south-1" : { },
+ "ap-southeast-1" : { },
+ "ap-southeast-2" : { },
+ "ca-central-1" : { },
+ "eu-central-1" : { },
+ "eu-north-1" : { },
+ "eu-west-1" : { },
+ "eu-west-2" : { },
+ "eu-west-3" : { },
+ "me-south-1" : { },
+ "sa-east-1" : { },
+ "us-east-1" : { },
+ "us-east-2" : { },
+ "us-west-1" : { },
+ "us-west-2" : { }
+ }
+ },
"api.detective" : {
"defaults" : {
"protocols" : [ "https" ]
diff --git a/service/elasticache/api.go b/service/elasticache/api.go
index fbe598cbce6..35494eaeaa6 100644
--- a/service/elasticache/api.go
+++ b/service/elasticache/api.go
@@ -9636,11 +9636,6 @@ type CreateCacheClusterInput struct {
// * Compute optimized: Previous generation: (not recommended) C1 node types:
// cache.c1.xlarge
//
- // * Memory optimized with data tiering: Current generation: R6gd node types
- // (available only for Redis engine version 6.2 onward). cache.r6gd.xlarge,
- // cache.r6gd.2xlarge, cache.r6gd.4xlarge, cache.r6gd.8xlarge, cache.r6gd.12xlarge,
- // cache.r6gd.16xlarge
- //
// * Memory optimized: Current generation: R6g node types (available only
// for Redis engine version 5.0.6 onward and for Memcached engine version
// 1.5.16 onward). cache.r6g.large, cache.r6g.xlarge, cache.r6g.2xlarge,
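
The hunk above only removes the data-tiering bullet from the CreateCacheClusterInput documentation; the API surface is unchanged. For context, a hedged sketch of requesting one of the R6g node types the remaining bullet describes (the cluster ID, engine version, and sizing are hypothetical):

func createR6gCluster(client *elasticache.ElastiCache) error {
	// R6g node types require Redis 5.0.6 onward or Memcached 1.5.16 onward,
	// per the documentation above.
	_, err := client.CreateCacheCluster(&elasticache.CreateCacheClusterInput{
		CacheClusterId: aws.String("example-cluster"),
		CacheNodeType:  aws.String("cache.r6g.large"),
		Engine:         aws.String("redis"),
		EngineVersion:  aws.String("6.2"),
		NumCacheNodes:  aws.Int64(1),
	})
	return err
}
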
diff --git a/service/panorama/api.go b/service/panorama/api.go
index 5d87fdaa5b4..6b5e8181505 100644
--- a/service/panorama/api.go
+++ b/service/panorama/api.go
@@ -70,15 +70,15 @@ func (c *Panorama) CreateApplicationInstanceRequest(input *CreateApplicationInst
// * ValidationException
// The request contains an invalid parameter value.
//
-// * InternalServerException
-// An internal error occurred.
-//
// * AccessDeniedException
// The requestor does not have permission to access the target action or resource.
//
// * ServiceQuotaExceededException
// The request would cause a limit to be exceeded.
//
+// * InternalServerException
+// An internal error occurred.
+//
// See also, https://docs.aws.amazon.com/goto/WebAPI/panorama-2019-07-24/CreateApplicationInstance
func (c *Panorama) CreateApplicationInstance(input *CreateApplicationInstanceInput) (*CreateApplicationInstanceOutput, error) {
req, out := c.CreateApplicationInstanceRequest(input)
@@ -161,15 +161,15 @@ func (c *Panorama) CreateJobForDevicesRequest(input *CreateJobForDevicesInput) (
// * ValidationException
// The request contains an invalid parameter value.
//
-// * InternalServerException
-// An internal error occurred.
-//
// * AccessDeniedException
// The requestor does not have permission to access the target action or resource.
//
// * ResourceNotFoundException
// The target resource was not found.
//
+// * InternalServerException
+// An internal error occurred.
+//
// See also, https://docs.aws.amazon.com/goto/WebAPI/panorama-2019-07-24/CreateJobForDevices
func (c *Panorama) CreateJobForDevices(input *CreateJobForDevicesInput) (*CreateJobForDevicesOutput, error) {
req, out := c.CreateJobForDevicesRequest(input)
@@ -246,17 +246,17 @@ func (c *Panorama) CreateNodeFromTemplateJobRequest(input *CreateNodeFromTemplat
// API operation CreateNodeFromTemplateJob for usage and error information.
//
// Returned Error Types:
+// * ConflictException
+// The target resource is in use.
+//
// * ValidationException
// The request contains an invalid parameter value.
//
-// * InternalServerException
-// An internal error occurred.
-//
// * AccessDeniedException
// The requestor does not have permission to access the target action or resource.
//
-// * ConflictException
-// The target resource is in use.
+// * InternalServerException
+// An internal error occurred.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/panorama-2019-07-24/CreateNodeFromTemplateJob
func (c *Panorama) CreateNodeFromTemplateJob(input *CreateNodeFromTemplateJobInput) (*CreateNodeFromTemplateJobOutput, error) {
@@ -334,17 +334,17 @@ func (c *Panorama) CreatePackageRequest(input *CreatePackageInput) (req *request
// API operation CreatePackage for usage and error information.
//
// Returned Error Types:
+// * ConflictException
+// The target resource is in use.
+//
// * ValidationException
// The request contains an invalid parameter value.
//
-// * InternalServerException
-// An internal error occurred.
-//
// * AccessDeniedException
// The requestor does not have permission to access the target action or resource.
//
-// * ConflictException
-// The target resource is in use.
+// * InternalServerException
+// An internal error occurred.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/panorama-2019-07-24/CreatePackage
func (c *Panorama) CreatePackage(input *CreatePackageInput) (*CreatePackageOutput, error) {
@@ -422,17 +422,17 @@ func (c *Panorama) CreatePackageImportJobRequest(input *CreatePackageImportJobIn
// API operation CreatePackageImportJob for usage and error information.
//
// Returned Error Types:
+// * ConflictException
+// The target resource is in use.
+//
// * ValidationException
// The request contains an invalid parameter value.
//
-// * InternalServerException
-// An internal error occurred.
-//
// * AccessDeniedException
// The requestor does not have permission to access the target action or resource.
//
-// * ConflictException
-// The target resource is in use.
+// * InternalServerException
+// An internal error occurred.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/panorama-2019-07-24/CreatePackageImportJob
func (c *Panorama) CreatePackageImportJob(input *CreatePackageImportJobInput) (*CreatePackageImportJobOutput, error) {
@@ -516,15 +516,15 @@ func (c *Panorama) DeleteDeviceRequest(input *DeleteDeviceInput) (req *request.R
// * ValidationException
// The request contains an invalid parameter value.
//
-// * InternalServerException
-// An internal error occurred.
-//
// * AccessDeniedException
// The requestor does not have permission to access the target action or resource.
//
// * ResourceNotFoundException
// The target resource was not found.
//
+// * InternalServerException
+// An internal error occurred.
+//
// See also, https://docs.aws.amazon.com/goto/WebAPI/panorama-2019-07-24/DeleteDevice
func (c *Panorama) DeleteDevice(input *DeleteDeviceInput) (*DeleteDeviceOutput, error) {
req, out := c.DeleteDeviceRequest(input)
@@ -594,6 +594,9 @@ func (c *Panorama) DeletePackageRequest(input *DeletePackageInput) (req *request
//
// Deletes a package.
//
+// To delete a package, you need permission to call s3:DeleteObject in addition
+// to permissions for the AWS Panorama API.
+//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
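
The new doc comment implies a caller can hit an access error even with full Panorama permissions if the s3:DeleteObject grant is missing. A sketch of surfacing that case distinctly, assuming the generated panorama.ErrCodeAccessDeniedException constant, the aws/awserr package, and a hypothetical package ID:

func deletePackage(client *panorama.Panorama, packageID string) error {
	_, err := client.DeletePackage(&panorama.DeletePackageInput{
		PackageId: aws.String(packageID),
	})
	// A missing s3:DeleteObject grant is expected to surface as an
	// AccessDeniedException rather than a Panorama-specific error.
	if aerr, ok := err.(awserr.Error); ok && aerr.Code() == panorama.ErrCodeAccessDeniedException {
		return fmt.Errorf("check IAM permissions, including s3:DeleteObject: %v", err)
	}
	return err
}
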
@@ -602,21 +605,21 @@ func (c *Panorama) DeletePackageRequest(input *DeletePackageInput) (req *request
// API operation DeletePackage for usage and error information.
//
// Returned Error Types:
+// * ConflictException
+// The target resource is in use.
+//
// * ValidationException
// The request contains an invalid parameter value.
//
-// * InternalServerException
-// An internal error occurred.
-//
// * AccessDeniedException
// The requestor does not have permission to access the target action or resource.
//
-// * ConflictException
-// The target resource is in use.
-//
// * ResourceNotFoundException
// The target resource was not found.
//
+// * InternalServerException
+// An internal error occurred.
+//
// See also, https://docs.aws.amazon.com/goto/WebAPI/panorama-2019-07-24/DeletePackage
func (c *Panorama) DeletePackage(input *DeletePackageInput) (*DeletePackageOutput, error) {
req, out := c.DeletePackageRequest(input)
@@ -694,21 +697,21 @@ func (c *Panorama) DeregisterPackageVersionRequest(input *DeregisterPackageVersi
// API operation DeregisterPackageVersion for usage and error information.
//
// Returned Error Types:
+// * ConflictException
+// The target resource is in use.
+//
// * ValidationException
// The request contains an invalid parameter value.
//
-// * InternalServerException
-// An internal error occurred.
-//
// * AccessDeniedException
// The requestor does not have permission to access the target action or resource.
//
-// * ConflictException
-// The target resource is in use.
-//
// * ResourceNotFoundException
// The target resource was not found.
//
+// * InternalServerException
+// An internal error occurred.
+//
// See also, https://docs.aws.amazon.com/goto/WebAPI/panorama-2019-07-24/DeregisterPackageVersion
func (c *Panorama) DeregisterPackageVersion(input *DeregisterPackageVersionInput) (*DeregisterPackageVersionOutput, error) {
req, out := c.DeregisterPackageVersionRequest(input)
@@ -761,7 +764,7 @@ func (c *Panorama) DescribeApplicationInstanceRequest(input *DescribeApplication
op := &request.Operation{
Name: opDescribeApplicationInstance,
HTTPMethod: "GET",
- HTTPPath: "/application-instances/{applicationInstanceId}",
+ HTTPPath: "/application-instances/{ApplicationInstanceId}",
}
if input == nil {
@@ -791,15 +794,15 @@ func (c *Panorama) DescribeApplicationInstanceRequest(input *DescribeApplication
// * ValidationException
// The request contains an invalid parameter value.
//
-// * InternalServerException
-// An internal error occurred.
-//
// * AccessDeniedException
// The requestor does not have permission to access the target action or resource.
//
// * ResourceNotFoundException
// The target resource was not found.
//
+// * InternalServerException
+// An internal error occurred.
+//
// See also, https://docs.aws.amazon.com/goto/WebAPI/panorama-2019-07-24/DescribeApplicationInstance
func (c *Panorama) DescribeApplicationInstance(input *DescribeApplicationInstanceInput) (*DescribeApplicationInstanceOutput, error) {
req, out := c.DescribeApplicationInstanceRequest(input)
@@ -852,7 +855,7 @@ func (c *Panorama) DescribeApplicationInstanceDetailsRequest(input *DescribeAppl
op := &request.Operation{
Name: opDescribeApplicationInstanceDetails,
HTTPMethod: "GET",
- HTTPPath: "/application-instances/{applicationInstanceId}/details",
+ HTTPPath: "/application-instances/{ApplicationInstanceId}/details",
}
if input == nil {
@@ -882,15 +885,15 @@ func (c *Panorama) DescribeApplicationInstanceDetailsRequest(input *DescribeAppl
// * ValidationException
// The request contains an invalid parameter value.
//
-// * InternalServerException
-// An internal error occurred.
-//
// * AccessDeniedException
// The requestor does not have permission to access the target action or resource.
//
// * ResourceNotFoundException
// The target resource was not found.
//
+// * InternalServerException
+// An internal error occurred.
+//
// See also, https://docs.aws.amazon.com/goto/WebAPI/panorama-2019-07-24/DescribeApplicationInstanceDetails
func (c *Panorama) DescribeApplicationInstanceDetails(input *DescribeApplicationInstanceDetailsInput) (*DescribeApplicationInstanceDetailsOutput, error) {
req, out := c.DescribeApplicationInstanceDetailsRequest(input)
@@ -970,15 +973,15 @@ func (c *Panorama) DescribeDeviceRequest(input *DescribeDeviceInput) (req *reque
// * ValidationException
// The request contains an invalid parameter value.
//
-// * InternalServerException
-// An internal error occurred.
-//
// * AccessDeniedException
// The requestor does not have permission to access the target action or resource.
//
// * ResourceNotFoundException
// The target resource was not found.
//
+// * InternalServerException
+// An internal error occurred.
+//
// See also, https://docs.aws.amazon.com/goto/WebAPI/panorama-2019-07-24/DescribeDevice
func (c *Panorama) DescribeDevice(input *DescribeDeviceInput) (*DescribeDeviceOutput, error) {
req, out := c.DescribeDeviceRequest(input)
@@ -1061,15 +1064,15 @@ func (c *Panorama) DescribeDeviceJobRequest(input *DescribeDeviceJobInput) (req
// * ValidationException
// The request contains an invalid parameter value.
//
-// * InternalServerException
-// An internal error occurred.
-//
// * AccessDeniedException
// The requestor does not have permission to access the target action or resource.
//
// * ResourceNotFoundException
// The target resource was not found.
//
+// * InternalServerException
+// An internal error occurred.
+//
// See also, https://docs.aws.amazon.com/goto/WebAPI/panorama-2019-07-24/DescribeDeviceJob
func (c *Panorama) DescribeDeviceJob(input *DescribeDeviceJobInput) (*DescribeDeviceJobOutput, error) {
req, out := c.DescribeDeviceJobRequest(input)
@@ -1152,15 +1155,15 @@ func (c *Panorama) DescribeNodeRequest(input *DescribeNodeInput) (req *request.R
// * ValidationException
// The request contains an invalid parameter value.
//
-// * InternalServerException
-// An internal error occurred.
-//
// * AccessDeniedException
// The requestor does not have permission to access the target action or resource.
//
// * ResourceNotFoundException
// The target resource was not found.
//
+// * InternalServerException
+// An internal error occurred.
+//
// See also, https://docs.aws.amazon.com/goto/WebAPI/panorama-2019-07-24/DescribeNode
func (c *Panorama) DescribeNode(input *DescribeNodeInput) (*DescribeNodeOutput, error) {
req, out := c.DescribeNodeRequest(input)
@@ -1237,17 +1240,17 @@ func (c *Panorama) DescribeNodeFromTemplateJobRequest(input *DescribeNodeFromTem
// API operation DescribeNodeFromTemplateJob for usage and error information.
//
// Returned Error Types:
+// * ConflictException
+// The target resource is in use.
+//
// * ValidationException
// The request contains an invalid parameter value.
//
-// * InternalServerException
-// An internal error occurred.
-//
// * AccessDeniedException
// The requestor does not have permission to access the target action or resource.
//
-// * ConflictException
-// The target resource is in use.
+// * InternalServerException
+// An internal error occurred.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/panorama-2019-07-24/DescribeNodeFromTemplateJob
func (c *Panorama) DescribeNodeFromTemplateJob(input *DescribeNodeFromTemplateJobInput) (*DescribeNodeFromTemplateJobOutput, error) {
@@ -1325,21 +1328,21 @@ func (c *Panorama) DescribePackageRequest(input *DescribePackageInput) (req *req
// API operation DescribePackage for usage and error information.
//
// Returned Error Types:
+// * ConflictException
+// The target resource is in use.
+//
// * ValidationException
// The request contains an invalid parameter value.
//
-// * InternalServerException
-// An internal error occurred.
-//
// * AccessDeniedException
// The requestor does not have permission to access the target action or resource.
//
-// * ConflictException
-// The target resource is in use.
-//
// * ResourceNotFoundException
// The target resource was not found.
//
+// * InternalServerException
+// An internal error occurred.
+//
// See also, https://docs.aws.amazon.com/goto/WebAPI/panorama-2019-07-24/DescribePackage
func (c *Panorama) DescribePackage(input *DescribePackageInput) (*DescribePackageOutput, error) {
req, out := c.DescribePackageRequest(input)
@@ -1416,17 +1419,17 @@ func (c *Panorama) DescribePackageImportJobRequest(input *DescribePackageImportJ
// API operation DescribePackageImportJob for usage and error information.
//
// Returned Error Types:
+// * ConflictException
+// The target resource is in use.
+//
// * ValidationException
// The request contains an invalid parameter value.
//
-// * InternalServerException
-// An internal error occurred.
-//
// * AccessDeniedException
// The requestor does not have permission to access the target action or resource.
//
-// * ConflictException
-// The target resource is in use.
+// * InternalServerException
+// An internal error occurred.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/panorama-2019-07-24/DescribePackageImportJob
func (c *Panorama) DescribePackageImportJob(input *DescribePackageImportJobInput) (*DescribePackageImportJobOutput, error) {
@@ -1504,21 +1507,21 @@ func (c *Panorama) DescribePackageVersionRequest(input *DescribePackageVersionIn
// API operation DescribePackageVersion for usage and error information.
//
// Returned Error Types:
+// * ConflictException
+// The target resource is in use.
+//
// * ValidationException
// The request contains an invalid parameter value.
//
-// * InternalServerException
-// An internal error occurred.
-//
// * AccessDeniedException
// The requestor does not have permission to access the target action or resource.
//
-// * ConflictException
-// The target resource is in use.
-//
// * ResourceNotFoundException
// The target resource was not found.
//
+// * InternalServerException
+// An internal error occurred.
+//
// See also, https://docs.aws.amazon.com/goto/WebAPI/panorama-2019-07-24/DescribePackageVersion
func (c *Panorama) DescribePackageVersion(input *DescribePackageVersionInput) (*DescribePackageVersionOutput, error) {
req, out := c.DescribePackageVersionRequest(input)
@@ -1571,7 +1574,7 @@ func (c *Panorama) ListApplicationInstanceDependenciesRequest(input *ListApplica
op := &request.Operation{
Name: opListApplicationInstanceDependencies,
HTTPMethod: "GET",
- HTTPPath: "/application-instances/{applicationInstanceId}/package-dependencies",
+ HTTPPath: "/application-instances/{ApplicationInstanceId}/package-dependencies",
Paginator: &request.Paginator{
InputTokens: []string{"NextToken"},
OutputTokens: []string{"NextToken"},
@@ -1711,7 +1714,7 @@ func (c *Panorama) ListApplicationInstanceNodeInstancesRequest(input *ListApplic
op := &request.Operation{
Name: opListApplicationInstanceNodeInstances,
HTTPMethod: "GET",
- HTTPPath: "/application-instances/{applicationInstanceId}/node-instances",
+ HTTPPath: "/application-instances/{ApplicationInstanceId}/node-instances",
Paginator: &request.Paginator{
InputTokens: []string{"NextToken"},
OutputTokens: []string{"NextToken"},
@@ -2027,12 +2030,12 @@ func (c *Panorama) ListDevicesRequest(input *ListDevicesInput) (req *request.Req
// * ValidationException
// The request contains an invalid parameter value.
//
-// * InternalServerException
-// An internal error occurred.
-//
// * AccessDeniedException
// The requestor does not have permission to access the target action or resource.
//
+// * InternalServerException
+// An internal error occurred.
+//
// See also, https://docs.aws.amazon.com/goto/WebAPI/panorama-2019-07-24/ListDevices
func (c *Panorama) ListDevices(input *ListDevicesInput) (*ListDevicesOutput, error) {
req, out := c.ListDevicesRequest(input)
@@ -2173,15 +2176,15 @@ func (c *Panorama) ListDevicesJobsRequest(input *ListDevicesJobsInput) (req *req
// * ValidationException
// The request contains an invalid parameter value.
//
-// * InternalServerException
-// An internal error occurred.
-//
// * AccessDeniedException
// The requestor does not have permission to access the target action or resource.
//
// * ResourceNotFoundException
// The target resource was not found.
//
+// * InternalServerException
+// An internal error occurred.
+//
// See also, https://docs.aws.amazon.com/goto/WebAPI/panorama-2019-07-24/ListDevicesJobs
func (c *Panorama) ListDevicesJobs(input *ListDevicesJobsInput) (*ListDevicesJobsOutput, error) {
req, out := c.ListDevicesJobsRequest(input)
@@ -2316,17 +2319,17 @@ func (c *Panorama) ListNodeFromTemplateJobsRequest(input *ListNodeFromTemplateJo
// API operation ListNodeFromTemplateJobs for usage and error information.
//
// Returned Error Types:
+// * ConflictException
+// The target resource is in use.
+//
// * ValidationException
// The request contains an invalid parameter value.
//
-// * InternalServerException
-// An internal error occurred.
-//
// * AccessDeniedException
// The requestor does not have permission to access the target action or resource.
//
-// * ConflictException
-// The target resource is in use.
+// * InternalServerException
+// An internal error occurred.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/panorama-2019-07-24/ListNodeFromTemplateJobs
func (c *Panorama) ListNodeFromTemplateJobs(input *ListNodeFromTemplateJobsInput) (*ListNodeFromTemplateJobsOutput, error) {
@@ -2605,17 +2608,17 @@ func (c *Panorama) ListPackageImportJobsRequest(input *ListPackageImportJobsInpu
// API operation ListPackageImportJobs for usage and error information.
//
// Returned Error Types:
+// * ConflictException
+// The target resource is in use.
+//
// * ValidationException
// The request contains an invalid parameter value.
//
-// * InternalServerException
-// An internal error occurred.
-//
// * AccessDeniedException
// The requestor does not have permission to access the target action or resource.
//
-// * ConflictException
-// The target resource is in use.
+// * InternalServerException
+// An internal error occurred.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/panorama-2019-07-24/ListPackageImportJobs
func (c *Panorama) ListPackageImportJobs(input *ListPackageImportJobsInput) (*ListPackageImportJobsOutput, error) {
@@ -2751,21 +2754,21 @@ func (c *Panorama) ListPackagesRequest(input *ListPackagesInput) (req *request.R
// API operation ListPackages for usage and error information.
//
// Returned Error Types:
+// * ConflictException
+// The target resource is in use.
+//
// * ValidationException
// The request contains an invalid parameter value.
//
-// * InternalServerException
-// An internal error occurred.
-//
// * AccessDeniedException
// The requestor does not have permission to access the target action or resource.
//
-// * ConflictException
-// The target resource is in use.
-//
// * ResourceNotFoundException
// The target resource was not found.
//
+// * InternalServerException
+// An internal error occurred.
+//
// See also, https://docs.aws.amazon.com/goto/WebAPI/panorama-2019-07-24/ListPackages
func (c *Panorama) ListPackages(input *ListPackagesInput) (*ListPackagesOutput, error) {
req, out := c.ListPackagesRequest(input)
@@ -2894,12 +2897,12 @@ func (c *Panorama) ListTagsForResourceRequest(input *ListTagsForResourceInput) (
// API operation ListTagsForResource for usage and error information.
//
// Returned Error Types:
-// * ResourceNotFoundException
-// The target resource was not found.
-//
// * ValidationException
// The request contains an invalid parameter value.
//
+// * ResourceNotFoundException
+// The target resource was not found.
+//
// * InternalServerException
// An internal error occurred.
//
@@ -2988,15 +2991,15 @@ func (c *Panorama) ProvisionDeviceRequest(input *ProvisionDeviceInput) (req *req
// * ValidationException
// The request contains an invalid parameter value.
//
-// * InternalServerException
-// An internal error occurred.
-//
// * AccessDeniedException
// The requestor does not have permission to access the target action or resource.
//
// * ServiceQuotaExceededException
// The request would cause a limit to be exceeded.
//
+// * InternalServerException
+// An internal error occurred.
+//
// See also, https://docs.aws.amazon.com/goto/WebAPI/panorama-2019-07-24/ProvisionDevice
func (c *Panorama) ProvisionDevice(input *ProvisionDeviceInput) (*ProvisionDeviceOutput, error) {
req, out := c.ProvisionDeviceRequest(input)
@@ -3074,17 +3077,17 @@ func (c *Panorama) RegisterPackageVersionRequest(input *RegisterPackageVersionIn
// API operation RegisterPackageVersion for usage and error information.
//
// Returned Error Types:
+// * ConflictException
+// The target resource is in use.
+//
// * ValidationException
// The request contains an invalid parameter value.
//
-// * InternalServerException
-// An internal error occurred.
-//
// * AccessDeniedException
// The requestor does not have permission to access the target action or resource.
//
-// * ConflictException
-// The target resource is in use.
+// * InternalServerException
+// An internal error occurred.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/panorama-2019-07-24/RegisterPackageVersion
func (c *Panorama) RegisterPackageVersion(input *RegisterPackageVersionInput) (*RegisterPackageVersionOutput, error) {
@@ -3138,7 +3141,7 @@ func (c *Panorama) RemoveApplicationInstanceRequest(input *RemoveApplicationInst
op := &request.Operation{
Name: opRemoveApplicationInstance,
HTTPMethod: "DELETE",
- HTTPPath: "/application-instances/{applicationInstanceId}",
+ HTTPPath: "/application-instances/{ApplicationInstanceId}",
}
if input == nil {
@@ -3169,15 +3172,15 @@ func (c *Panorama) RemoveApplicationInstanceRequest(input *RemoveApplicationInst
// * ValidationException
// The request contains an invalid parameter value.
//
-// * InternalServerException
-// An internal error occurred.
-//
// * AccessDeniedException
// The requestor does not have permission to access the target action or resource.
//
// * ResourceNotFoundException
// The target resource was not found.
//
+// * InternalServerException
+// An internal error occurred.
+//
// See also, https://docs.aws.amazon.com/goto/WebAPI/panorama-2019-07-24/RemoveApplicationInstance
func (c *Panorama) RemoveApplicationInstance(input *RemoveApplicationInstanceInput) (*RemoveApplicationInstanceOutput, error) {
req, out := c.RemoveApplicationInstanceRequest(input)
@@ -3255,12 +3258,12 @@ func (c *Panorama) TagResourceRequest(input *TagResourceInput) (req *request.Req
// API operation TagResource for usage and error information.
//
// Returned Error Types:
-// * ResourceNotFoundException
-// The target resource was not found.
-//
// * ValidationException
// The request contains an invalid parameter value.
//
+// * ResourceNotFoundException
+// The target resource was not found.
+//
// * InternalServerException
// An internal error occurred.
//
@@ -3341,12 +3344,12 @@ func (c *Panorama) UntagResourceRequest(input *UntagResourceInput) (req *request
// API operation UntagResource for usage and error information.
//
// Returned Error Types:
-// * ResourceNotFoundException
-// The target resource was not found.
-//
// * ValidationException
// The request contains an invalid parameter value.
//
+// * ResourceNotFoundException
+// The target resource was not found.
+//
// * InternalServerException
// An internal error occurred.
//
@@ -3432,15 +3435,15 @@ func (c *Panorama) UpdateDeviceMetadataRequest(input *UpdateDeviceMetadataInput)
// * ValidationException
// The request contains an invalid parameter value.
//
-// * InternalServerException
-// An internal error occurred.
-//
// * AccessDeniedException
// The requestor does not have permission to access the target action or resource.
//
// * ResourceNotFoundException
// The target resource was not found.
//
+// * InternalServerException
+// An internal error occurred.
+//
// See also, https://docs.aws.amazon.com/goto/WebAPI/panorama-2019-07-24/UpdateDeviceMetadata
func (c *Panorama) UpdateDeviceMetadata(input *UpdateDeviceMetadataInput) (*UpdateDeviceMetadataOutput, error) {
req, out := c.UpdateDeviceMetadataRequest(input)
@@ -3527,6 +3530,38 @@ func (s *AccessDeniedException) RequestID() string {
return s.RespMetadata.RequestID
}
+// Details about a beta appliance software update.
+type AlternateSoftwareMetadata struct {
+ _ struct{} `type:"structure"`
+
+ // The appliance software version.
+ Version *string `min:"1" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s AlternateSoftwareMetadata) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s AlternateSoftwareMetadata) GoString() string {
+ return s.String()
+}
+
+// SetVersion sets the Version field's value.
+func (s *AlternateSoftwareMetadata) SetVersion(v string) *AlternateSoftwareMetadata {
+ s.Version = &v
+ return s
+}
+
// An application instance on a device.
type ApplicationInstance struct {
_ struct{} `type:"structure"`
@@ -4783,7 +4818,7 @@ type DescribeApplicationInstanceDetailsInput struct {
// The application instance's ID.
//
// ApplicationInstanceId is a required field
- ApplicationInstanceId *string `location:"uri" locationName:"applicationInstanceId" min:"1" type:"string" required:"true"`
+ ApplicationInstanceId *string `location:"uri" locationName:"ApplicationInstanceId" min:"1" type:"string" required:"true"`
}
// String returns the string representation.
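
The locationName tag now matches the {ApplicationInstanceId} URI template changed earlier in this patch, so the path parameter substitutes correctly when the request is built. The exported struct field is unchanged, so existing callers compile as before; a sketch with a hypothetical instance ID:

func describeDetails(client *panorama.Panorama, id string) error {
	out, err := client.DescribeApplicationInstanceDetails(&panorama.DescribeApplicationInstanceDetailsInput{
		ApplicationInstanceId: aws.String(id), // marshaled into the {ApplicationInstanceId} path segment
	})
	if err != nil {
		return err
	}
	fmt.Println("application instance:", aws.StringValue(out.ApplicationInstanceId))
	return nil
}
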
@@ -4926,7 +4961,7 @@ type DescribeApplicationInstanceInput struct {
// The application instance's ID.
//
// ApplicationInstanceId is a required field
- ApplicationInstanceId *string `location:"uri" locationName:"applicationInstanceId" min:"1" type:"string" required:"true"`
+ ApplicationInstanceId *string `location:"uri" locationName:"ApplicationInstanceId" min:"1" type:"string" required:"true"`
}
// String returns the string representation.
@@ -5312,6 +5347,9 @@ func (s *DescribeDeviceJobOutput) SetStatus(v string) *DescribeDeviceJobOutput {
type DescribeDeviceOutput struct {
_ struct{} `type:"structure"`
+ // Beta software releases available for the device.
+ AlternateSoftwares []*AlternateSoftwareMetadata `type:"list"`
+
// The device's ARN.
Arn *string `min:"1" type:"string"`
@@ -5333,6 +5371,9 @@ type DescribeDeviceOutput struct {
// The device's ID.
DeviceId *string `min:"1" type:"string"`
+ // The most recent beta software release.
+ LatestAlternateSoftware *string `min:"1" type:"string"`
+
// The latest software version available for the device.
LatestSoftware *string `min:"1" type:"string"`
@@ -5376,6 +5417,12 @@ func (s DescribeDeviceOutput) GoString() string {
return s.String()
}
+// SetAlternateSoftwares sets the AlternateSoftwares field's value.
+func (s *DescribeDeviceOutput) SetAlternateSoftwares(v []*AlternateSoftwareMetadata) *DescribeDeviceOutput {
+ s.AlternateSoftwares = v
+ return s
+}
+
// SetArn sets the Arn field's value.
func (s *DescribeDeviceOutput) SetArn(v string) *DescribeDeviceOutput {
s.Arn = &v
@@ -5418,6 +5465,12 @@ func (s *DescribeDeviceOutput) SetDeviceId(v string) *DescribeDeviceOutput {
return s
}
+// SetLatestAlternateSoftware sets the LatestAlternateSoftware field's value.
+func (s *DescribeDeviceOutput) SetLatestAlternateSoftware(v string) *DescribeDeviceOutput {
+ s.LatestAlternateSoftware = &v
+ return s
+}
+
// SetLatestSoftware sets the LatestSoftware field's value.
func (s *DescribeDeviceOutput) SetLatestSoftware(v string) *DescribeDeviceOutput {
s.LatestSoftware = &v
@@ -6924,7 +6977,7 @@ type ListApplicationInstanceDependenciesInput struct {
// The application instance's ID.
//
// ApplicationInstanceId is a required field
- ApplicationInstanceId *string `location:"uri" locationName:"applicationInstanceId" min:"1" type:"string" required:"true"`
+ ApplicationInstanceId *string `location:"uri" locationName:"ApplicationInstanceId" min:"1" type:"string" required:"true"`
// The maximum number of application instance dependencies to return in one
// page of results.
@@ -7036,7 +7089,7 @@ type ListApplicationInstanceNodeInstancesInput struct {
// The node instances' application instance ID.
//
// ApplicationInstanceId is a required field
- ApplicationInstanceId *string `location:"uri" locationName:"applicationInstanceId" min:"1" type:"string" required:"true"`
+ ApplicationInstanceId *string `location:"uri" locationName:"ApplicationInstanceId" min:"1" type:"string" required:"true"`
// The maximum number of node instances to return in one page of results.
MaxResults *int64 `location:"querystring" locationName:"maxResults" type:"integer"`
@@ -8066,6 +8119,9 @@ type NetworkPayload struct {
// Settings for Ethernet port 1.
Ethernet1 *EthernetPayload `type:"structure"`
+
+ // Network time protocol (NTP) server settings.
+ Ntp *NtpPayload `type:"structure"`
}
// String returns the string representation.
@@ -8099,6 +8155,11 @@ func (s *NetworkPayload) Validate() error {
invalidParams.AddNested("Ethernet1", err.(request.ErrInvalidParams))
}
}
+ if s.Ntp != nil {
+ if err := s.Ntp.Validate(); err != nil {
+ invalidParams.AddNested("Ntp", err.(request.ErrInvalidParams))
+ }
+ }
if invalidParams.Len() > 0 {
return invalidParams
@@ -8118,6 +8179,12 @@ func (s *NetworkPayload) SetEthernet1(v *EthernetPayload) *NetworkPayload {
return s
}
+// SetNtp sets the Ntp field's value.
+func (s *NetworkPayload) SetNtp(v *NtpPayload) *NetworkPayload {
+ s.Ntp = v
+ return s
+}
+
// The network status of a device.
type NetworkStatus struct {
_ struct{} `type:"structure"`
@@ -8127,6 +8194,12 @@ type NetworkStatus struct {
// The status of Ethernet port 1.
Ethernet1Status *EthernetStatus `type:"structure"`
+
+ // When the network status changed.
+ LastUpdatedTime *time.Time `type:"timestamp"`
+
+ // Details about a network time protocol (NTP) server connection.
+ NtpStatus *NtpStatus `type:"structure"`
}
// String returns the string representation.
@@ -8159,6 +8232,18 @@ func (s *NetworkStatus) SetEthernet1Status(v *EthernetStatus) *NetworkStatus {
return s
}
+// SetLastUpdatedTime sets the LastUpdatedTime field's value.
+func (s *NetworkStatus) SetLastUpdatedTime(v time.Time) *NetworkStatus {
+ s.LastUpdatedTime = &v
+ return s
+}
+
+// SetNtpStatus sets the NtpStatus field's value.
+func (s *NetworkStatus) SetNtpStatus(v *NtpStatus) *NetworkStatus {
+ s.NtpStatus = v
+ return s
+}
+
// An application node that represents a camera stream, a model, code, or output.
type Node struct {
_ struct{} `type:"structure"`
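
NetworkStatus now reports NTP health and an update timestamp alongside the Ethernet ports. A sketch of logging the new fields; how the NetworkStatus value is obtained (for example, from a DescribeDevice response) is left to the caller:

func logNtpStatus(ns *panorama.NetworkStatus) {
	if ns == nil || ns.NtpStatus == nil {
		return
	}
	fmt.Printf("NTP server %s (%s): %s, as of %s\n",
		aws.StringValue(ns.NtpStatus.NtpServerName),
		aws.StringValue(ns.NtpStatus.IpAddress),
		aws.StringValue(ns.NtpStatus.ConnectionStatus),
		aws.TimeValue(ns.LastUpdatedTime))
}
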
@@ -8627,6 +8712,104 @@ func (s *NodeOutputPort) SetType(v string) *NodeOutputPort {
return s
}
+// Network time protocol (NTP) server settings. Use this option to connect to
+// local NTP servers instead of pool.ntp.org.
+type NtpPayload struct {
+ _ struct{} `type:"structure"`
+
+ // NTP servers to use, in order of preference.
+ //
+ // NtpServers is a required field
+ NtpServers []*string `type:"list" required:"true"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s NtpPayload) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s NtpPayload) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *NtpPayload) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "NtpPayload"}
+ if s.NtpServers == nil {
+ invalidParams.Add(request.NewErrParamRequired("NtpServers"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetNtpServers sets the NtpServers field's value.
+func (s *NtpPayload) SetNtpServers(v []*string) *NtpPayload {
+ s.NtpServers = v
+ return s
+}
+
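A minimal usage sketch for the new NTP option (illustrative, not part of the generated code; the `panorama` package name is inferred from the surrounding types, and the server hostnames are placeholders):

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/panorama"
)

func main() {
	// Point the device at local NTP servers instead of pool.ntp.org.
	payload := &panorama.NetworkPayload{}
	payload.SetNtp(&panorama.NtpPayload{
		NtpServers: []*string{
			aws.String("ntp1.example.com"), // placeholder hostnames
			aws.String("ntp2.example.com"),
		},
	})
	// Validate rejects an NtpPayload whose required NtpServers list is nil.
	if err := payload.Validate(); err != nil {
		log.Fatal(err)
	}
}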
+// Details about an NTP server connection.
+type NtpStatus struct {
+ _ struct{} `type:"structure"`
+
+ // The connection's status.
+ ConnectionStatus *string `type:"string" enum:"NetworkConnectionStatus"`
+
+ // The IP address of the server.
+ IpAddress *string `min:"1" type:"string"`
+
+ // The domain name of the server.
+ NtpServerName *string `min:"1" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s NtpStatus) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s NtpStatus) GoString() string {
+ return s.String()
+}
+
+// SetConnectionStatus sets the ConnectionStatus field's value.
+func (s *NtpStatus) SetConnectionStatus(v string) *NtpStatus {
+ s.ConnectionStatus = &v
+ return s
+}
+
+// SetIpAddress sets the IpAddress field's value.
+func (s *NtpStatus) SetIpAddress(v string) *NtpStatus {
+ s.IpAddress = &v
+ return s
+}
+
+// SetNtpServerName sets the NtpServerName field's value.
+func (s *NtpStatus) SetNtpServerName(v string) *NtpStatus {
+ s.NtpServerName = &v
+ return s
+}
+
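A companion sketch for reading the new status fields back; the `NetworkStatus` value would typically come from a device-describe call (an assumption — that operation is not shown in this hunk), with imports as in the sketch above plus `fmt`:

// printNtpStatus reports the NTP connection details, if present.
func printNtpStatus(status *panorama.NetworkStatus) {
	if status == nil || status.NtpStatus == nil {
		return
	}
	fmt.Printf("NTP %s (%s): %s\n",
		aws.StringValue(status.NtpStatus.NtpServerName),
		aws.StringValue(status.NtpStatus.IpAddress),
		aws.StringValue(status.NtpStatus.ConnectionStatus))
	if status.LastUpdatedTime != nil {
		fmt.Println("status last changed:", aws.TimeValue(status.LastUpdatedTime))
	}
}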
// An over-the-air update (OTA) job configuration.
type OTAJobConfig struct {
_ struct{} `type:"structure"`
@@ -9497,7 +9680,7 @@ type RemoveApplicationInstanceInput struct {
// The ID of the application instance to remove.
//
// ApplicationInstanceId is a required field
- ApplicationInstanceId *string `location:"uri" locationName:"applicationInstanceId" min:"1" type:"string" required:"true"`
+ ApplicationInstanceId *string `location:"uri" locationName:"ApplicationInstanceId" min:"1" type:"string" required:"true"`
}
// String returns the string representation.
@@ -10589,6 +10772,9 @@ const (
// NetworkConnectionStatusNotConnected is a NetworkConnectionStatus enum value
NetworkConnectionStatusNotConnected = "NOT_CONNECTED"
+
+ // NetworkConnectionStatusConnecting is a NetworkConnectionStatus enum value
+ NetworkConnectionStatusConnecting = "CONNECTING"
)
// NetworkConnectionStatus_Values returns all elements of the NetworkConnectionStatus enum
@@ -10596,6 +10782,7 @@ func NetworkConnectionStatus_Values() []string {
return []string{
NetworkConnectionStatusConnected,
NetworkConnectionStatusNotConnected,
+ NetworkConnectionStatusConnecting,
}
}
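Callers that branch on the connection status may want to handle the new value explicitly; a small illustrative sketch using only the constants above:

// connectionLabel reduces a NetworkConnectionStatus value to a short label,
// treating the new CONNECTING state as transient rather than failed.
func connectionLabel(status string) string {
	switch status {
	case panorama.NetworkConnectionStatusConnected:
		return "up"
	case panorama.NetworkConnectionStatusConnecting:
		return "coming up" // new in this release
	case panorama.NetworkConnectionStatusNotConnected:
		return "down"
	default:
		return "unknown"
	}
}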
@@ -10686,12 +10873,16 @@ func PackageImportJobStatus_Values() []string {
const (
// PackageImportJobTypeNodePackageVersion is a PackageImportJobType enum value
PackageImportJobTypeNodePackageVersion = "NODE_PACKAGE_VERSION"
+
+ // PackageImportJobTypeMarketplaceNodePackageVersion is a PackageImportJobType enum value
+ PackageImportJobTypeMarketplaceNodePackageVersion = "MARKETPLACE_NODE_PACKAGE_VERSION"
)
// PackageImportJobType_Values returns all elements of the PackageImportJobType enum
func PackageImportJobType_Values() []string {
return []string{
PackageImportJobTypeNodePackageVersion,
+ PackageImportJobTypeMarketplaceNodePackageVersion,
}
}
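An illustrative sketch of selecting the new marketplace job type; the `CreatePackageImportJobInput` field name is an assumption based on the service's existing request shape, and the other required fields are elided:

// JobType field name assumed; consult the CreatePackageImportJob API.
input := &panorama.CreatePackageImportJobInput{
	JobType: aws.String(panorama.PackageImportJobTypeMarketplaceNodePackageVersion),
	// Other required fields (input/output configuration, etc.) omitted here.
}
_ = input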