diff --git a/samtranslator/schema/schema.json b/samtranslator/schema/schema.json index 33d7fd814..6637fae35 100644 --- a/samtranslator/schema/schema.json +++ b/samtranslator/schema/schema.json @@ -2999,11 +2999,6 @@ "title": "AutoSubDomainIAMRole", "type": "string" }, - "Certificate": { - "$ref": "#/definitions/AWS::Amplify::Domain.Certificate", - "markdownDescription": "Describes the SSL/TLS certificate for the domain association. This can be your own custom certificate or the default certificate that Amplify provisions for you.\n\nIf you are updating your domain to use a different certificate, `Certificate` points to the new certificate that is being created instead of the current active certificate. Otherwise, `Certificate` points to the current active certificate.", - "title": "Certificate" - }, "CertificateSettings": { "$ref": "#/definitions/AWS::Amplify::Domain.CertificateSettings", "markdownDescription": "The type of SSL/TLS certificate to use for your custom domain. If you don't specify a certificate type, Amplify uses the default certificate that it provisions and manages for you.", @@ -3026,11 +3021,6 @@ "markdownDescription": "The setting for the subdomain.", "title": "SubDomainSettings", "type": "array" - }, - "UpdateStatus": { - "markdownDescription": "The status of the domain update operation that is currently in progress. The following list describes the valid update states.\n\n- **REQUESTING_CERTIFICATE** - The certificate is in the process of being updated.\n- **PENDING_VERIFICATION** - Indicates that an Amplify managed certificate is in the process of being verified. This occurs during the creation of a custom domain or when a custom domain is updated to use a managed certificate.\n- **IMPORTING_CUSTOM_CERTIFICATE** - Indicates that an Amplify custom certificate is in the process of being imported. This occurs during the creation of a custom domain or when a custom domain is updated to use a custom certificate.\n- **PENDING_DEPLOYMENT** - Indicates that the subdomain or certificate changes are being propagated.\n- **AWAITING_APP_CNAME** - Amplify is waiting for CNAME records corresponding to subdomains to be propagated. If your custom domain is on Route\u00a053, Amplify handles this for you automatically. For more information about custom domains, see [Setting up custom domains](https://docs.aws.amazon.com/amplify/latest/userguide/custom-domains.html) in the *Amplify Hosting User Guide* .\n- **UPDATE_COMPLETE** - The certificate has been associated with a domain.\n- **UPDATE_FAILED** - The certificate has failed to be provisioned or associated, and there is no existing active certificate to roll back to.", - "title": "UpdateStatus", - "type": "string" } }, "required": [ @@ -9029,7 +9019,7 @@ "type": "string" }, "KmsKeyIdentifier": { - "markdownDescription": "", + "markdownDescription": "The AWS Key Management Service key identifier (key ID, key alias, or key ARN) provided when the resource was created or updated.", "title": "KmsKeyIdentifier", "type": "string" }, @@ -9194,7 +9184,7 @@ "items": { "$ref": "#/definitions/AWS::AppConfig::Deployment.DynamicExtensionParameters" }, - "markdownDescription": "The parameters accepted by the extension. You specify parameter values when you associate the extension to an AWS AppConfig resource by using the `CreateExtensionAssociation` API action. 
For AWS Lambda extension actions, these parameters are included in the Lambda request object.", + "markdownDescription": "A map of dynamic extension parameter names to values to pass to associated extensions with `PRE_START_DEPLOYMENT` actions.", "title": "DynamicExtensionParameters", "type": "array" }, @@ -9251,17 +9241,17 @@ "additionalProperties": false, "properties": { "ExtensionReference": { - "markdownDescription": "", + "markdownDescription": "The ARN or ID of the extension for which you are inserting a dynamic parameter.", "title": "ExtensionReference", "type": "string" }, "ParameterName": { - "markdownDescription": "", + "markdownDescription": "The parameter name.", "title": "ParameterName", "type": "string" }, "ParameterValue": { - "markdownDescription": "", + "markdownDescription": "The parameter value.", "title": "ParameterValue", "type": "string" } @@ -12644,6 +12634,14 @@ "title": "Namespace", "type": "string" }, + "Permissions": { + "items": { + "type": "string" + }, + "markdownDescription": "", + "title": "Permissions", + "type": "array" + }, "Tags": { "items": { "$ref": "#/definitions/Tag" @@ -12713,8 +12711,7 @@ } }, "required": [ - "AccessUrl", - "ApprovedOrigins" + "AccessUrl" ], "type": "object" }, @@ -16996,7 +16993,7 @@ "items": { "$ref": "#/definitions/Tag" }, - "markdownDescription": "A list of metadata items that you can associate with your VPC connector resource. A tag is a key-value pair.", + "markdownDescription": "A list of metadata items that you can associate with your VPC connector resource. A tag is a key-value pair.\n\n> A `VpcConnector` is immutable, so you cannot update its tags. To change the tags, replace the resource. To replace a `VpcConnector` , you must provide a new combination of security groups.", "title": "Tags", "type": "array" }, @@ -22725,7 +22722,7 @@ "type": "string" }, "Cooldown": { - "markdownDescription": "*Only needed if you use simple scaling policies.*\n\nThe amount of time, in seconds, between one scaling activity ending and another one starting due to simple scaling policies. For more information, see [Scaling cooldowns for Amazon EC2 Auto Scaling](https://docs.aws.amazon.com/autoscaling/ec2/userguide/Cooldown.html) in the *Amazon EC2 Auto Scaling User Guide* .\n\nDefault: `300` seconds", + "markdownDescription": "*Only needed if you use simple scaling policies.*\n\nThe amount of time, in seconds, between one scaling activity ending and another one starting due to simple scaling policies. For more information, see [Scaling cooldowns for Amazon EC2 Auto Scaling](https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-scaling-cooldowns.html) in the *Amazon EC2 Auto Scaling User Guide* .\n\nDefault: `300` seconds", "title": "Cooldown", "type": "string" }, @@ -22740,7 +22737,7 @@ "type": "string" }, "DesiredCapacityType": { - "markdownDescription": "The unit of measurement for the value specified for desired capacity. Amazon EC2 Auto Scaling supports `DesiredCapacityType` for attribute-based instance type selection only. For more information, see [Creating an Auto Scaling group using attribute-based instance type selection](https://docs.aws.amazon.com/autoscaling/ec2/userguide/create-asg-instance-type-requirements.html) in the *Amazon EC2 Auto Scaling User Guide* .\n\nBy default, Amazon EC2 Auto Scaling specifies `units` , which translates into number of instances.\n\nValid values: `units` | `vcpu` | `memory-mib`", + "markdownDescription": "The unit of measurement for the value specified for desired capacity. 
Amazon EC2 Auto Scaling supports `DesiredCapacityType` for attribute-based instance type selection only. For more information, see [Create a mixed instances group using attribute-based instance type selection](https://docs.aws.amazon.com/autoscaling/ec2/userguide/create-mixed-instances-group-attribute-based-instance-type-selection.html) in the *Amazon EC2 Auto Scaling User Guide* .\n\nBy default, Amazon EC2 Auto Scaling specifies `units` , which translates into number of instances.\n\nValid values: `units` | `vcpu` | `memory-mib`", "title": "DesiredCapacityType", "type": "string" }, @@ -22750,7 +22747,7 @@ "type": "number" }, "HealthCheckType": { - "markdownDescription": "A comma-separated value string of one or more health check types.\n\nThe valid values are `EC2` , `ELB` , and `VPC_LATTICE` . `EC2` is the default health check and cannot be disabled. For more information, see [Health checks for Auto Scaling instances](https://docs.aws.amazon.com/autoscaling/ec2/userguide/healthcheck.html) in the *Amazon EC2 Auto Scaling User Guide* .\n\nOnly specify `EC2` if you must clear a value that was previously set.", + "markdownDescription": "A comma-separated value string of one or more health check types.\n\nThe valid values are `EC2` , `ELB` , and `VPC_LATTICE` . `EC2` is the default health check and cannot be disabled. For more information, see [Health checks for instances in an Auto Scaling group](https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-health-checks.html) in the *Amazon EC2 Auto Scaling User Guide* .\n\nOnly specify `EC2` if you must clear a value that was previously set.", "title": "HealthCheckType", "type": "string" }, @@ -22791,7 +22788,7 @@ "type": "array" }, "MaxInstanceLifetime": { - "markdownDescription": "The maximum amount of time, in seconds, that an instance can be in service. The default is null. If specified, the value must be either 0 or a number equal to or greater than 86,400 seconds (1 day). For more information, see [Replacing Auto Scaling instances based on maximum instance lifetime](https://docs.aws.amazon.com/autoscaling/ec2/userguide/asg-max-instance-lifetime.html) in the *Amazon EC2 Auto Scaling User Guide* .", + "markdownDescription": "The maximum amount of time, in seconds, that an instance can be in service. The default is null. If specified, the value must be either 0 or a number equal to or greater than 86,400 seconds (1 day). For more information, see [Replace Auto Scaling instances based on maximum instance lifetime](https://docs.aws.amazon.com/autoscaling/ec2/userguide/asg-max-instance-lifetime.html) in the *Amazon EC2 Auto Scaling User Guide* .", "title": "MaxInstanceLifetime", "type": "number" }, @@ -22819,7 +22816,7 @@ "title": "MixedInstancesPolicy" }, "NewInstancesProtectedFromScaleIn": { - "markdownDescription": "Indicates whether newly launched instances are protected from termination by Amazon EC2 Auto Scaling when scaling in. For more information about preventing instances from terminating on scale in, see [Using instance scale-in protection](https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-instance-protection.html) in the *Amazon EC2 Auto Scaling User Guide* .", + "markdownDescription": "Indicates whether newly launched instances are protected from termination by Amazon EC2 Auto Scaling when scaling in. 
For more information about preventing instances from terminating on scale in, see [Use instance scale-in protection](https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-instance-protection.html) in the *Amazon EC2 Auto Scaling User Guide* .", "title": "NewInstancesProtectedFromScaleIn", "type": "boolean" }, @@ -22861,7 +22858,7 @@ "items": { "type": "string" }, - "markdownDescription": "A policy or a list of policies that are used to select the instance to terminate. These policies are executed in the order that you list them. For more information, see [Work with Amazon EC2 Auto Scaling termination policies](https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-termination-policies.html) in the *Amazon EC2 Auto Scaling User Guide* .\n\nValid values: `Default` | `AllocationStrategy` | `ClosestToNextInstanceHour` | `NewestInstance` | `OldestInstance` | `OldestLaunchConfiguration` | `OldestLaunchTemplate` | `arn:aws:lambda:region:account-id:function:my-function:my-alias`", + "markdownDescription": "A policy or a list of policies that are used to select the instance to terminate. These policies are executed in the order that you list them. For more information, see [Configure termination policies for Amazon EC2 Auto Scaling](https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-termination-policies.html) in the *Amazon EC2 Auto Scaling User Guide* .\n\nValid values: `Default` | `AllocationStrategy` | `ClosestToNextInstanceHour` | `NewestInstance` | `OldestInstance` | `OldestLaunchConfiguration` | `OldestLaunchTemplate` | `arn:aws:lambda:region:account-id:function:my-function:my-alias`", "title": "TerminationPolicies", "type": "array" }, @@ -23189,7 +23186,7 @@ "title": "InstanceRequirements" }, "InstanceType": { - "markdownDescription": "The instance type, such as `m3.xlarge` . You must specify an instance type that is supported in your requested Region and Availability Zones. For more information, see [Instance types](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html) in the *Amazon Elastic Compute Cloud User Guide* .\n\nYou can specify up to 40 instance types per Auto Scaling group.", + "markdownDescription": "The instance type, such as `m3.xlarge` . You must specify an instance type that is supported in your requested Region and Availability Zones. For more information, see [Instance types](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html) in the *Amazon EC2 User Guide for Linux Instances* .\n\nYou can specify up to 40 instance types per Auto Scaling group.", "title": "InstanceType", "type": "string" }, @@ -23264,7 +23261,7 @@ "type": "string" }, "RoleARN": { - "markdownDescription": "The ARN of the IAM role that allows the Auto Scaling group to publish to the specified notification target. For information about creating this role, see [Configure a notification target for a lifecycle hook](https://docs.aws.amazon.com/autoscaling/ec2/userguide/prepare-for-lifecycle-notifications.html#lifecycle-hook-notification-target) in the *Amazon EC2 Auto Scaling User Guide* .\n\nValid only if the notification target is an Amazon SNS topic or an Amazon SQS queue.", + "markdownDescription": "The ARN of the IAM role that allows the Auto Scaling group to publish to the specified notification target. 
For information about creating this role, see [Prepare to add a lifecycle hook to your Auto Scaling group](https://docs.aws.amazon.com/autoscaling/ec2/userguide/prepare-for-lifecycle-notifications.html) in the *Amazon EC2 Auto Scaling User Guide* .\n\nValid only if the notification target is an Amazon SNS topic or an Amazon SQS queue.", "title": "RoleARN", "type": "string" } @@ -23319,7 +23316,7 @@ "items": { "type": "string" }, - "markdownDescription": "Identifies the metrics to enable.\n\nYou can specify one or more of the following metrics:\n\n- `GroupMinSize`\n- `GroupMaxSize`\n- `GroupDesiredCapacity`\n- `GroupInServiceInstances`\n- `GroupPendingInstances`\n- `GroupStandbyInstances`\n- `GroupTerminatingInstances`\n- `GroupTotalInstances`\n- `GroupInServiceCapacity`\n- `GroupPendingCapacity`\n- `GroupStandbyCapacity`\n- `GroupTerminatingCapacity`\n- `GroupTotalCapacity`\n- `WarmPoolDesiredCapacity`\n- `WarmPoolWarmedCapacity`\n- `WarmPoolPendingCapacity`\n- `WarmPoolTerminatingCapacity`\n- `WarmPoolTotalCapacity`\n- `GroupAndWarmPoolDesiredCapacity`\n- `GroupAndWarmPoolTotalCapacity`\n\nIf you specify `Granularity` and don't specify any metrics, all metrics are enabled.\n\nFor more information, see [Auto Scaling group metrics](https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-cloudwatch-monitoring.html#as-group-metrics) in the *Amazon EC2 Auto Scaling User Guide* .", + "markdownDescription": "Identifies the metrics to enable.\n\nYou can specify one or more of the following metrics:\n\n- `GroupMinSize`\n- `GroupMaxSize`\n- `GroupDesiredCapacity`\n- `GroupInServiceInstances`\n- `GroupPendingInstances`\n- `GroupStandbyInstances`\n- `GroupTerminatingInstances`\n- `GroupTotalInstances`\n- `GroupInServiceCapacity`\n- `GroupPendingCapacity`\n- `GroupStandbyCapacity`\n- `GroupTerminatingCapacity`\n- `GroupTotalCapacity`\n- `WarmPoolDesiredCapacity`\n- `WarmPoolWarmedCapacity`\n- `WarmPoolPendingCapacity`\n- `WarmPoolTerminatingCapacity`\n- `WarmPoolTotalCapacity`\n- `GroupAndWarmPoolDesiredCapacity`\n- `GroupAndWarmPoolTotalCapacity`\n\nIf you specify `Granularity` and don't specify any metrics, all metrics are enabled.\n\nFor more information, see [Amazon CloudWatch metrics for Amazon EC2 Auto Scaling](https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-metrics.html) in the *Amazon EC2 Auto Scaling User Guide* .", "title": "Metrics", "type": "array" } @@ -23499,7 +23496,7 @@ "additionalProperties": false, "properties": { "AssociatePublicIpAddress": { - "markdownDescription": "Specifies whether to assign a public IPv4 address to the group's instances. If the instance is launched into a default subnet, the default is to assign a public IPv4 address, unless you disabled the option to assign a public IPv4 address on the subnet. If the instance is launched into a nondefault subnet, the default is not to assign a public IPv4 address, unless you enabled the option to assign a public IPv4 address on the subnet.\n\nIf you specify `true` , each instance in the Auto Scaling group receives a unique public IPv4 address. For more information, see [Launching Auto Scaling instances in a VPC](https://docs.aws.amazon.com/autoscaling/ec2/userguide/asg-in-vpc.html) in the *Amazon EC2 Auto Scaling User Guide* .\n\nIf you specify this property, you must specify at least one subnet for `VPCZoneIdentifier` when you create your group.", + "markdownDescription": "Specifies whether to assign a public IPv4 address to the group's instances. 
If the instance is launched into a default subnet, the default is to assign a public IPv4 address, unless you disabled the option to assign a public IPv4 address on the subnet. If the instance is launched into a nondefault subnet, the default is not to assign a public IPv4 address, unless you enabled the option to assign a public IPv4 address on the subnet.\n\nIf you specify `true` , each instance in the Auto Scaling group receives a unique public IPv4 address. For more information, see [Provide network connectivity for your Auto Scaling instances using Amazon VPC](https://docs.aws.amazon.com/autoscaling/ec2/userguide/asg-in-vpc.html) in the *Amazon EC2 Auto Scaling User Guide* .\n\nIf you specify this property, you must specify at least one subnet for `VPCZoneIdentifier` when you create your group.", "title": "AssociatePublicIpAddress", "type": "boolean" }, @@ -23525,7 +23522,7 @@ "type": "array" }, "EbsOptimized": { - "markdownDescription": "Specifies whether the launch configuration is optimized for EBS I/O ( `true` ) or not ( `false` ). The optimization provides dedicated throughput to Amazon EBS and an optimized configuration stack to provide optimal I/O performance. This optimization is not available with all instance types. Additional fees are incurred when you enable EBS optimization for an instance type that is not EBS-optimized by default. For more information, see [Amazon EBS-optimized instances](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSOptimized.html) in the *Amazon EC2 User Guide for Linux Instances* .\n\nThe default value is `false` .", + "markdownDescription": "Specifies whether the launch configuration is optimized for EBS I/O ( `true` ) or not ( `false` ). The optimization provides dedicated throughput to Amazon EBS and an optimized configuration stack to provide optimal I/O performance. This optimization is not available with all instance types. Additional fees are incurred when you enable EBS optimization for an instance type that is not EBS-optimized by default. For more information, see [Amazon EBS-optimized instances](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-optimized.html) in the *Amazon EC2 User Guide for Linux Instances* .\n\nThe default value is `false` .", "title": "EbsOptimized", "type": "boolean" }, @@ -23535,7 +23532,7 @@ "type": "string" }, "ImageId": { - "markdownDescription": "The ID of the Amazon Machine Image (AMI) that was assigned during registration. For more information, see [Finding a Linux AMI](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/finding-an-ami.html) in the *Amazon EC2 User Guide for Linux Instances* .\n\nIf you specify `InstanceId` , an `ImageId` is not required.", + "markdownDescription": "The ID of the Amazon Machine Image (AMI) that was assigned during registration. For more information, see [Find a Linux AMI](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/finding-an-ami.html) in the *Amazon EC2 User Guide for Linux Instances* .\n\nIf you specify `InstanceId` , an `ImageId` is not required.", "title": "ImageId", "type": "string" }, @@ -23545,7 +23542,7 @@ "type": "string" }, "InstanceMonitoring": { - "markdownDescription": "Controls whether instances in this group are launched with detailed ( `true` ) or basic ( `false` ) monitoring.\n\nThe default value is `true` (enabled).\n\n> When detailed monitoring is enabled, Amazon CloudWatch generates metrics every minute and your account is charged a fee. When you disable detailed monitoring, CloudWatch generates metrics every 5 minutes. 
For more information, see [Configure Monitoring for Auto Scaling Instances](https://docs.aws.amazon.com/autoscaling/latest/userguide/enable-as-instance-metrics.html) in the *Amazon EC2 Auto Scaling User Guide* .", + "markdownDescription": "Controls whether instances in this group are launched with detailed ( `true` ) or basic ( `false` ) monitoring.\n\nThe default value is `true` (enabled).\n\n> When detailed monitoring is enabled, Amazon CloudWatch generates metrics every minute and your account is charged a fee. When you disable detailed monitoring, CloudWatch generates metrics every 5 minutes. For more information, see [Configure monitoring for Auto Scaling instances](https://docs.aws.amazon.com/autoscaling/latest/userguide/enable-as-instance-metrics.html) in the *Amazon EC2 Auto Scaling User Guide* .", "title": "InstanceMonitoring", "type": "boolean" }, @@ -23560,7 +23557,7 @@ "type": "string" }, "KeyName": { - "markdownDescription": "The name of the key pair. For more information, see [Amazon EC2 key pairs and Linux instances](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html) in the *Amazon EC2 User Guide for Linux Instances* .", + "markdownDescription": "The name of the key pair. For more information, see [Amazon EC2 key pairs and Amazon EC2 instances](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html) in the *Amazon EC2 User Guide for Linux Instances* .", "title": "KeyName", "type": "string" }, @@ -23571,11 +23568,11 @@ }, "MetadataOptions": { "$ref": "#/definitions/AWS::AutoScaling::LaunchConfiguration.MetadataOptions", - "markdownDescription": "The metadata options for the instances. For more information, see [Configuring the Instance Metadata Options](https://docs.aws.amazon.com/autoscaling/ec2/userguide/create-launch-config.html#launch-configurations-imds) in the *Amazon EC2 Auto Scaling User Guide* .", + "markdownDescription": "The metadata options for the instances. For more information, see [Configure the instance metadata options](https://docs.aws.amazon.com/autoscaling/ec2/userguide/create-launch-config.html#launch-configurations-imds) in the *Amazon EC2 Auto Scaling User Guide* .", "title": "MetadataOptions" }, "PlacementTenancy": { - "markdownDescription": "The tenancy of the instance, either `default` or `dedicated` . An instance with `dedicated` tenancy runs on isolated, single-tenant hardware and can only be launched into a VPC. To launch dedicated instances into a shared tenancy VPC (a VPC with the instance placement tenancy attribute set to `default` ), you must set the value of this property to `dedicated` . For more information, see [Configuring instance tenancy with Amazon EC2 Auto Scaling](https://docs.aws.amazon.com/autoscaling/ec2/userguide/auto-scaling-dedicated-instances.html) in the *Amazon EC2 Auto Scaling User Guide* .\n\nIf you specify `PlacementTenancy` , you must specify at least one subnet for `VPCZoneIdentifier` when you create your group.\n\nValid values: `default` | `dedicated`", + "markdownDescription": "The tenancy of the instance, either `default` or `dedicated` . An instance with `dedicated` tenancy runs on isolated, single-tenant hardware and can only be launched into a VPC. 
To launch dedicated instances into a shared tenancy VPC (a VPC with the instance placement tenancy attribute set to `default` ), you must set the value of this property to `dedicated` .\n\nIf you specify `PlacementTenancy` , you must specify at least one subnet for `VPCZoneIdentifier` when you create your group.\n\nValid values: `default` | `dedicated`", "title": "PlacementTenancy", "type": "string" }, @@ -23639,7 +23636,7 @@ "type": "boolean" }, "Encrypted": { - "markdownDescription": "Specifies whether the volume should be encrypted. Encrypted EBS volumes can only be attached to instances that support Amazon EBS encryption. For more information, see [Supported instance types](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html#EBSEncryption_supported_instances) . If your AMI uses encrypted volumes, you can also only launch it on supported instance types.\n\n> If you are creating a volume from a snapshot, you cannot create an unencrypted volume from an encrypted snapshot. Also, you cannot specify a KMS key ID when using a launch configuration.\n> \n> If you enable encryption by default, the EBS volumes that you create are always encrypted, either using the AWS managed KMS key or a customer-managed KMS key, regardless of whether the snapshot was encrypted.\n> \n> For more information, see [Use AWS KMS keys to encrypt Amazon EBS volumes](https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-data-protection.html#encryption) in the *Amazon EC2 Auto Scaling User Guide* .", + "markdownDescription": "Specifies whether the volume should be encrypted. Encrypted EBS volumes can only be attached to instances that support Amazon EBS encryption. For more information, see [Requirements for Amazon EBS encryption](https://docs.aws.amazon.com/ebs/latest/userguide/ebs-encryption-requirements.html) in the *Amazon EBS User Guide* . If your AMI uses encrypted volumes, you can also only launch it on supported instance types.\n\n> If you are creating a volume from a snapshot, you cannot create an unencrypted volume from an encrypted snapshot. Also, you cannot specify a KMS key ID when using a launch configuration.\n> \n> If you enable encryption by default, the EBS volumes that you create are always encrypted, either using the AWS managed KMS key or a customer-managed KMS key, regardless of whether the snapshot was encrypted.\n> \n> For more information, see [Use AWS KMS keys to encrypt Amazon EBS volumes](https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-data-protection.html#encryption) in the *Amazon EC2 Auto Scaling User Guide* .", "title": "Encrypted", "type": "boolean" }, @@ -23664,7 +23661,7 @@ "type": "number" }, "VolumeType": { - "markdownDescription": "The volume type. For more information, see [Amazon EBS volume types](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html) in the *Amazon EC2 User Guide for Linux Instances* .\n\nValid values: `standard` | `io1` | `gp2` | `st1` | `sc1` | `gp3`", + "markdownDescription": "The volume type. For more information, see [Amazon EBS volume types](https://docs.aws.amazon.com/ebs/latest/userguide/ebs-volume-types.html) in the *Amazon EBS User Guide* .\n\nValid values: `standard` | `io1` | `gp2` | `st1` | `sc1` | `gp3`", "title": "VolumeType", "type": "string" } @@ -23792,7 +23789,7 @@ "type": "string" }, "RoleARN": { - "markdownDescription": "The ARN of the IAM role that allows the Auto Scaling group to publish to the specified notification target. 
For information about creating this role, see [Configure a notification target for a lifecycle hook](https://docs.aws.amazon.com/autoscaling/ec2/userguide/prepare-for-lifecycle-notifications.html#lifecycle-hook-notification-target) in the *Amazon EC2 Auto Scaling User Guide* .\n\nValid only if the notification target is an Amazon SNS topic or an Amazon SQS queue.", + "markdownDescription": "The ARN of the IAM role that allows the Auto Scaling group to publish to the specified notification target. For information about creating this role, see [Prepare to add a lifecycle hook to your Auto Scaling group](https://docs.aws.amazon.com/autoscaling/ec2/userguide/prepare-for-lifecycle-notifications.html) in the *Amazon EC2 Auto Scaling User Guide* .\n\nValid only if the notification target is an Amazon SNS topic or an Amazon SQS queue.", "title": "RoleARN", "type": "string" } @@ -23870,7 +23867,7 @@ "type": "string" }, "Cooldown": { - "markdownDescription": "A cooldown period, in seconds, that applies to a specific simple scaling policy. When a cooldown period is specified here, it overrides the default cooldown.\n\nValid only if the policy type is `SimpleScaling` . For more information, see [Scaling cooldowns for Amazon EC2 Auto Scaling](https://docs.aws.amazon.com/autoscaling/ec2/userguide/Cooldown.html) in the *Amazon EC2 Auto Scaling User Guide* .\n\nDefault: None", + "markdownDescription": "A cooldown period, in seconds, that applies to a specific simple scaling policy. When a cooldown period is specified here, it overrides the default cooldown.\n\nValid only if the policy type is `SimpleScaling` . For more information, see [Scaling cooldowns for Amazon EC2 Auto Scaling](https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-scaling-cooldowns.html) in the *Amazon EC2 Auto Scaling User Guide* .\n\nDefault: None", "title": "Cooldown", "type": "string" }, @@ -24113,7 +24110,7 @@ "additionalProperties": false, "properties": { "MaxCapacityBreachBehavior": { - "markdownDescription": "Defines the behavior that should be applied if the forecast capacity approaches or exceeds the maximum capacity of the Auto Scaling group. Defaults to `HonorMaxCapacity` if not specified.\n\nThe following are possible values:\n\n- `HonorMaxCapacity` - Amazon EC2 Auto Scaling cannot scale out capacity higher than the maximum capacity. The maximum capacity is enforced as a hard limit.\n- `IncreaseMaxCapacity` - Amazon EC2 Auto Scaling can scale out capacity higher than the maximum capacity when the forecast capacity is close to or exceeds the maximum capacity. The upper limit is determined by the forecasted capacity and the value for `MaxCapacityBuffer` .", + "markdownDescription": "Defines the behavior that should be applied if the forecast capacity approaches or exceeds the maximum capacity of the Auto Scaling group. Defaults to `HonorMaxCapacity` if not specified.\n\nThe following are possible values:\n\n- `HonorMaxCapacity` - Amazon EC2 Auto Scaling can't increase the maximum capacity of the group when the forecast capacity is close to or exceeds the maximum capacity.\n- `IncreaseMaxCapacity` - Amazon EC2 Auto Scaling can increase the maximum capacity of the group when the forecast capacity is close to or exceeds the maximum capacity. The upper limit is determined by the forecasted capacity and the value for `MaxCapacityBuffer` .\n\n> Use caution when allowing the maximum capacity to be automatically increased. 
This can lead to more instances being launched than intended if the increased maximum capacity is not monitored and managed. The increased maximum capacity then becomes the new normal maximum capacity for the Auto Scaling group until you manually update it. The maximum capacity does not automatically decrease back to the original maximum.", "title": "MaxCapacityBreachBehavior", "type": "string" }, @@ -25744,7 +25741,7 @@ }, "BackupPlanTags": { "additionalProperties": true, - "markdownDescription": "To help organize your resources, you can assign your own metadata to the resources that you create. Each tag is a key-value pair. The specified tags are assigned to all backups created with this plan.", + "markdownDescription": "The tags to assign to the backup plan.", "patternProperties": { "^[a-zA-Z0-9]+$": { "type": "string" @@ -25859,7 +25856,7 @@ }, "RecoveryPointTags": { "additionalProperties": true, - "markdownDescription": "To help organize your resources, you can assign your own metadata to the resources that you create. Each tag is a key-value pair.", + "markdownDescription": "The tags to assign to the resources.", "patternProperties": { "^[a-zA-Z0-9]+$": { "type": "string" @@ -25933,7 +25930,7 @@ "type": "number" }, "OptInToArchiveForSupportedResources": { - "markdownDescription": "Optional Boolean. If this is true, this setting will instruct your backup plan to transition supported resources to archive (cold) storage tier in accordance with your lifecycle settings.", + "markdownDescription": "If the value is true, your backup plan transitions supported resources to archive (cold) storage tier in accordance with your lifecycle settings.", "title": "OptInToArchiveForSupportedResources", "type": "boolean" } @@ -26189,7 +26186,7 @@ }, "BackupVaultTags": { "additionalProperties": true, - "markdownDescription": "Metadata that you can assign to help organize the resources that you create. Each tag is a key-value pair.", + "markdownDescription": "The tags to assign to the backup vault.", "patternProperties": { "^[a-zA-Z0-9]+$": { "type": "string" @@ -26344,7 +26341,7 @@ "items": { "$ref": "#/definitions/Tag" }, - "markdownDescription": "A list of tags with which to tag your framework.", + "markdownDescription": "The tags to assign to your framework.", "title": "FrameworkTags", "type": "array" } @@ -26432,7 +26429,7 @@ "items": { "$ref": "#/definitions/AWS::Backup::Framework.ControlInputParameter" }, - "markdownDescription": "A list of `ParameterName` and `ParameterValue` pairs.", + "markdownDescription": "The name/value pairs.", "title": "ControlInputParameters", "type": "array" }, @@ -26506,7 +26503,7 @@ "items": { "$ref": "#/definitions/Tag" }, - "markdownDescription": "A list of tags to tag your report plan.", + "markdownDescription": "The tags to assign to your report plan.", "title": "ReportPlanTags", "type": "array" }, @@ -26550,7 +26547,7 @@ "items": { "type": "string" }, - "markdownDescription": "A list of the format of your reports: `CSV` , `JSON` , or both. If not specified, the default format is `CSV` .", + "markdownDescription": "The format of your reports: `CSV` , `JSON` , or both. 
If not specified, the default format is `CSV` .", "title": "Formats", "type": "array" }, @@ -26833,7 +26830,7 @@ "type": "string" }, "RestoreTestingSelectionName": { - "markdownDescription": "This is the unique name of the restore testing selection that belongs to the related restore testing plan.", + "markdownDescription": "The unique name of the restore testing selection that belongs to the related restore testing plan.", "title": "RestoreTestingSelectionName", "type": "string" }, @@ -26876,12 +26873,12 @@ "additionalProperties": false, "properties": { "Key": { - "markdownDescription": "The tag key (String). The key can't start with `aws:` .\n\nLength Constraints: Minimum length of 1. Maximum length of 128.\n\nPattern: `^(?![aA]{1}[wW]{1}[sS]{1}:)([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]+)$`", + "markdownDescription": "The tag key.", "title": "Key", "type": "string" }, "Value": { - "markdownDescription": "The value of the key.\n\nLength Constraints: Maximum length of 256.\n\nPattern: `^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)$`", + "markdownDescription": "The tag value.", "title": "Value", "type": "string" } @@ -27860,6 +27857,11 @@ "AWS::Batch::JobDefinition.EksContainerSecurityContext": { "additionalProperties": false, "properties": { + "AllowPrivilegeEscalation": { + "markdownDescription": "Whether or not a container or a Kubernetes pod is allowed to gain more privileges than its parent process. The default value is `false` .", + "title": "AllowPrivilegeEscalation", + "type": "boolean" + }, "Privileged": { "markdownDescription": "When this parameter is `true` , the container is given elevated permissions on the host container instance. The level of permissions are similar to the `root` user permissions. The default value is `false` . This parameter maps to `privileged` policy in the [Privileged pod security policies](https://docs.aws.amazon.com/https://kubernetes.io/docs/concepts/security/pod-security-policy/#privileged) in the *Kubernetes documentation* .", "title": "Privileged", @@ -28065,6 +28067,20 @@ }, "type": "object" }, + "AWS::Batch::JobDefinition.ImagePullSecret": { + "additionalProperties": false, + "properties": { + "Name": { + "markdownDescription": "Provides a unique identifier for the `ImagePullSecret` . This object is required when `EksPodProperties$imagePullSecrets` is used.", + "title": "Name", + "type": "string" + } + }, + "required": [ + "Name" + ], + "type": "object" + }, "AWS::Batch::JobDefinition.LinuxParameters": { "additionalProperties": false, "properties": { @@ -28259,6 +28275,14 @@ "title": "HostNetwork", "type": "boolean" }, + "ImagePullSecrets": { + "items": { + "$ref": "#/definitions/AWS::Batch::JobDefinition.ImagePullSecret" + }, + "markdownDescription": "", + "title": "ImagePullSecrets", + "type": "array" + }, "InitContainers": { "items": { "$ref": "#/definitions/AWS::Batch::JobDefinition.EksContainer" @@ -28886,7 +28910,7 @@ }, "type": "object" }, - "AWS::BillingConductor::BillingGroup": { + "AWS::Bedrock::Agent": { "additionalProperties": false, "properties": { "Condition": { @@ -28921,51 +28945,92 @@ "Properties": { "additionalProperties": false, "properties": { - "AccountGrouping": { - "$ref": "#/definitions/AWS::BillingConductor::BillingGroup.AccountGrouping", - "markdownDescription": "The set of accounts that will be under the billing group. 
The set of accounts resemble the linked accounts in a consolidated billing family.", - "title": "AccountGrouping" + "ActionGroups": { + "items": { + "$ref": "#/definitions/AWS::Bedrock::Agent.AgentActionGroup" + }, + "markdownDescription": "The action groups that belong to an agent.", + "title": "ActionGroups", + "type": "array" }, - "ComputationPreference": { - "$ref": "#/definitions/AWS::BillingConductor::BillingGroup.ComputationPreference", - "markdownDescription": "The preferences and settings that will be used to compute the AWS charges for a billing group.", - "title": "ComputationPreference" + "AgentName": { + "markdownDescription": "The name of the agent.", + "title": "AgentName", + "type": "string" + }, + "AgentResourceRoleArn": { + "markdownDescription": "The Amazon Resource Name (ARN) of the IAM role with permissions to invoke API operations on the agent.", + "title": "AgentResourceRoleArn", + "type": "string" + }, + "AutoPrepare": { + "markdownDescription": "Specifies whether to automatically update the `DRAFT` version of the agent after making changes to the agent. The `DRAFT` version can be continually iterated upon during internal development. By default, this value is `false` .", + "title": "AutoPrepare", + "type": "boolean" + }, + "CustomerEncryptionKeyArn": { + "markdownDescription": "The Amazon Resource Name (ARN) of the AWS KMS key that encrypts the agent.", + "title": "CustomerEncryptionKeyArn", + "type": "string" }, "Description": { - "markdownDescription": "The description of the billing group.", + "markdownDescription": "The description of the agent.", "title": "Description", "type": "string" }, - "Name": { - "markdownDescription": "The billing group's name.", - "title": "Name", + "FoundationModel": { + "markdownDescription": "The foundation model used for orchestration by the agent.", + "title": "FoundationModel", "type": "string" }, - "PrimaryAccountId": { - "markdownDescription": "The account ID that serves as the main account in a billing group.", - "title": "PrimaryAccountId", + "IdleSessionTTLInSeconds": { + "markdownDescription": "The number of seconds for which Amazon Bedrock keeps information about a user's conversation with the agent.\n\nA user interaction remains active for the amount of time specified. If no conversation occurs during this time, the session expires and Amazon Bedrock deletes any data provided before the timeout.", + "title": "IdleSessionTTLInSeconds", + "type": "number" + }, + "Instruction": { + "markdownDescription": "Instructions that tell the agent what it should do and how it should interact with users.", + "title": "Instruction", "type": "string" }, - "Tags": { + "KnowledgeBases": { "items": { - "$ref": "#/definitions/Tag" + "$ref": "#/definitions/AWS::Bedrock::Agent.AgentKnowledgeBase" }, - "markdownDescription": "A map that contains tag keys and tag values that are attached to a billing group.", - "title": "Tags", + "markdownDescription": "The knowledge bases associated with the agent.", + "title": "KnowledgeBases", "type": "array" + }, + "PromptOverrideConfiguration": { + "$ref": "#/definitions/AWS::Bedrock::Agent.PromptOverrideConfiguration", + "markdownDescription": "Contains configurations to override prompt templates in different parts of an agent sequence. 
For more information, see [Advanced prompts](https://docs.aws.amazon.com/bedrock/latest/userguide/advanced-prompts.html) .", + "title": "PromptOverrideConfiguration" + }, + "SkipResourceInUseCheckOnDelete": { + "markdownDescription": "Specifies whether to delete the resource even if it's in use. By default, this value is `false` .", + "title": "SkipResourceInUseCheckOnDelete", + "type": "boolean" + }, + "Tags": { + "additionalProperties": true, + "markdownDescription": "Metadata that you can assign to a resource as key-value pairs. For more information, see the following resources:\n\n- [Tag naming limits and requirements](https://docs.aws.amazon.com/tag-editor/latest/userguide/tagging.html#tag-conventions)\n- [Tagging best practices](https://docs.aws.amazon.com/tag-editor/latest/userguide/tagging.html#tag-best-practices)", + "patternProperties": { + "^[a-zA-Z0-9]+$": { + "type": "string" + } + }, + "title": "Tags", + "type": "object" } }, "required": [ - "AccountGrouping", - "ComputationPreference", - "Name", - "PrimaryAccountId" + "AgentName" ], "type": "object" }, "Type": { "enum": [ - "AWS::BillingConductor::BillingGroup" + "AWS::Bedrock::Agent" ], "type": "string" }, @@ -28984,257 +29049,1231 @@ ], "type": "object" }, - "AWS::BillingConductor::BillingGroup.AccountGrouping": { + "AWS::Bedrock::Agent.APISchema": { "additionalProperties": false, "properties": { - "AutoAssociate": { - "markdownDescription": "Specifies if this billing group will automatically associate newly added AWS accounts that join your consolidated billing family.", - "title": "AutoAssociate", - "type": "boolean" + "Payload": { + "markdownDescription": "The JSON or YAML-formatted payload defining the OpenAPI schema for the action group. For more information, see [Action group OpenAPI schemas](https://docs.aws.amazon.com/bedrock/latest/userguide/agents-api-schema.html) .", + "title": "Payload", + "type": "string" }, - "LinkedAccountIds": { - "items": { - "type": "string" - }, - "markdownDescription": "The account IDs that make up the billing group. Account IDs must be a part of the consolidated billing family, and not associated with another billing group.", - "title": "LinkedAccountIds", - "type": "array" + "S3": { + "$ref": "#/definitions/AWS::Bedrock::Agent.S3Identifier", + "markdownDescription": "Contains details about the S3 object containing the OpenAPI schema for the action group. 
For more information, see [Action group OpenAPI schemas](https://docs.aws.amazon.com/bedrock/latest/userguide/agents-api-schema.html) .", + "title": "S3" } }, - "required": [ - "LinkedAccountIds" - ], "type": "object" }, - "AWS::BillingConductor::BillingGroup.ComputationPreference": { + "AWS::Bedrock::Agent.ActionGroupExecutor": { "additionalProperties": false, "properties": { - "PricingPlanArn": { - "markdownDescription": "The Amazon Resource Name (ARN) of the pricing plan used to compute the AWS charges for a billing group.", - "title": "PricingPlanArn", + "Lambda": { + "markdownDescription": "The Amazon Resource Name (ARN) of the Lambda function containing the business logic that is carried out upon invoking the action.", + "title": "Lambda", "type": "string" } }, "required": [ - "PricingPlanArn" + "Lambda" ], "type": "object" }, - "AWS::BillingConductor::CustomLineItem": { + "AWS::Bedrock::Agent.AgentActionGroup": { "additionalProperties": false, "properties": { - "Condition": { - "type": "string" + "ActionGroupExecutor": { + "$ref": "#/definitions/AWS::Bedrock::Agent.ActionGroupExecutor", + "markdownDescription": "The Amazon Resource Name (ARN) of the Lambda function containing the business logic that is carried out upon invoking the action.", + "title": "ActionGroupExecutor" }, - "DeletionPolicy": { - "enum": [ - "Delete", - "Retain", - "Snapshot" - ], + "ActionGroupName": { + "markdownDescription": "The name of the action group.", + "title": "ActionGroupName", "type": "string" }, - "DependsOn": { - "anyOf": [ - { - "pattern": "^[a-zA-Z0-9]+$", - "type": "string" - }, - { - "items": { - "pattern": "^[a-zA-Z0-9]+$", - "type": "string" - }, - "type": "array" - } - ] - }, - "Metadata": { - "type": "object" + "ActionGroupState": { + "markdownDescription": "Specifies whether the action group is available for the agent to invoke or not when sending an [InvokeAgent](https://docs.aws.amazon.com/bedrock/latest/APIReference/API_agent-runtime_InvokeAgent.html) request.", + "title": "ActionGroupState", + "type": "string" }, - "Properties": { - "additionalProperties": false, - "properties": { - "AccountId": { - "markdownDescription": "The AWS account in which this custom line item will be applied to.", - "title": "AccountId", - "type": "string" - }, - "BillingGroupArn": { - "markdownDescription": "The Amazon Resource Name (ARN) that references the billing group where the custom line item applies to.", - "title": "BillingGroupArn", - "type": "string" - }, - "BillingPeriodRange": { - "$ref": "#/definitions/AWS::BillingConductor::CustomLineItem.BillingPeriodRange", - "markdownDescription": "A time range for which the custom line item is effective.", - "title": "BillingPeriodRange" - }, - "CustomLineItemChargeDetails": { - "$ref": "#/definitions/AWS::BillingConductor::CustomLineItem.CustomLineItemChargeDetails", - "markdownDescription": "The charge details of a custom line item. It should contain only one of `Flat` or `Percentage` .", - "title": "CustomLineItemChargeDetails" - }, - "Description": { - "markdownDescription": "The custom line item's description. 
This is shown on the Bills page in association with the charge value.", - "title": "Description", - "type": "string" - }, - "Name": { - "markdownDescription": "The custom line item's name.", - "title": "Name", - "type": "string" - }, - "Tags": { - "items": { - "$ref": "#/definitions/Tag" - }, - "markdownDescription": "A map that contains tag keys and tag values that are attached to a custom line item.", - "title": "Tags", - "type": "array" - } - }, - "required": [ - "BillingGroupArn", - "Name" - ], - "type": "object" + "ApiSchema": { + "$ref": "#/definitions/AWS::Bedrock::Agent.APISchema", + "markdownDescription": "Contains either details about the S3 object containing the OpenAPI schema for the action group or the JSON or YAML-formatted payload defining the schema. For more information, see [Action group OpenAPI schemas](https://docs.aws.amazon.com/bedrock/latest/userguide/agents-api-schema.html) .", + "title": "ApiSchema" }, - "Type": { - "enum": [ - "AWS::BillingConductor::CustomLineItem" - ], + "Description": { + "markdownDescription": "The description of the action group.", + "title": "Description", "type": "string" }, - "UpdateReplacePolicy": { - "enum": [ - "Delete", - "Retain", - "Snapshot" - ], + "ParentActionGroupSignature": { + "markdownDescription": "If this field is set as `AMAZON.UserInput` , the agent can request the user for additional information when trying to complete a task. The `description` , `apiSchema` , and `actionGroupExecutor` fields must be blank for this action group.\n\nDuring orchestration, if the agent determines that it needs to invoke an API in an action group, but doesn't have enough information to complete the API request, it will invoke this action group instead and return an [Observation](https://docs.aws.amazon.com/bedrock/latest/APIReference/API_agent-runtime_Observation.html) reprompting the user for more information.", + "title": "ParentActionGroupSignature", "type": "string" + }, + "SkipResourceInUseCheckOnDelete": { + "markdownDescription": "Specifies whether to delete the resource even if it's in use. 
By default, this value is `false` .", + "title": "SkipResourceInUseCheckOnDelete", + "type": "boolean" } }, "required": [ - "Type", - "Properties" + "ActionGroupName" ], "type": "object" }, - "AWS::BillingConductor::CustomLineItem.BillingPeriodRange": { + "AWS::Bedrock::Agent.AgentKnowledgeBase": { "additionalProperties": false, "properties": { - "ExclusiveEndBillingPeriod": { - "markdownDescription": "The exclusive end billing period that defines a billing period range where a custom line is applied.", - "title": "ExclusiveEndBillingPeriod", + "Description": { + "markdownDescription": "The description of the association between the agent and the knowledge base.", + "title": "Description", "type": "string" }, - "InclusiveStartBillingPeriod": { - "markdownDescription": "The inclusive start billing period that defines a billing period range where a custom line is applied.", - "title": "InclusiveStartBillingPeriod", + "KnowledgeBaseId": { + "markdownDescription": "The unique identifier of the association between the agent and the knowledge base.", + "title": "KnowledgeBaseId", + "type": "string" + }, + "KnowledgeBaseState": { + "markdownDescription": "Specifies whether to use the knowledge base or not when sending an [InvokeAgent](https://docs.aws.amazon.com/bedrock/latest/APIReference/API_agent-runtime_InvokeAgent.html) request.", + "title": "KnowledgeBaseState", "type": "string" } }, + "required": [ + "Description", + "KnowledgeBaseId" + ], "type": "object" }, - "AWS::BillingConductor::CustomLineItem.CustomLineItemChargeDetails": { + "AWS::Bedrock::Agent.InferenceConfiguration": { "additionalProperties": false, "properties": { - "Flat": { - "$ref": "#/definitions/AWS::BillingConductor::CustomLineItem.CustomLineItemFlatChargeDetails", - "markdownDescription": "A `CustomLineItemFlatChargeDetails` that describes the charge details of a flat custom line item.", - "title": "Flat" + "MaximumLength": { + "markdownDescription": "The maximum number of tokens to allow in the generated response.", + "title": "MaximumLength", + "type": "number" }, - "LineItemFilters": { + "StopSequences": { "items": { - "$ref": "#/definitions/AWS::BillingConductor::CustomLineItem.LineItemFilter" + "type": "string" }, - "markdownDescription": "A representation of the line item filter.", - "title": "LineItemFilters", + "markdownDescription": "A list of stop sequences. A stop sequence is a sequence of characters that causes the model to stop generating the response.", + "title": "StopSequences", "type": "array" }, - "Percentage": { - "$ref": "#/definitions/AWS::BillingConductor::CustomLineItem.CustomLineItemPercentageChargeDetails", - "markdownDescription": "A `CustomLineItemPercentageChargeDetails` that describes the charge details of a percentage custom line item.", - "title": "Percentage" + "Temperature": { + "markdownDescription": "The likelihood of the model selecting higher-probability options while generating a response. A lower value makes the model more likely to choose higher-probability options, while a higher value makes the model more likely to choose lower-probability options.", + "title": "Temperature", + "type": "number" }, - "Type": { - "markdownDescription": "The type of the custom line item that indicates whether the charge is a fee or credit.", - "title": "Type", - "type": "string" + "TopK": { + "markdownDescription": "While generating a response, the model determines the probability of the following token at each point of generation. 
The value that you set for `topK` is the number of most-likely candidates from which the model chooses the next token in the sequence. For example, if you set `topK` to 50, the model selects the next token from among the top 50 most likely choices.", + "title": "TopK", + "type": "number" + }, + "TopP": { + "markdownDescription": "While generating a response, the model determines the probability of the following token at each point of generation. The value that you set for `Top P` determines the number of most-likely candidates from which the model chooses the next token in the sequence. For example, if you set `topP` to 80, the model only selects the next token from the top 80% of the probability distribution of next tokens.", + "title": "TopP", + "type": "number" } }, - "required": [ - "Type" - ], "type": "object" }, - "AWS::BillingConductor::CustomLineItem.CustomLineItemFlatChargeDetails": { + "AWS::Bedrock::Agent.PromptConfiguration": { "additionalProperties": false, "properties": { - "ChargeValue": { - "markdownDescription": "The custom line item's fixed charge value in USD.", - "title": "ChargeValue", - "type": "number" + "BasePromptTemplate": { + "markdownDescription": "Defines the prompt template with which to replace the default prompt template. You can use placeholder variables in the base prompt template to customize the prompt. For more information, see [Prompt template placeholder variables](https://docs.aws.amazon.com/bedrock/latest/userguide/prompt-placeholders.html) .", + "title": "BasePromptTemplate", + "type": "string" + }, + "InferenceConfiguration": { + "$ref": "#/definitions/AWS::Bedrock::Agent.InferenceConfiguration", + "markdownDescription": "Contains inference parameters to use when the agent invokes a foundation model in the part of the agent sequence defined by the `promptType` . For more information, see [Inference parameters for foundation models](https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters.html) .", + "title": "InferenceConfiguration" + }, + "ParserMode": { + "markdownDescription": "Specifies whether to override the default parser Lambda function when parsing the raw foundation model output in the part of the agent sequence defined by the `promptType` . If you set the field as `OVERRIDEN` , the `overrideLambda` field in the [PromptOverrideConfiguration](https://docs.aws.amazon.com/bedrock/latest/APIReference/API_agent_PromptOverrideConfiguration.html) must be specified with the ARN of a Lambda function.", + "title": "ParserMode", + "type": "string" + }, + "PromptCreationMode": { + "markdownDescription": "Specifies whether to override the default prompt template for this `promptType` . Set this value to `OVERRIDDEN` to use the prompt that you provide in the `basePromptTemplate` . If you leave it as `DEFAULT` , the agent uses a default prompt template.", + "title": "PromptCreationMode", + "type": "string" + }, + "PromptState": { + "markdownDescription": "Specifies whether to allow the agent to carry out the step specified in the `promptType` . If you set this value to `DISABLED` , the agent skips that step. 
The default state for each `promptType` is as follows.\n\n- `PRE_PROCESSING` \u2013 `ENABLED`\n- `ORCHESTRATION` \u2013 `ENABLED`\n- `KNOWLEDGE_BASE_RESPONSE_GENERATION` \u2013 `ENABLED`\n- `POST_PROCESSING` \u2013 `DISABLED`", + "title": "PromptState", + "type": "string" + }, + "PromptType": { + "markdownDescription": "The step in the agent sequence that this prompt configuration applies to.", + "title": "PromptType", + "type": "string" } }, - "required": [ - "ChargeValue" - ], "type": "object" }, - "AWS::BillingConductor::CustomLineItem.CustomLineItemPercentageChargeDetails": { + "AWS::Bedrock::Agent.PromptOverrideConfiguration": { "additionalProperties": false, "properties": { - "ChildAssociatedResources": { + "OverrideLambda": { + "markdownDescription": "The ARN of the Lambda function to use when parsing the raw foundation model output in parts of the agent sequence. If you specify this field, at least one of the `promptConfigurations` must contain a `parserMode` value that is set to `OVERRIDDEN` .", + "title": "OverrideLambda", + "type": "string" + }, + "PromptConfigurations": { "items": { - "type": "string" + "$ref": "#/definitions/AWS::Bedrock::Agent.PromptConfiguration" }, - "markdownDescription": "A list of resource ARNs to associate to the percentage custom line item.", - "title": "ChildAssociatedResources", + "markdownDescription": "Contains configurations to override a prompt template in one part of an agent sequence. For more information, see [Advanced prompts](https://docs.aws.amazon.com/bedrock/latest/userguide/advanced-prompts.html) .", + "title": "PromptConfigurations", "type": "array" - }, - "PercentageValue": { - "markdownDescription": "The custom line item's percentage value. This will be multiplied against the combined value of its associated resources to determine its charge value.", - "title": "PercentageValue", - "type": "number" } }, "required": [ - "PercentageValue" + "PromptConfigurations" ], "type": "object" }, - "AWS::BillingConductor::CustomLineItem.LineItemFilter": { + "AWS::Bedrock::Agent.S3Identifier": { "additionalProperties": false, "properties": { - "Attribute": { - "markdownDescription": "The attribute of the line item filter. This specifies what attribute that you can filter on.", - "title": "Attribute", + "S3BucketName": { + "markdownDescription": "The name of the S3 bucket.", + "title": "S3BucketName", "type": "string" }, - "MatchOption": { - "markdownDescription": "The match criteria of the line item filter. This parameter specifies whether not to include the resource value from the billing group total cost.", - "title": "MatchOption", + "S3ObjectKey": { + "markdownDescription": "The S3 object key containing the resource.", + "title": "S3ObjectKey", "type": "string" - }, - "Values": { - "items": { - "type": "string" - }, - "markdownDescription": "The values of the line item filter. This specifies the values to filter on. 
Currently, you can only exclude Savings Plan discounts.", - "title": "Values", - "type": "array" } }, - "required": [ - "Attribute", - "MatchOption", - "Values" - ], "type": "object" }, - "AWS::BillingConductor::PricingPlan": { + "AWS::Bedrock::AgentAlias": { + "additionalProperties": false, + "properties": { + "Condition": { + "type": "string" + }, + "DeletionPolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + }, + "DependsOn": { + "anyOf": [ + { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + { + "items": { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + "type": "array" + } + ] + }, + "Metadata": { + "type": "object" + }, + "Properties": { + "additionalProperties": false, + "properties": { + "AgentAliasName": { + "markdownDescription": "The name of the alias of the agent.", + "title": "AgentAliasName", + "type": "string" + }, + "AgentId": { + "markdownDescription": "The unique identifier of the agent.", + "title": "AgentId", + "type": "string" + }, + "Description": { + "markdownDescription": "The description of the alias of the agent.", + "title": "Description", + "type": "string" + }, + "RoutingConfiguration": { + "items": { + "$ref": "#/definitions/AWS::Bedrock::AgentAlias.AgentAliasRoutingConfigurationListItem" + }, + "markdownDescription": "Contains details about the routing configuration of the alias.", + "title": "RoutingConfiguration", + "type": "array" + }, + "Tags": { + "additionalProperties": true, + "markdownDescription": "Metadata that you can assign to a resource as key-value pairs. For more information, see the following resources:\n\n- [Tag naming limits and requirements](https://docs.aws.amazon.com/tag-editor/latest/userguide/tagging.html#tag-conventions)\n- [Tagging best practices](https://docs.aws.amazon.com/tag-editor/latest/userguide/tagging.html#tag-best-practices)", + "patternProperties": { + "^[a-zA-Z0-9]+$": { + "type": "string" + } + }, + "title": "Tags", + "type": "object" + } + }, + "required": [ + "AgentAliasName", + "AgentId" + ], + "type": "object" + }, + "Type": { + "enum": [ + "AWS::Bedrock::AgentAlias" + ], + "type": "string" + }, + "UpdateReplacePolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + } + }, + "required": [ + "Type", + "Properties" + ], + "type": "object" + }, + "AWS::Bedrock::AgentAlias.AgentAliasHistoryEvent": { + "additionalProperties": false, + "properties": { + "EndDate": { + "markdownDescription": "The date that the alias stopped being associated to the version in the `routingConfiguration` object", + "title": "EndDate", + "type": "string" + }, + "RoutingConfiguration": { + "items": { + "$ref": "#/definitions/AWS::Bedrock::AgentAlias.AgentAliasRoutingConfigurationListItem" + }, + "markdownDescription": "Contains details about the version of the agent with which the alias is associated.", + "title": "RoutingConfiguration", + "type": "array" + }, + "StartDate": { + "markdownDescription": "The date that the alias began being associated to the version in the `routingConfiguration` object.", + "title": "StartDate", + "type": "string" + } + }, + "type": "object" + }, + "AWS::Bedrock::AgentAlias.AgentAliasRoutingConfigurationListItem": { + "additionalProperties": false, + "properties": { + "AgentVersion": { + "markdownDescription": "The version of the agent with which the alias is associated.", + "title": "AgentVersion", + "type": "string" + } + }, + "required": [ + "AgentVersion" + ], + "type": "object" + }, + "AWS::Bedrock::DataSource": { + 
"additionalProperties": false, + "properties": { + "Condition": { + "type": "string" + }, + "DeletionPolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + }, + "DependsOn": { + "anyOf": [ + { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + { + "items": { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + "type": "array" + } + ] + }, + "Metadata": { + "type": "object" + }, + "Properties": { + "additionalProperties": false, + "properties": { + "DataSourceConfiguration": { + "$ref": "#/definitions/AWS::Bedrock::DataSource.DataSourceConfiguration", + "markdownDescription": "Contains details about how the data source is stored.", + "title": "DataSourceConfiguration" + }, + "Description": { + "markdownDescription": "The description of the data source.", + "title": "Description", + "type": "string" + }, + "KnowledgeBaseId": { + "markdownDescription": "The unique identifier of the knowledge base to which the data source belongs.", + "title": "KnowledgeBaseId", + "type": "string" + }, + "Name": { + "markdownDescription": "The name of the data source.", + "title": "Name", + "type": "string" + }, + "ServerSideEncryptionConfiguration": { + "$ref": "#/definitions/AWS::Bedrock::DataSource.ServerSideEncryptionConfiguration", + "markdownDescription": "Contains details about the configuration of the server-side encryption.", + "title": "ServerSideEncryptionConfiguration" + }, + "VectorIngestionConfiguration": { + "$ref": "#/definitions/AWS::Bedrock::DataSource.VectorIngestionConfiguration", + "markdownDescription": "Contains details about how to ingest the documents in the data source.", + "title": "VectorIngestionConfiguration" + } + }, + "required": [ + "DataSourceConfiguration", + "KnowledgeBaseId", + "Name" + ], + "type": "object" + }, + "Type": { + "enum": [ + "AWS::Bedrock::DataSource" + ], + "type": "string" + }, + "UpdateReplacePolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + } + }, + "required": [ + "Type", + "Properties" + ], + "type": "object" + }, + "AWS::Bedrock::DataSource.ChunkingConfiguration": { + "additionalProperties": false, + "properties": { + "ChunkingStrategy": { + "markdownDescription": "Knowledge base can split your source data into chunks. A *chunk* refers to an excerpt from a data source that is returned when the knowledge base that it belongs to is queried. You have the following options for chunking your data. If you opt for `NONE` , then you may want to pre-process your files by splitting them up such that each file corresponds to a chunk.\n\n- `FIXED_SIZE` \u2013 Amazon Bedrock splits your source data into chunks of the approximate size that you set in the `fixedSizeChunkingConfiguration` .\n- `NONE` \u2013 Amazon Bedrock treats each file as one chunk. If you choose this option, you may want to pre-process your documents by splitting them into separate files.", + "title": "ChunkingStrategy", + "type": "string" + }, + "FixedSizeChunkingConfiguration": { + "$ref": "#/definitions/AWS::Bedrock::DataSource.FixedSizeChunkingConfiguration", + "markdownDescription": "Configurations for when you choose fixed-size chunking. 
If you set the `chunkingStrategy` as `NONE` , exclude this field.", + "title": "FixedSizeChunkingConfiguration" + } + }, + "required": [ + "ChunkingStrategy" + ], + "type": "object" + }, + "AWS::Bedrock::DataSource.DataSourceConfiguration": { + "additionalProperties": false, + "properties": { + "S3Configuration": { + "$ref": "#/definitions/AWS::Bedrock::DataSource.S3DataSourceConfiguration", + "markdownDescription": "Contains details about the configuration of the S3 object containing the data source.", + "title": "S3Configuration" + }, + "Type": { + "markdownDescription": "The type of storage for the data source.", + "title": "Type", + "type": "string" + } + }, + "required": [ + "S3Configuration", + "Type" + ], + "type": "object" + }, + "AWS::Bedrock::DataSource.FixedSizeChunkingConfiguration": { + "additionalProperties": false, + "properties": { + "MaxTokens": { + "markdownDescription": "The maximum number of tokens to include in a chunk.", + "title": "MaxTokens", + "type": "number" + }, + "OverlapPercentage": { + "markdownDescription": "The percentage of overlap between adjacent chunks of a data source.", + "title": "OverlapPercentage", + "type": "number" + } + }, + "required": [ + "MaxTokens", + "OverlapPercentage" + ], + "type": "object" + }, + "AWS::Bedrock::DataSource.S3DataSourceConfiguration": { + "additionalProperties": false, + "properties": { + "BucketArn": { + "markdownDescription": "The Amazon Resource Name (ARN) of the bucket that contains the data source.", + "title": "BucketArn", + "type": "string" + }, + "InclusionPrefixes": { + "items": { + "type": "string" + }, + "markdownDescription": "A list of S3 prefixes that define the object containing the data sources. For more information, see [Organizing objects using prefixes](https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-prefixes.html) .", + "title": "InclusionPrefixes", + "type": "array" + } + }, + "required": [ + "BucketArn" + ], + "type": "object" + }, + "AWS::Bedrock::DataSource.ServerSideEncryptionConfiguration": { + "additionalProperties": false, + "properties": { + "KmsKeyArn": { + "markdownDescription": "The Amazon Resource Name (ARN) of the AWS KMS key used to encrypt the resource.", + "title": "KmsKeyArn", + "type": "string" + } + }, + "type": "object" + }, + "AWS::Bedrock::DataSource.VectorIngestionConfiguration": { + "additionalProperties": false, + "properties": { + "ChunkingConfiguration": { + "$ref": "#/definitions/AWS::Bedrock::DataSource.ChunkingConfiguration", + "markdownDescription": "Details about how to chunk the documents in the data source. 
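Assembling the pieces above, a minimal `AWS::Bedrock::DataSource` resource can be sketched. The schema requires `DataSourceConfiguration`, `KnowledgeBaseId`, and `Name`; the `Type` string, bucket ARN, knowledge base ID, and chunking values below are hypothetical illustrations:

```json
{
  "MyDataSource": {
    "Type": "AWS::Bedrock::DataSource",
    "Properties": {
      "Name": "example-data-source",
      "KnowledgeBaseId": "EXAMPLEKBID",
      "DataSourceConfiguration": {
        "Type": "S3",
        "S3Configuration": {
          "BucketArn": "arn:aws:s3:::example-bucket",
          "InclusionPrefixes": ["documents/"]
        }
      },
      "VectorIngestionConfiguration": {
        "ChunkingConfiguration": {
          "ChunkingStrategy": "FIXED_SIZE",
          "FixedSizeChunkingConfiguration": {
            "MaxTokens": 300,
            "OverlapPercentage": 20
          }
        }
      }
    }
  }
}
```

If `ChunkingStrategy` were `NONE`, the `FixedSizeChunkingConfiguration` block would be omitted, matching the constraint described above.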
A *chunk* refers to an excerpt from a data source that is returned when the knowledge base that it belongs to is queried.", + "title": "ChunkingConfiguration" + } + }, + "type": "object" + }, + "AWS::Bedrock::KnowledgeBase": { + "additionalProperties": false, + "properties": { + "Condition": { + "type": "string" + }, + "DeletionPolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + }, + "DependsOn": { + "anyOf": [ + { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + { + "items": { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + "type": "array" + } + ] + }, + "Metadata": { + "type": "object" + }, + "Properties": { + "additionalProperties": false, + "properties": { + "Description": { + "markdownDescription": "The description of the knowledge base.", + "title": "Description", + "type": "string" + }, + "KnowledgeBaseConfiguration": { + "$ref": "#/definitions/AWS::Bedrock::KnowledgeBase.KnowledgeBaseConfiguration", + "markdownDescription": "Contains details about the embeddings configuration of the knowledge base.", + "title": "KnowledgeBaseConfiguration" + }, + "Name": { + "markdownDescription": "The name of the knowledge base.", + "title": "Name", + "type": "string" + }, + "RoleArn": { + "markdownDescription": "The Amazon Resource Name (ARN) of the IAM role with permissions to invoke API operations on the knowledge base.", + "title": "RoleArn", + "type": "string" + }, + "StorageConfiguration": { + "$ref": "#/definitions/AWS::Bedrock::KnowledgeBase.StorageConfiguration", + "markdownDescription": "Contains details about the storage configuration of the knowledge base.", + "title": "StorageConfiguration" + }, + "Tags": { + "additionalProperties": true, + "markdownDescription": "Metadata that you can assign to a resource as key-value pairs. 
For more information, see the following resources:\n\n- [Tag naming limits and requirements](https://docs.aws.amazon.com/tag-editor/latest/userguide/tagging.html#tag-conventions)\n- [Tagging best practices](https://docs.aws.amazon.com/tag-editor/latest/userguide/tagging.html#tag-best-practices)", + "patternProperties": { + "^[a-zA-Z0-9]+$": { + "type": "string" + } + }, + "title": "Tags", + "type": "object" + } + }, + "required": [ + "KnowledgeBaseConfiguration", + "Name", + "RoleArn", + "StorageConfiguration" + ], + "type": "object" + }, + "Type": { + "enum": [ + "AWS::Bedrock::KnowledgeBase" + ], + "type": "string" + }, + "UpdateReplacePolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + } + }, + "required": [ + "Type", + "Properties" + ], + "type": "object" + }, + "AWS::Bedrock::KnowledgeBase.KnowledgeBaseConfiguration": { + "additionalProperties": false, + "properties": { + "Type": { + "markdownDescription": "The type of data that the data source is converted into for the knowledge base.", + "title": "Type", + "type": "string" + }, + "VectorKnowledgeBaseConfiguration": { + "$ref": "#/definitions/AWS::Bedrock::KnowledgeBase.VectorKnowledgeBaseConfiguration", + "markdownDescription": "Contains details about the embeddings model that's used to convert the data source.", + "title": "VectorKnowledgeBaseConfiguration" + } + }, + "required": [ + "Type", + "VectorKnowledgeBaseConfiguration" + ], + "type": "object" + }, + "AWS::Bedrock::KnowledgeBase.OpenSearchServerlessConfiguration": { + "additionalProperties": false, + "properties": { + "CollectionArn": { + "markdownDescription": "The Amazon Resource Name (ARN) of the OpenSearch Service vector store.", + "title": "CollectionArn", + "type": "string" + }, + "FieldMapping": { + "$ref": "#/definitions/AWS::Bedrock::KnowledgeBase.OpenSearchServerlessFieldMapping", + "markdownDescription": "Contains the names of the fields to which to map information about the vector store.", + "title": "FieldMapping" + }, + "VectorIndexName": { + "markdownDescription": "The name of the vector store.", + "title": "VectorIndexName", + "type": "string" + } + }, + "required": [ + "CollectionArn", + "FieldMapping", + "VectorIndexName" + ], + "type": "object" + }, + "AWS::Bedrock::KnowledgeBase.OpenSearchServerlessFieldMapping": { + "additionalProperties": false, + "properties": { + "MetadataField": { + "markdownDescription": "The name of the field in which Amazon Bedrock stores metadata about the vector store.", + "title": "MetadataField", + "type": "string" + }, + "TextField": { + "markdownDescription": "The name of the field in which Amazon Bedrock stores the raw text from your data. 
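As a concrete reading of the OpenSearch Serverless definitions, a hypothetical `StorageConfiguration` fragment might look like this; the `Type` string follows the service's API conventions and, like the ARN and field names, is an assumption for illustration (`VectorField` is defined immediately below):

```json
{
  "StorageConfiguration": {
    "Type": "OPENSEARCH_SERVERLESS",
    "OpensearchServerlessConfiguration": {
      "CollectionArn": "arn:aws:aoss:us-east-1:111122223333:collection/example",
      "VectorIndexName": "example-index",
      "FieldMapping": {
        "MetadataField": "metadata",
        "TextField": "text",
        "VectorField": "vector"
      }
    }
  }
}
```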
The text is split according to the chunking strategy you choose.", + "title": "TextField", + "type": "string" + }, + "VectorField": { + "markdownDescription": "The name of the field in which Amazon Bedrock stores the vector embeddings for your data sources.", + "title": "VectorField", + "type": "string" + } + }, + "required": [ + "MetadataField", + "TextField", + "VectorField" + ], + "type": "object" + }, + "AWS::Bedrock::KnowledgeBase.PineconeConfiguration": { + "additionalProperties": false, + "properties": { + "ConnectionString": { + "markdownDescription": "The endpoint URL for your index management page.", + "title": "ConnectionString", + "type": "string" + }, + "CredentialsSecretArn": { + "markdownDescription": "The Amazon Resource Name (ARN) of the secret that you created in AWS Secrets Manager that is linked to your Pinecone API key.", + "title": "CredentialsSecretArn", + "type": "string" + }, + "FieldMapping": { + "$ref": "#/definitions/AWS::Bedrock::KnowledgeBase.PineconeFieldMapping", + "markdownDescription": "Contains the names of the fields to which to map information about the vector store.", + "title": "FieldMapping" + }, + "Namespace": { + "markdownDescription": "The namespace to be used to write new data to your database.", + "title": "Namespace", + "type": "string" + } + }, + "required": [ + "ConnectionString", + "CredentialsSecretArn", + "FieldMapping" + ], + "type": "object" + }, + "AWS::Bedrock::KnowledgeBase.PineconeFieldMapping": { + "additionalProperties": false, + "properties": { + "MetadataField": { + "markdownDescription": "The name of the field in which Amazon Bedrock stores metadata about the vector store.", + "title": "MetadataField", + "type": "string" + }, + "TextField": { + "markdownDescription": "The name of the field in which Amazon Bedrock stores the raw text from your data. 
The text is split according to the chunking strategy you choose.", + "title": "TextField", + "type": "string" + } + }, + "required": [ + "MetadataField", + "TextField" + ], + "type": "object" + }, + "AWS::Bedrock::KnowledgeBase.RdsConfiguration": { + "additionalProperties": false, + "properties": { + "CredentialsSecretArn": { + "markdownDescription": "The Amazon Resource Name (ARN) of the secret that you created in AWS Secrets Manager that is linked to your Amazon RDS database.", + "title": "CredentialsSecretArn", + "type": "string" + }, + "DatabaseName": { + "markdownDescription": "The name of your Amazon RDS database.", + "title": "DatabaseName", + "type": "string" + }, + "FieldMapping": { + "$ref": "#/definitions/AWS::Bedrock::KnowledgeBase.RdsFieldMapping", + "markdownDescription": "Contains the names of the fields to which to map information about the vector store.", + "title": "FieldMapping" + }, + "ResourceArn": { + "markdownDescription": "The Amazon Resource Name (ARN) of the vector store.", + "title": "ResourceArn", + "type": "string" + }, + "TableName": { + "markdownDescription": "The name of the table in the database.", + "title": "TableName", + "type": "string" + } + }, + "required": [ + "CredentialsSecretArn", + "DatabaseName", + "FieldMapping", + "ResourceArn", + "TableName" + ], + "type": "object" + }, + "AWS::Bedrock::KnowledgeBase.RdsFieldMapping": { + "additionalProperties": false, + "properties": { + "MetadataField": { + "markdownDescription": "The name of the field in which Amazon Bedrock stores metadata about the vector store.", + "title": "MetadataField", + "type": "string" + }, + "PrimaryKeyField": { + "markdownDescription": "The name of the field in which Amazon Bedrock stores the ID for each entry.", + "title": "PrimaryKeyField", + "type": "string" + }, + "TextField": { + "markdownDescription": "The name of the field in which Amazon Bedrock stores the raw text from your data. The text is split according to the chunking strategy you choose.", + "title": "TextField", + "type": "string" + }, + "VectorField": { + "markdownDescription": "The name of the field in which Amazon Bedrock stores the vector embeddings for your data sources.", + "title": "VectorField", + "type": "string" + } + }, + "required": [ + "MetadataField", + "PrimaryKeyField", + "TextField", + "VectorField" + ], + "type": "object" + }, + "AWS::Bedrock::KnowledgeBase.StorageConfiguration": { + "additionalProperties": false, + "properties": { + "OpensearchServerlessConfiguration": { + "$ref": "#/definitions/AWS::Bedrock::KnowledgeBase.OpenSearchServerlessConfiguration", + "markdownDescription": "Contains the storage configuration of the knowledge base in Amazon OpenSearch Service.", + "title": "OpensearchServerlessConfiguration" + }, + "PineconeConfiguration": { + "$ref": "#/definitions/AWS::Bedrock::KnowledgeBase.PineconeConfiguration", + "markdownDescription": "Contains the storage configuration of the knowledge base in Pinecone.", + "title": "PineconeConfiguration" + }, + "RdsConfiguration": { + "$ref": "#/definitions/AWS::Bedrock::KnowledgeBase.RdsConfiguration", + "markdownDescription": "Contains details about the storage configuration of the knowledge base in Amazon RDS. 
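The RDS variant above completes the set of storage back ends, so a full `AWS::Bedrock::KnowledgeBase` resource can now be sketched. The schema requires `KnowledgeBaseConfiguration`, `Name`, `RoleArn`, and `StorageConfiguration` (the embeddings model ARN property is defined just below); the `Type` strings, ARNs, and field names here are illustrative assumptions:

```json
{
  "MyKnowledgeBase": {
    "Type": "AWS::Bedrock::KnowledgeBase",
    "Properties": {
      "Name": "example-knowledge-base",
      "RoleArn": "arn:aws:iam::111122223333:role/ExampleBedrockKbRole",
      "KnowledgeBaseConfiguration": {
        "Type": "VECTOR",
        "VectorKnowledgeBaseConfiguration": {
          "EmbeddingModelArn": "arn:aws:bedrock:us-east-1::foundation-model/amazon.titan-embed-text-v1"
        }
      },
      "StorageConfiguration": {
        "Type": "RDS",
        "RdsConfiguration": {
          "ResourceArn": "arn:aws:rds:us-east-1:111122223333:cluster:example-cluster",
          "CredentialsSecretArn": "arn:aws:secretsmanager:us-east-1:111122223333:secret:example",
          "DatabaseName": "vectordb",
          "TableName": "bedrock_kb",
          "FieldMapping": {
            "PrimaryKeyField": "id",
            "MetadataField": "metadata",
            "TextField": "chunks",
            "VectorField": "embedding"
          }
        }
      }
    }
  }
}
```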
For more information, see [Create a vector index in Amazon RDS](https://docs.aws.amazon.com/bedrock/latest/userguide/knowledge-base-setup-rds.html) .", + "title": "RdsConfiguration" + }, + "Type": { + "markdownDescription": "The vector store service in which the knowledge base is stored.", + "title": "Type", + "type": "string" + } + }, + "required": [ + "Type" + ], + "type": "object" + }, + "AWS::Bedrock::KnowledgeBase.VectorKnowledgeBaseConfiguration": { + "additionalProperties": false, + "properties": { + "EmbeddingModelArn": { + "markdownDescription": "The Amazon Resource Name (ARN) of the model used to create vector embeddings for the knowledge base.", + "title": "EmbeddingModelArn", + "type": "string" + } + }, + "required": [ + "EmbeddingModelArn" + ], + "type": "object" + }, + "AWS::BillingConductor::BillingGroup": { + "additionalProperties": false, + "properties": { + "Condition": { + "type": "string" + }, + "DeletionPolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + }, + "DependsOn": { + "anyOf": [ + { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + { + "items": { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + "type": "array" + } + ] + }, + "Metadata": { + "type": "object" + }, + "Properties": { + "additionalProperties": false, + "properties": { + "AccountGrouping": { + "$ref": "#/definitions/AWS::BillingConductor::BillingGroup.AccountGrouping", + "markdownDescription": "The set of accounts that will be under the billing group. The set of accounts resemble the linked accounts in a consolidated billing family.", + "title": "AccountGrouping" + }, + "ComputationPreference": { + "$ref": "#/definitions/AWS::BillingConductor::BillingGroup.ComputationPreference", + "markdownDescription": "The preferences and settings that will be used to compute the AWS charges for a billing group.", + "title": "ComputationPreference" + }, + "Description": { + "markdownDescription": "The description of the billing group.", + "title": "Description", + "type": "string" + }, + "Name": { + "markdownDescription": "The billing group's name.", + "title": "Name", + "type": "string" + }, + "PrimaryAccountId": { + "markdownDescription": "The account ID that serves as the main account in a billing group.", + "title": "PrimaryAccountId", + "type": "string" + }, + "Tags": { + "items": { + "$ref": "#/definitions/Tag" + }, + "markdownDescription": "A map that contains tag keys and tag values that are attached to a billing group.", + "title": "Tags", + "type": "array" + } + }, + "required": [ + "AccountGrouping", + "ComputationPreference", + "Name", + "PrimaryAccountId" + ], + "type": "object" + }, + "Type": { + "enum": [ + "AWS::BillingConductor::BillingGroup" + ], + "type": "string" + }, + "UpdateReplacePolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + } + }, + "required": [ + "Type", + "Properties" + ], + "type": "object" + }, + "AWS::BillingConductor::BillingGroup.AccountGrouping": { + "additionalProperties": false, + "properties": { + "AutoAssociate": { + "markdownDescription": "Specifies if this billing group will automatically associate newly added AWS accounts that join your consolidated billing family.", + "title": "AutoAssociate", + "type": "boolean" + }, + "LinkedAccountIds": { + "items": { + "type": "string" + }, + "markdownDescription": "The account IDs that make up the billing group. 
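For the billing group definitions beginning here, a minimal hypothetical `AWS::BillingConductor::BillingGroup` resource follows; the account IDs and pricing plan ARN are placeholders, while the required properties (`AccountGrouping`, `ComputationPreference`, `Name`, `PrimaryAccountId`) match the schema:

```json
{
  "MyBillingGroup": {
    "Type": "AWS::BillingConductor::BillingGroup",
    "Properties": {
      "Name": "example-billing-group",
      "PrimaryAccountId": "111122223333",
      "AccountGrouping": {
        "AutoAssociate": false,
        "LinkedAccountIds": ["111122223333", "444455556666"]
      },
      "ComputationPreference": {
        "PricingPlanArn": "arn:aws:billingconductor::111122223333:pricingplan/example"
      }
    }
  }
}
```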
Account IDs must be a part of the consolidated billing family, and not associated with another billing group.", + "title": "LinkedAccountIds", + "type": "array" + } + }, + "required": [ + "LinkedAccountIds" + ], + "type": "object" + }, + "AWS::BillingConductor::BillingGroup.ComputationPreference": { + "additionalProperties": false, + "properties": { + "PricingPlanArn": { + "markdownDescription": "The Amazon Resource Name (ARN) of the pricing plan used to compute the AWS charges for a billing group.", + "title": "PricingPlanArn", + "type": "string" + } + }, + "required": [ + "PricingPlanArn" + ], + "type": "object" + }, + "AWS::BillingConductor::CustomLineItem": { + "additionalProperties": false, + "properties": { + "Condition": { + "type": "string" + }, + "DeletionPolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + }, + "DependsOn": { + "anyOf": [ + { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + { + "items": { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + "type": "array" + } + ] + }, + "Metadata": { + "type": "object" + }, + "Properties": { + "additionalProperties": false, + "properties": { + "AccountId": { + "markdownDescription": "The AWS account to which this custom line item will be applied.", + "title": "AccountId", + "type": "string" + }, + "BillingGroupArn": { + "markdownDescription": "The Amazon Resource Name (ARN) that references the billing group to which the custom line item applies.", + "title": "BillingGroupArn", + "type": "string" + }, + "BillingPeriodRange": { + "$ref": "#/definitions/AWS::BillingConductor::CustomLineItem.BillingPeriodRange", + "markdownDescription": "A time range for which the custom line item is effective.", + "title": "BillingPeriodRange" + }, + "CustomLineItemChargeDetails": { + "$ref": "#/definitions/AWS::BillingConductor::CustomLineItem.CustomLineItemChargeDetails", + "markdownDescription": "The charge details of a custom line item. It should contain only one of `Flat` or `Percentage` .", + "title": "CustomLineItemChargeDetails" + }, + "Description": { + "markdownDescription": "The custom line item's description. 
This is shown on the Bills page in association with the charge value.", + "title": "Description", + "type": "string" + }, + "Name": { + "markdownDescription": "The custom line item's name.", + "title": "Name", + "type": "string" + }, + "Tags": { + "items": { + "$ref": "#/definitions/Tag" + }, + "markdownDescription": "A map that contains tag keys and tag values that are attached to a custom line item.", + "title": "Tags", + "type": "array" + } + }, + "required": [ + "BillingGroupArn", + "Name" + ], + "type": "object" + }, + "Type": { + "enum": [ + "AWS::BillingConductor::CustomLineItem" + ], + "type": "string" + }, + "UpdateReplacePolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + } + }, + "required": [ + "Type", + "Properties" + ], + "type": "object" + }, + "AWS::BillingConductor::CustomLineItem.BillingPeriodRange": { + "additionalProperties": false, + "properties": { + "ExclusiveEndBillingPeriod": { + "markdownDescription": "The exclusive end billing period that defines a billing period range where a custom line is applied.", + "title": "ExclusiveEndBillingPeriod", + "type": "string" + }, + "InclusiveStartBillingPeriod": { + "markdownDescription": "The inclusive start billing period that defines a billing period range where a custom line is applied.", + "title": "InclusiveStartBillingPeriod", + "type": "string" + } + }, + "type": "object" + }, + "AWS::BillingConductor::CustomLineItem.CustomLineItemChargeDetails": { + "additionalProperties": false, + "properties": { + "Flat": { + "$ref": "#/definitions/AWS::BillingConductor::CustomLineItem.CustomLineItemFlatChargeDetails", + "markdownDescription": "A `CustomLineItemFlatChargeDetails` that describes the charge details of a flat custom line item.", + "title": "Flat" + }, + "LineItemFilters": { + "items": { + "$ref": "#/definitions/AWS::BillingConductor::CustomLineItem.LineItemFilter" + }, + "markdownDescription": "A representation of the line item filter.", + "title": "LineItemFilters", + "type": "array" + }, + "Percentage": { + "$ref": "#/definitions/AWS::BillingConductor::CustomLineItem.CustomLineItemPercentageChargeDetails", + "markdownDescription": "A `CustomLineItemPercentageChargeDetails` that describes the charge details of a percentage custom line item.", + "title": "Percentage" + }, + "Type": { + "markdownDescription": "The type of the custom line item that indicates whether the charge is a fee or credit.", + "title": "Type", + "type": "string" + } + }, + "required": [ + "Type" + ], + "type": "object" + }, + "AWS::BillingConductor::CustomLineItem.CustomLineItemFlatChargeDetails": { + "additionalProperties": false, + "properties": { + "ChargeValue": { + "markdownDescription": "The custom line item's fixed charge value in USD.", + "title": "ChargeValue", + "type": "number" + } + }, + "required": [ + "ChargeValue" + ], + "type": "object" + }, + "AWS::BillingConductor::CustomLineItem.CustomLineItemPercentageChargeDetails": { + "additionalProperties": false, + "properties": { + "ChildAssociatedResources": { + "items": { + "type": "string" + }, + "markdownDescription": "A list of resource ARNs to associate to the percentage custom line item.", + "title": "ChildAssociatedResources", + "type": "array" + }, + "PercentageValue": { + "markdownDescription": "The custom line item's percentage value. 
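To illustrate the charge-details shape, here is a hedged sketch of a flat-fee `AWS::BillingConductor::CustomLineItem`; the schema describes `Type` only as indicating a fee or credit, so the `FEE` string and the ARN below are illustrative assumptions:

```json
{
  "MyCustomLineItem": {
    "Type": "AWS::BillingConductor::CustomLineItem",
    "Properties": {
      "Name": "example-flat-fee",
      "BillingGroupArn": "arn:aws:billingconductor::111122223333:billinggroup/111122223333",
      "CustomLineItemChargeDetails": {
        "Type": "FEE",
        "Flat": {
          "ChargeValue": 100
        }
      }
    }
  }
}
```

Per the charge-details description above, exactly one of `Flat` or `Percentage` should be present.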
This will be multiplied against the combined value of its associated resources to determine its charge value.", + "title": "PercentageValue", + "type": "number" + } + }, + "required": [ + "PercentageValue" + ], + "type": "object" + }, + "AWS::BillingConductor::CustomLineItem.LineItemFilter": { + "additionalProperties": false, + "properties": { + "Attribute": { + "markdownDescription": "The attribute of the line item filter. This specifies the attribute that you can filter on.", + "title": "Attribute", + "type": "string" + }, + "MatchOption": { + "markdownDescription": "The match criteria of the line item filter. This parameter specifies whether or not to include the resource value from the billing group total cost.", + "title": "MatchOption", + "type": "string" + }, + "Values": { + "items": { + "type": "string" + }, + "markdownDescription": "The values of the line item filter. This specifies the values to filter on. Currently, you can only exclude Savings Plan discounts.", + "title": "Values", + "type": "array" + } + }, + "required": [ + "Attribute", + "MatchOption", + "Values" + ], + "type": "object" + }, + "AWS::BillingConductor::PricingPlan": { "additionalProperties": false, "properties": { "Condition": { @@ -31281,7 +32320,7 @@ "type": "string" }, "TeamsChannelId": { - "markdownDescription": "", + "markdownDescription": "The ID of the Microsoft Teams channel.\n\nTo get the channel ID, open Microsoft Teams, right click on the channel name in the left pane, then choose Copy. An example of the channel ID syntax is: `19%3ab6ef35dc342d56ba5654e6fc6d25a071%40thread.tacv2` .", "title": "TeamsChannelId", "type": "string" }, @@ -32164,7 +33203,7 @@ "items": { "$ref": "#/definitions/AWS::CleanRooms::ConfiguredTable.DifferentialPrivacyColumn" }, - "markdownDescription": "", + "markdownDescription": "The name of the column, such as user_id, that contains the unique identifier of your users, whose privacy you want to protect. If you want to turn on differential privacy for two or more tables in a collaboration, you must configure the same column as the user identifier column in both analysis rules.", "title": "Columns", "type": "array" } @@ -32497,6 +33536,319 @@ ], "type": "object" }, + "AWS::CleanRooms::PrivacyBudgetTemplate": { + "additionalProperties": false, + "properties": { + "Condition": { + "type": "string" + }, + "DeletionPolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + }, + "DependsOn": { + "anyOf": [ + { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + { + "items": { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + "type": "array" + } + ] + }, + "Metadata": { + "type": "object" + }, + "Properties": { + "additionalProperties": false, + "properties": { + "AutoRefresh": { + "markdownDescription": "How often the privacy budget refreshes.\n\n> If you plan to regularly bring new data into the collaboration, use `CALENDAR_MONTH` to automatically get a new privacy budget for the collaboration every calendar month. Choosing this option allows arbitrary amounts of information to be revealed about rows of the data when repeatedly queried across refreshes. 
Avoid choosing this if the same rows will be repeatedly queried between privacy budget refreshes.", + "title": "AutoRefresh", + "type": "string" + }, + "MembershipIdentifier": { + "markdownDescription": "The identifier for a membership resource.", + "title": "MembershipIdentifier", + "type": "string" + }, + "Parameters": { + "$ref": "#/definitions/AWS::CleanRooms::PrivacyBudgetTemplate.Parameters", + "markdownDescription": "Specifies the epsilon and noise parameters for the privacy budget template.", + "title": "Parameters" + }, + "PrivacyBudgetType": { + "markdownDescription": "Specifies the type of the privacy budget template.", + "title": "PrivacyBudgetType", + "type": "string" + }, + "Tags": { + "items": { + "$ref": "#/definitions/Tag" + }, + "markdownDescription": "", + "title": "Tags", + "type": "array" + } + }, + "required": [ + "AutoRefresh", + "MembershipIdentifier", + "Parameters", + "PrivacyBudgetType" + ], + "type": "object" + }, + "Type": { + "enum": [ + "AWS::CleanRooms::PrivacyBudgetTemplate" + ], + "type": "string" + }, + "UpdateReplacePolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + } + }, + "required": [ + "Type", + "Properties" + ], + "type": "object" + }, + "AWS::CleanRooms::PrivacyBudgetTemplate.Parameters": { + "additionalProperties": false, + "properties": { + "Epsilon": { + "markdownDescription": "The epsilon value that you want to use.", + "title": "Epsilon", + "type": "number" + }, + "UsersNoisePerQuery": { + "markdownDescription": "Noise added per query is measured in terms of the number of users whose contributions you want to obscure. This value governs the rate at which the privacy budget is depleted.", + "title": "UsersNoisePerQuery", + "type": "number" + } + }, + "required": [ + "Epsilon", + "UsersNoisePerQuery" + ], + "type": "object" + }, + "AWS::CleanRoomsML::TrainingDataset": { + "additionalProperties": false, + "properties": { + "Condition": { + "type": "string" + }, + "DeletionPolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + }, + "DependsOn": { + "anyOf": [ + { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + { + "items": { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + "type": "array" + } + ] + }, + "Metadata": { + "type": "object" + }, + "Properties": { + "additionalProperties": false, + "properties": { + "Description": { + "markdownDescription": "The description of the training dataset.", + "title": "Description", + "type": "string" + }, + "Name": { + "markdownDescription": "The name of the training dataset.", + "title": "Name", + "type": "string" + }, + "RoleArn": { + "markdownDescription": "The ARN of the IAM role that Clean Rooms ML can assume to read the data referred to in the `dataSource` field of each dataset.\n\nPassing a role across accounts is not allowed. If you pass a role that isn't in your account, you get an `AccessDeniedException` error.", + "title": "RoleArn", + "type": "string" + }, + "Tags": { + "items": { + "$ref": "#/definitions/Tag" + }, + "markdownDescription": "The optional metadata that you apply to the resource to help you categorize and organize them. 
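A hypothetical `AWS::CleanRooms::PrivacyBudgetTemplate` resource using the definitions above might look as follows; the membership identifier is a placeholder, and the `PrivacyBudgetType` string is an assumption based on the differential-privacy parameters this template carries:

```json
{
  "MyPrivacyBudgetTemplate": {
    "Type": "AWS::CleanRooms::PrivacyBudgetTemplate",
    "Properties": {
      "MembershipIdentifier": "example-membership-id",
      "PrivacyBudgetType": "DIFFERENTIAL_PRIVACY",
      "AutoRefresh": "CALENDAR_MONTH",
      "Parameters": {
        "Epsilon": 3,
        "UsersNoisePerQuery": 30
      }
    }
  }
}
```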
Each tag consists of a key and an optional value, both of which you define.\n\nThe following basic restrictions apply to tags:\n\n- Maximum number of tags per resource - 50.\n- For each resource, each tag key must be unique, and each tag key can have only one value.\n- Maximum key length - 128 Unicode characters in UTF-8.\n- Maximum value length - 256 Unicode characters in UTF-8.\n- If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . _ : / @.\n- Tag keys and values are case sensitive.\n- Do not use `aws:` , `AWS:` , or any upper or lowercase combination of such as a prefix for keys as it is reserved. You cannot edit or delete tag keys with this prefix. Values can have this prefix. If a tag value has `aws` as its prefix but the key does not, then Clean Rooms ML considers it to be a user tag and will count against the limit of 50 tags. Tags with only the key prefix of `aws` do not count against your tags per resource limit.", + "title": "Tags", + "type": "array" + }, + "TrainingData": { + "items": { + "$ref": "#/definitions/AWS::CleanRoomsML::TrainingDataset.Dataset" + }, + "markdownDescription": "An array of information that lists the Dataset objects, which specifies the dataset type and details on its location and schema. You must provide a role that has read access to these tables.", + "title": "TrainingData", + "type": "array" + } + }, + "required": [ + "Name", + "RoleArn", + "TrainingData" + ], + "type": "object" + }, + "Type": { + "enum": [ + "AWS::CleanRoomsML::TrainingDataset" + ], + "type": "string" + }, + "UpdateReplacePolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + } + }, + "required": [ + "Type", + "Properties" + ], + "type": "object" + }, + "AWS::CleanRoomsML::TrainingDataset.ColumnSchema": { + "additionalProperties": false, + "properties": { + "ColumnName": { + "markdownDescription": "The name of a column.", + "title": "ColumnName", + "type": "string" + }, + "ColumnTypes": { + "items": { + "type": "string" + }, + "markdownDescription": "The data type of column.", + "title": "ColumnTypes", + "type": "array" + } + }, + "required": [ + "ColumnName", + "ColumnTypes" + ], + "type": "object" + }, + "AWS::CleanRoomsML::TrainingDataset.DataSource": { + "additionalProperties": false, + "properties": { + "GlueDataSource": { + "$ref": "#/definitions/AWS::CleanRoomsML::TrainingDataset.GlueDataSource", + "markdownDescription": "A GlueDataSource object that defines the catalog ID, database name, and table name for the training data.", + "title": "GlueDataSource" + } + }, + "required": [ + "GlueDataSource" + ], + "type": "object" + }, + "AWS::CleanRoomsML::TrainingDataset.Dataset": { + "additionalProperties": false, + "properties": { + "InputConfig": { + "$ref": "#/definitions/AWS::CleanRoomsML::TrainingDataset.DatasetInputConfig", + "markdownDescription": "A DatasetInputConfig object that defines the data source and schema mapping.", + "title": "InputConfig" + }, + "Type": { + "markdownDescription": "What type of information is found in the dataset.", + "title": "Type", + "type": "string" + } + }, + "required": [ + "InputConfig", + "Type" + ], + "type": "object" + }, + "AWS::CleanRoomsML::TrainingDataset.DatasetInputConfig": { + "additionalProperties": false, + "properties": { + "DataSource": { + "$ref": 
"#/definitions/AWS::CleanRoomsML::TrainingDataset.DataSource", + "markdownDescription": "A DataSource object that specifies the Glue data source for the training data.", + "title": "DataSource" + }, + "Schema": { + "items": { + "$ref": "#/definitions/AWS::CleanRoomsML::TrainingDataset.ColumnSchema" + }, + "markdownDescription": "The schema information for the training data.", + "title": "Schema", + "type": "array" + } + }, + "required": [ + "DataSource", + "Schema" + ], + "type": "object" + }, + "AWS::CleanRoomsML::TrainingDataset.GlueDataSource": { + "additionalProperties": false, + "properties": { + "CatalogId": { + "markdownDescription": "The Glue catalog that contains the training data.", + "title": "CatalogId", + "type": "string" + }, + "DatabaseName": { + "markdownDescription": "The Glue database that contains the training data.", + "title": "DatabaseName", + "type": "string" + }, + "TableName": { + "markdownDescription": "The Glue table that contains the training data.", + "title": "TableName", + "type": "string" + } + }, + "required": [ + "DatabaseName", + "TableName" + ], + "type": "object" + }, "AWS::Cloud9::EnvironmentEC2": { "additionalProperties": false, "properties": { @@ -37464,7 +38816,7 @@ "items": { "$ref": "#/definitions/AWS::CloudTrail::EventDataStore.AdvancedEventSelector" }, - "markdownDescription": "The advanced event selectors to use to select the events for the data store. You can configure up to five advanced event selectors for each event data store.\n\nFor more information about how to use advanced event selectors to log CloudTrail events, see [Log events by using advanced event selectors](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/logging-data-events-with-cloudtrail.html#creating-data-event-selectors-advanced) in the CloudTrail User Guide.\n\nFor more information about how to use advanced event selectors to include AWS Config configuration items in your event data store, see [Create an event data store for AWS Config configuration items](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/query-lake-cli.html#lake-cli-create-eds-config) in the CloudTrail User Guide.\n\nFor more information about how to use advanced event selectors to include non- AWS events in your event data store, see [Create an integration to log events from outside AWS](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/query-lake-cli.html#lake-cli-create-integration) in the CloudTrail User Guide.", + "markdownDescription": "The advanced event selectors to use to select the events for the data store. 
You can configure up to five advanced event selectors for each event data store.\n\nFor more information about how to use advanced event selectors to log CloudTrail events, see [Log events by using advanced event selectors](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/logging-data-events-with-cloudtrail.html#creating-data-event-selectors-advanced) in the CloudTrail User Guide.\n\nFor more information about how to use advanced event selectors to include AWS Config configuration items in your event data store, see [Create an event data store for AWS Config configuration items](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/lake-eds-cli.html#lake-cli-create-eds-config) in the CloudTrail User Guide.\n\nFor more information about how to use advanced event selectors to include events outside of AWS events in your event data store, see [Create an integration to log events from outside AWS](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/lake-integrations-cli.html#lake-cli-create-integration) in the CloudTrail User Guide.", "title": "AdvancedEventSelectors", "type": "array" }, @@ -37830,12 +39182,12 @@ "type": "string" }, "S3BucketName": { - "markdownDescription": "Specifies the name of the Amazon S3 bucket designated for publishing log files. See [Amazon S3 Bucket Naming Requirements](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/create_trail_naming_policy.html) .", + "markdownDescription": "Specifies the name of the Amazon S3 bucket designated for publishing log files. See [Amazon S3 Bucket naming rules](https://docs.aws.amazon.com/AmazonS3/latest/userguide/bucketnamingrules.html) .", "title": "S3BucketName", "type": "string" }, "S3KeyPrefix": { - "markdownDescription": "Specifies the Amazon S3 key prefix that comes after the name of the bucket you have designated for log file delivery. For more information, see [Finding Your CloudTrail Log Files](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-find-log-files.html) . The maximum length is 200 characters.", + "markdownDescription": "Specifies the Amazon S3 key prefix that comes after the name of the bucket you have designated for log file delivery. For more information, see [Finding Your CloudTrail Log Files](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/get-and-view-cloudtrail-log-files.html#cloudtrail-find-log-files) . The maximum length is 200 characters.", "title": "S3KeyPrefix", "type": "string" }, @@ -38169,6 +39521,14 @@ "title": "Statistic", "type": "string" }, + "Tags": { + "items": { + "$ref": "#/definitions/Tag" + }, + "markdownDescription": "A list of key-value pairs to associate with the alarm. You can associate as many as 50 tags with an alarm. To be able to associate tags with the alarm when you create the alarm, you must have the `cloudwatch:TagResource` permission.\n\nTags can help you organize and categorize your resources. 
You can also use them to scope user permissions by granting a user permission to access or change only resources with certain tag values.", + "title": "Tags", + "type": "array" + }, "Threshold": { "markdownDescription": "The value to compare with the specified statistic.", "title": "Threshold", @@ -38384,6 +39744,11 @@ "title": "Dimensions", "type": "array" }, + "MetricCharacteristics": { + "$ref": "#/definitions/AWS::CloudWatch::AnomalyDetector.MetricCharacteristics", + "markdownDescription": "Use this object to include parameters to provide information about your metric to CloudWatch to help it build more accurate anomaly detection models. Currently, it includes the `PeriodicSpikes` parameter.", + "title": "MetricCharacteristics" + }, "MetricMathAnomalyDetector": { "$ref": "#/definitions/AWS::CloudWatch::AnomalyDetector.MetricMathAnomalyDetector", "markdownDescription": "The CloudWatch metric math expression for this anomaly detector.", @@ -38499,6 +39864,17 @@ ], "type": "object" }, + "AWS::CloudWatch::AnomalyDetector.MetricCharacteristics": { + "additionalProperties": false, + "properties": { + "PeriodicSpikes": { + "markdownDescription": "Set this parameter to true if values for this metric consistently include spikes that should not be considered to be anomalies. With this set to true, CloudWatch will expect to see spikes that occurred consistently during the model training period, and won't flag future similar spikes as anomalies.", + "title": "PeriodicSpikes", + "type": "boolean" + } + }, + "type": "object" + }, "AWS::CloudWatch::AnomalyDetector.MetricDataQueries": { "additionalProperties": false, "properties": {}, @@ -38740,6 +40116,14 @@ "markdownDescription": "The actions to execute when this alarm transitions to the OK state from any other state. Each action is specified as an Amazon Resource Name (ARN). For more information about creating alarms and the actions that you can specify, see [PutCompositeAlarm](https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_PutCompositeAlarm.html) in the *Amazon CloudWatch API Reference* .", "title": "OKActions", "type": "array" + }, + "Tags": { + "items": { + "$ref": "#/definitions/Tag" + }, + "markdownDescription": "A list of key-value pairs to associate with the alarm. You can associate as many as 50 tags with an alarm. To be able to associate tags with the alarm when you create the alarm, you must have the `cloudwatch:TagResource` permission.\n\nTags can help you organize and categorize your resources. You can also use them to scope user permissions by granting a user permission to access or change only resources with certain tag values.", + "title": "Tags", + "type": "array" } }, "required": [ @@ -39877,7 +41261,7 @@ "type": "string" }, "ComputeType": { - "markdownDescription": "The type of compute environment. This determines the number of CPU cores and memory the build environment uses. Available values include:\n\n- `BUILD_GENERAL1_SMALL` : Use up to 3 GB memory and 2 vCPUs for builds.\n- `BUILD_GENERAL1_MEDIUM` : Use up to 7 GB memory and 4 vCPUs for builds.\n- `BUILD_GENERAL1_LARGE` : Use up to 15 GB memory and 8 vCPUs for builds.\n\nFor more information, see [Build Environment Compute Types](https://docs.aws.amazon.com/codebuild/latest/userguide/build-env-ref-compute-types.html) in the *AWS CodeBuild User Guide.*", + "markdownDescription": "The type of compute environment. This determines the number of CPU cores and memory the build environment uses. 
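The new `MetricCharacteristics` object above plugs into `AWS::CloudWatch::AnomalyDetector`. A hedged sketch follows; the `MetricName`, `Namespace`, and `Stat` properties belong to the broader anomaly detector schema (they are not shown in this excerpt), and all values are hypothetical:

```json
{
  "MyAnomalyDetector": {
    "Type": "AWS::CloudWatch::AnomalyDetector",
    "Properties": {
      "MetricName": "Orders",
      "Namespace": "Example/App",
      "Stat": "Sum",
      "MetricCharacteristics": {
        "PeriodicSpikes": true
      }
    }
  }
}
```

With `PeriodicSpikes` set to `true`, spikes seen consistently during model training are not flagged as anomalies, per the description above.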
Available values include:\n\n- `BUILD_GENERAL1_SMALL` : Use up to 3 GB memory and 2 vCPUs for builds.\n- `BUILD_GENERAL1_MEDIUM` : Use up to 7 GB memory and 4 vCPUs for builds.\n- `BUILD_GENERAL1_LARGE` : Use up to 16 GB memory and 8 vCPUs for builds, depending on your environment type.\n- `BUILD_GENERAL1_XLARGE` : Use up to 70 GB memory and 36 vCPUs for builds, depending on your environment type.\n- `BUILD_GENERAL1_2XLARGE` : Use up to 145 GB memory, 72 vCPUs, and 824 GB of SSD storage for builds. This compute type supports Docker images up to 100 GB uncompressed.\n- `BUILD_LAMBDA_1GB` : Use up to 1 GB memory for builds. Only available for environment type `LINUX_LAMBDA_CONTAINER` and `ARM_LAMBDA_CONTAINER` .\n- `BUILD_LAMBDA_2GB` : Use up to 2 GB memory for builds. Only available for environment type `LINUX_LAMBDA_CONTAINER` and `ARM_LAMBDA_CONTAINER` .\n- `BUILD_LAMBDA_4GB` : Use up to 4 GB memory for builds. Only available for environment type `LINUX_LAMBDA_CONTAINER` and `ARM_LAMBDA_CONTAINER` .\n- `BUILD_LAMBDA_8GB` : Use up to 8 GB memory for builds. Only available for environment type `LINUX_LAMBDA_CONTAINER` and `ARM_LAMBDA_CONTAINER` .\n- `BUILD_LAMBDA_10GB` : Use up to 10 GB memory for builds. Only available for environment type `LINUX_LAMBDA_CONTAINER` and `ARM_LAMBDA_CONTAINER` .\n\nFor more information, see [Build Environment Compute Types](https://docs.aws.amazon.com/codebuild/latest/userguide/build-env-ref-compute-types.html) in the *AWS CodeBuild User Guide.*", "title": "ComputeType", "type": "string" }, @@ -40212,7 +41596,7 @@ "type": "boolean" }, "Location": { - "markdownDescription": "Information about the location of the source code to be built. Valid values include:\n\n- For source code settings that are specified in the source action of a pipeline in CodePipeline, `location` should not be specified. If it is specified, CodePipeline ignores it. This is because CodePipeline uses the settings in a pipeline's source action instead of this value.\n- For source code in an CodeCommit repository, the HTTPS clone URL to the repository that contains the source code and the buildspec file (for example, `https://git-codecommit..amazonaws.com/v1/repos/` ).\n- For source code in an Amazon S3 input bucket, one of the following.\n\n- The path to the ZIP file that contains the source code (for example, `//.zip` ).\n- The path to the folder that contains the source code (for example, `///` ).\n- For source code in a GitHub repository, the HTTPS clone URL to the repository that contains the source and the buildspec file. You must connect your AWS account to your GitHub account. Use the AWS CodeBuild console to start creating a build project. When you use the console to connect (or reconnect) with GitHub, on the GitHub *Authorize application* page, for *Organization access* , choose *Request access* next to each repository you want to allow AWS CodeBuild to have access to, and then choose *Authorize application* . (After you have connected to your GitHub account, you do not need to finish creating the build project. You can leave the AWS CodeBuild console.) To instruct AWS CodeBuild to use this connection, in the `source` object, set the `auth` object's `type` value to `OAUTH` .\n- For source code in an GitLab or self-managed GitLab repository, the HTTPS clone URL to the repository that contains the source and the buildspec file. You must connect your AWS account to your GitLab account. Use the AWS CodeBuild console to start creating a build project. 
When you use the console to connect (or reconnect) with GitLab, on the Connections *Authorize application* page, choose *Authorize* . Then on the AWS CodeStar Connections *Create GitLab connection* page, choose *Connect to GitLab* . (After you have connected to your GitLab account, you do not need to finish creating the build project. You can leave the AWS CodeBuild console.) To instruct AWS CodeBuild to override the default connection and use this connection instead, set the `auth` object's `type` value to `CODECONNECTIONS` in the `source` object.\n- For source code in a Bitbucket repository, the HTTPS clone URL to the repository that contains the source and the buildspec file. You must connect your AWS account to your Bitbucket account. Use the AWS CodeBuild console to start creating a build project. When you use the console to connect (or reconnect) with Bitbucket, on the Bitbucket *Confirm access to your account* page, choose *Grant access* . (After you have connected to your Bitbucket account, you do not need to finish creating the build project. You can leave the AWS CodeBuild console.) To instruct AWS CodeBuild to use this connection, in the `source` object, set the `auth` object's `type` value to `OAUTH` .\n\nIf you specify `CODEPIPELINE` for the `Type` property, don't specify this property. For all of the other types, you must specify `Location` .", + "markdownDescription": "Information about the location of the source code to be built. Valid values include:\n\n- For source code settings that are specified in the source action of a pipeline in CodePipeline, `location` should not be specified. If it is specified, CodePipeline ignores it. This is because CodePipeline uses the settings in a pipeline's source action instead of this value.\n- For source code in an CodeCommit repository, the HTTPS clone URL to the repository that contains the source code and the buildspec file (for example, `https://git-codecommit..amazonaws.com/v1/repos/` ).\n- For source code in an Amazon S3 input bucket, one of the following.\n\n- The path to the ZIP file that contains the source code (for example, `//.zip` ).\n- The path to the folder that contains the source code (for example, `///` ).\n- For source code in a GitHub repository, the HTTPS clone URL to the repository that contains the source and the buildspec file. You must connect your AWS account to your GitHub account. Use the AWS CodeBuild console to start creating a build project. When you use the console to connect (or reconnect) with GitHub, on the GitHub *Authorize application* page, for *Organization access* , choose *Request access* next to each repository you want to allow AWS CodeBuild to have access to, and then choose *Authorize application* . (After you have connected to your GitHub account, you do not need to finish creating the build project. You can leave the AWS CodeBuild console.) To instruct AWS CodeBuild to use this connection, in the `source` object, set the `auth` object's `type` value to `OAUTH` .\n- For source code in an GitLab or self-managed GitLab repository, the HTTPS clone URL to the repository that contains the source and the buildspec file. You must connect your AWS account to your GitLab account. Use the AWS CodeBuild console to start creating a build project. When you use the console to connect (or reconnect) with GitLab, on the Connections *Authorize application* page, choose *Authorize* . Then on the AWS CodeConnections *Create GitLab connection* page, choose *Connect to GitLab* . 
(After you have connected to your GitLab account, you do not need to finish creating the build project. You can leave the AWS CodeBuild console.) To instruct AWS CodeBuild to override the default connection and use this connection instead, set the `auth` object's `type` value to `CODECONNECTIONS` in the `source` object.\n- For source code in a Bitbucket repository, the HTTPS clone URL to the repository that contains the source and the buildspec file. You must connect your AWS account to your Bitbucket account. Use the AWS CodeBuild console to start creating a build project. When you use the console to connect (or reconnect) with Bitbucket, on the Bitbucket *Confirm access to your account* page, choose *Grant access* . (After you have connected to your Bitbucket account, you do not need to finish creating the build project. You can leave the AWS CodeBuild console.) To instruct AWS CodeBuild to use this connection, in the `source` object, set the `auth` object's `type` value to `OAUTH` .\n\nIf you specify `CODEPIPELINE` for the `Type` property, don't specify this property. For all of the other types, you must specify `Location` .", "title": "Location", "type": "string" }, @@ -40297,7 +41681,7 @@ "type": "string" }, "Type": { - "markdownDescription": "The type of webhook filter. There are eight webhook filter types: `EVENT` , `ACTOR_ACCOUNT_ID` , `HEAD_REF` , `BASE_REF` , `FILE_PATH` , `COMMIT_MESSAGE` , `TAG_NAME` , and `RELEASE_NAME` .\n\n- EVENT\n\n- A webhook event triggers a build when the provided `pattern` matches one of eight event types: `PUSH` , `PULL_REQUEST_CREATED` , `PULL_REQUEST_UPDATED` , `PULL_REQUEST_CLOSED` , `PULL_REQUEST_REOPENED` , `PULL_REQUEST_MERGED` , `RELEASED` , and `PRERELEASED` . The `EVENT` patterns are specified as a comma-separated string. For example, `PUSH, PULL_REQUEST_CREATED, PULL_REQUEST_UPDATED` filters all push, pull request created, and pull request updated events.\n\n> The `PULL_REQUEST_REOPENED` works with GitHub and GitHub Enterprise only. The `RELEASED` and `PRERELEASED` work with GitHub only.\n- ACTOR_ACCOUNT_ID\n\n- A webhook event triggers a build when a GitHub, GitHub Enterprise, or Bitbucket account ID matches the regular expression `pattern` .\n- HEAD_REF\n\n- A webhook event triggers a build when the head reference matches the regular expression `pattern` . For example, `refs/heads/branch-name` and `refs/tags/tag-name` .\n\n> Works with GitHub and GitHub Enterprise push, GitHub and GitHub Enterprise pull request, Bitbucket push, and Bitbucket pull request events.\n- BASE_REF\n\n- A webhook event triggers a build when the base reference matches the regular expression `pattern` . For example, `refs/heads/branch-name` .\n\n> Works with pull request events only.\n- FILE_PATH\n\n- A webhook triggers a build when the path of a changed file matches the regular expression `pattern` .\n\n> Works with GitHub and Bitbucket events push and pull requests events. Also works with GitHub Enterprise push events, but does not work with GitHub Enterprise pull request events.\n- COMMIT_MESSAGE\n\n- A webhook triggers a build when the head commit message matches the regular expression `pattern` .\n\n> Works with GitHub and Bitbucket events push and pull requests events. 
Also works with GitHub Enterprise push events, but does not work with GitHub Enterprise pull request events.\n- TAG_NAME\n\n- A webhook triggers a build when the tag name of the release matches the regular expression `pattern` .\n\n> Works with `RELEASED` and `PRERELEASED` events only.\n- RELEASE_NAME\n\n- A webhook triggers a build when the release name matches the regular expression `pattern` .\n\n> Works with `RELEASED` and `PRERELEASED` events only.", + "markdownDescription": "The type of webhook filter. There are nine webhook filter types: `EVENT` , `ACTOR_ACCOUNT_ID` , `HEAD_REF` , `BASE_REF` , `FILE_PATH` , `COMMIT_MESSAGE` , `TAG_NAME` , `RELEASE_NAME` , and `WORKFLOW_NAME` .\n\n- EVENT\n\n- A webhook event triggers a build when the provided `pattern` matches one of nine event types: `PUSH` , `PULL_REQUEST_CREATED` , `PULL_REQUEST_UPDATED` , `PULL_REQUEST_CLOSED` , `PULL_REQUEST_REOPENED` , `PULL_REQUEST_MERGED` , `RELEASED` , `PRERELEASED` , and `WORKFLOW_JOB_QUEUED` . The `EVENT` patterns are specified as a comma-separated string. For example, `PUSH, PULL_REQUEST_CREATED, PULL_REQUEST_UPDATED` filters all push, pull request created, and pull request updated events.\n\n> The `PULL_REQUEST_REOPENED` works with GitHub and GitHub Enterprise only. The `RELEASED` , `PRERELEASED` , and `WORKFLOW_JOB_QUEUED` work with GitHub only.\n- ACTOR_ACCOUNT_ID\n\n- A webhook event triggers a build when a GitHub, GitHub Enterprise, or Bitbucket account ID matches the regular expression `pattern` .\n- HEAD_REF\n\n- A webhook event triggers a build when the head reference matches the regular expression `pattern` . For example, `refs/heads/branch-name` and `refs/tags/tag-name` .\n\n> Works with GitHub and GitHub Enterprise push, GitHub and GitHub Enterprise pull request, Bitbucket push, and Bitbucket pull request events.\n- BASE_REF\n\n- A webhook event triggers a build when the base reference matches the regular expression `pattern` . For example, `refs/heads/branch-name` .\n\n> Works with pull request events only.\n- FILE_PATH\n\n- A webhook triggers a build when the path of a changed file matches the regular expression `pattern` .\n\n> Works with GitHub and Bitbucket events push and pull requests events. Also works with GitHub Enterprise push events, but does not work with GitHub Enterprise pull request events.\n- COMMIT_MESSAGE\n\n- A webhook triggers a build when the head commit message matches the regular expression `pattern` .\n\n> Works with GitHub and Bitbucket events push and pull requests events. Also works with GitHub Enterprise push events, but does not work with GitHub Enterprise pull request events.\n- TAG_NAME\n\n- A webhook triggers a build when the tag name of the release matches the regular expression `pattern` .\n\n> Works with `RELEASED` and `PRERELEASED` events only.\n- RELEASE_NAME\n\n- A webhook triggers a build when the release name matches the regular expression `pattern` .\n\n> Works with `RELEASED` and `PRERELEASED` events only.\n- WORKFLOW_NAME\n\n- A webhook triggers a build when the workflow name matches the regular expression `pattern` .\n\n> Works with `WORKFLOW_JOB_QUEUED` events only.", "title": "Type", "type": "string" } @@ -40503,7 +41887,7 @@ "type": "string" }, "Token": { - "markdownDescription": "For GitHub or GitHub Enterprise, this is the personal access token. For Bitbucket, this is the app password.", + "markdownDescription": "For GitHub or GitHub Enterprise, this is the personal access token. 
For Bitbucket, this is either the access token or the app password.", "title": "Token", "type": "string" }, @@ -40725,6 +42109,91 @@ ], "type": "object" }, + "AWS::CodeConnections::Connection": { + "additionalProperties": false, + "properties": { + "Condition": { + "type": "string" + }, + "DeletionPolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + }, + "DependsOn": { + "anyOf": [ + { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + { + "items": { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + "type": "array" + } + ] + }, + "Metadata": { + "type": "object" + }, + "Properties": { + "additionalProperties": false, + "properties": { + "ConnectionName": { + "markdownDescription": "The name of the connection. Connection names must be unique in an AWS account .", + "title": "ConnectionName", + "type": "string" + }, + "HostArn": { + "markdownDescription": "The Amazon Resource Name (ARN) of the host associated with the connection.", + "title": "HostArn", + "type": "string" + }, + "ProviderType": { + "markdownDescription": "The name of the external provider where your third-party code repository is configured.", + "title": "ProviderType", + "type": "string" + }, + "Tags": { + "items": { + "$ref": "#/definitions/Tag" + }, + "markdownDescription": "", + "title": "Tags", + "type": "array" + } + }, + "required": [ + "ConnectionName" + ], + "type": "object" + }, + "Type": { + "enum": [ + "AWS::CodeConnections::Connection" + ], + "type": "string" + }, + "UpdateReplacePolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + } + }, + "required": [ + "Type", + "Properties" + ], + "type": "object" + }, "AWS::CodeDeploy::Application": { "additionalProperties": false, "properties": { @@ -55716,7 +57185,7 @@ "type": "string" }, "CaptureDdls": { - "markdownDescription": "To capture DDL events, AWS DMS creates various artifacts in the PostgreSQL database when the task starts. You can later remove these artifacts.\n\nThe default value is `true` .\n\nIf this value is set to `N` , you don't have to create tables or triggers on the source database.", + "markdownDescription": "To capture DDL events, AWS DMS creates various artifacts in the PostgreSQL database when the task starts. You can later remove these artifacts.\n\nIf this value is set to `True` , you don't have to create tables or triggers on the source database.", "title": "CaptureDdls", "type": "boolean" }, @@ -60774,7 +62243,7 @@ "type": "string" }, "ServerCertificate": { - "markdownDescription": "Specifies a file with the certificates that are used to sign the object storage server's certificate (for example, `file:///home/user/.ssh/storage_sys_certificate.pem` ). The file you specify must include the following:\n\n- The certificate of the signing certificate authority (CA)\n- Any intermediate certificates\n- base64 encoding\n- A `.pem` extension\n\nThe file can be up to 32768 bytes (before base64 encoding).\n\nTo use this parameter, configure `ServerProtocol` to `HTTPS` .", + "markdownDescription": "Specifies a certificate chain for DataSync to authenticate with your object storage system if the system uses a private or self-signed certificate authority (CA). 
You must specify a single `.pem` file with a full certificate chain (for example, `file:///home/user/.ssh/object_storage_certificates.pem` ).\n\nThe certificate chain might include:\n\n- The object storage system's certificate\n- All intermediate certificates (if there are any)\n- The root certificate of the signing CA\n\nYou can concatenate your certificates into a `.pem` file (which can be up to 32768 bytes before base64 encoding). The following example `cat` command creates an `object_storage_certificates.pem` file that includes three certificates:\n\n`cat object_server_certificate.pem intermediate_certificate.pem ca_root_certificate.pem > object_storage_certificates.pem`\n\nTo use this parameter, configure `ServerProtocol` to `HTTPS` .", "title": "ServerCertificate", "type": "string" }, @@ -61834,6 +63303,11 @@ "AWS::DataZone::DataSource.GlueRunConfigurationInput": { "additionalProperties": false, "properties": { + "AutoImportDataQualityResult": { + "markdownDescription": "", + "title": "AutoImportDataQualityResult", + "type": "boolean" + }, "DataAccessRole": { "markdownDescription": "The data access role included in the configuration details of the AWS Glue data source.", "title": "DataAccessRole", @@ -62710,7 +64184,7 @@ ], "type": "object" }, - "AWS::Detective::Graph": { + "AWS::Deadline::Farm": { "additionalProperties": false, "properties": { "Condition": { @@ -62745,25 +64219,30 @@ "Properties": { "additionalProperties": false, "properties": { - "AutoEnableMembers": { - "markdownDescription": "Indicates whether to automatically enable new organization accounts as member accounts in the organization behavior graph.\n\nBy default, this property is set to `false` . If you want to change the value of this property, you must be the Detective administrator for the organization. For more information on setting a Detective administrator account, see [AWS::Detective::OrganizationAdmin](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-detective-organizationadmin.html)", - "title": "AutoEnableMembers", - "type": "boolean" + "Description": { + "markdownDescription": "A description of the farm that helps identify what the farm is used for.", + "title": "Description", + "type": "string" }, - "Tags": { - "items": { - "$ref": "#/definitions/Tag" - }, - "markdownDescription": "The tag values to assign to the new behavior graph.", - "title": "Tags", - "type": "array" + "DisplayName": { + "markdownDescription": "The display name of the farm.", + "title": "DisplayName", + "type": "string" + }, + "KmsKeyArn": { + "markdownDescription": "The ARN for the KMS key.", + "title": "KmsKeyArn", + "type": "string" } }, + "required": [ + "DisplayName" + ], "type": "object" }, "Type": { "enum": [ - "AWS::Detective::Graph" + "AWS::Deadline::Farm" ], "type": "string" }, @@ -62777,11 +64256,12 @@ } }, "required": [ - "Type" + "Type", + "Properties" ], "type": "object" }, - "AWS::Detective::MemberInvitation": { + "AWS::Deadline::Fleet": { "additionalProperties": false, "properties": { "Condition": { @@ -62816,42 +64296,53 @@ "Properties": { "additionalProperties": false, "properties": { - "DisableEmailNotification": { - "markdownDescription": "Whether to send an invitation email to the member account. 
If set to true, the member account does not receive an invitation email.", - "title": "DisableEmailNotification", - "type": "boolean" + "Configuration": { + "$ref": "#/definitions/AWS::Deadline::Fleet.FleetConfiguration", + "markdownDescription": "The configuration details for the fleet.", + "title": "Configuration" }, - "GraphArn": { - "markdownDescription": "The ARN of the behavior graph to invite the account to contribute data to.", - "title": "GraphArn", + "Description": { + "markdownDescription": "A description that helps identify what the fleet is used for.", + "title": "Description", "type": "string" }, - "MemberEmailAddress": { - "markdownDescription": "The root user email address of the invited account. If the email address provided is not the root user email address for the provided account, the invitation creation fails.", - "title": "MemberEmailAddress", + "DisplayName": { + "markdownDescription": "The display name of the fleet summary to update.", + "title": "DisplayName", "type": "string" }, - "MemberId": { - "markdownDescription": "The AWS account identifier of the invited account", - "title": "MemberId", + "FarmId": { + "markdownDescription": "The farm ID.", + "title": "FarmId", "type": "string" }, - "Message": { - "markdownDescription": "Customized text to include in the invitation email message.", - "title": "Message", + "MaxWorkerCount": { + "markdownDescription": "The maximum number of workers specified in the fleet.", + "title": "MaxWorkerCount", + "type": "number" + }, + "MinWorkerCount": { + "markdownDescription": "The minimum number of workers in the fleet.", + "title": "MinWorkerCount", + "type": "number" + }, + "RoleArn": { + "markdownDescription": "The IAM role that workers in the fleet use when processing jobs.", + "title": "RoleArn", "type": "string" } }, "required": [ - "GraphArn", - "MemberEmailAddress", - "MemberId" + "Configuration", + "DisplayName", + "MaxWorkerCount", + "RoleArn" ], "type": "object" }, "Type": { "enum": [ - "AWS::Detective::MemberInvitation" + "AWS::Deadline::Fleet" ], "type": "string" }, @@ -62870,7 +64361,1191 @@ ], "type": "object" }, - "AWS::Detective::OrganizationAdmin": { + "AWS::Deadline::Fleet.AcceleratorCountRange": { + "additionalProperties": false, + "properties": { + "Max": { + "markdownDescription": "The maximum GPU for the accelerator.", + "title": "Max", + "type": "number" + }, + "Min": { + "markdownDescription": "The minimum GPU for the accelerator.", + "title": "Min", + "type": "number" + } + }, + "required": [ + "Min" + ], + "type": "object" + }, + "AWS::Deadline::Fleet.AcceleratorTotalMemoryMiBRange": { + "additionalProperties": false, + "properties": { + "Max": { + "markdownDescription": "The maximum amount of memory to use for the accelerator, measured in MiB.", + "title": "Max", + "type": "number" + }, + "Min": { + "markdownDescription": "The minimum amount of memory to use for the accelerator, measured in MiB.", + "title": "Min", + "type": "number" + } + }, + "required": [ + "Min" + ], + "type": "object" + }, + "AWS::Deadline::Fleet.CustomerManagedFleetConfiguration": { + "additionalProperties": false, + "properties": { + "Mode": { + "markdownDescription": "The AWS Auto Scaling mode for the customer managed fleet configuration.", + "title": "Mode", + "type": "string" + }, + "StorageProfileId": { + "markdownDescription": "The storage profile ID.", + "title": "StorageProfileId", + "type": "string" + }, + "WorkerCapabilities": { + "$ref": "#/definitions/AWS::Deadline::Fleet.CustomerManagedWorkerCapabilities", + 
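For reference, a minimal CloudFormation sketch (YAML form) showing how the `AWS::Deadline::Fleet` definitions above compose; the logical IDs and the specific enum values (`NO_SCALING`, `x86_64`, `LINUX`) are illustrative assumptions, not values taken from this schema:

```yaml
Resources:
  ExampleFleet:
    Type: AWS::Deadline::Fleet
    Properties:
      DisplayName: example-fleet              # required
      FarmId: !GetAtt ExampleFarm.FarmId      # assumes an AWS::Deadline::Farm named ExampleFarm
      MaxWorkerCount: 10                      # required
      RoleArn: !GetAtt ExampleWorkerRole.Arn  # assumes an IAM role resource named ExampleWorkerRole
      Configuration:                          # required; CustomerManaged or ServiceManagedEc2
        CustomerManaged:
          Mode: NO_SCALING                    # assumed enum value; Mode is required
          WorkerCapabilities:                 # required by CustomerManagedFleetConfiguration
            CpuArchitectureType: x86_64       # assumed enum value
            OsFamily: LINUX                   # assumed enum value
            MemoryMiB:
              Min: 4096                       # MemoryMiBRange requires Min
            VCpuCount:
              Min: 2                          # VCpuCountRange requires Min
```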
"markdownDescription": "The worker capabilities for a customer managed fleet configuration.", + "title": "WorkerCapabilities" + } + }, + "required": [ + "Mode", + "WorkerCapabilities" + ], + "type": "object" + }, + "AWS::Deadline::Fleet.CustomerManagedWorkerCapabilities": { + "additionalProperties": false, + "properties": { + "AcceleratorCount": { + "$ref": "#/definitions/AWS::Deadline::Fleet.AcceleratorCountRange", + "markdownDescription": "The range of the accelerator.", + "title": "AcceleratorCount" + }, + "AcceleratorTotalMemoryMiB": { + "$ref": "#/definitions/AWS::Deadline::Fleet.AcceleratorTotalMemoryMiBRange", + "markdownDescription": "The total memory (MiB) for the customer managed worker capabilities.", + "title": "AcceleratorTotalMemoryMiB" + }, + "AcceleratorTypes": { + "items": { + "type": "string" + }, + "markdownDescription": "The accelerator types for the customer managed worker capabilities.", + "title": "AcceleratorTypes", + "type": "array" + }, + "CpuArchitectureType": { + "markdownDescription": "The CPU architecture type for the customer managed worker capabilities.", + "title": "CpuArchitectureType", + "type": "string" + }, + "CustomAmounts": { + "items": { + "$ref": "#/definitions/AWS::Deadline::Fleet.FleetAmountCapability" + }, + "markdownDescription": "Custom requirement ranges for customer managed worker capabilities.", + "title": "CustomAmounts", + "type": "array" + }, + "CustomAttributes": { + "items": { + "$ref": "#/definitions/AWS::Deadline::Fleet.FleetAttributeCapability" + }, + "markdownDescription": "Custom attributes for the customer manged worker capabilities.", + "title": "CustomAttributes", + "type": "array" + }, + "MemoryMiB": { + "$ref": "#/definitions/AWS::Deadline::Fleet.MemoryMiBRange", + "markdownDescription": "The memory (MiB).", + "title": "MemoryMiB" + }, + "OsFamily": { + "markdownDescription": "The operating system (OS) family.", + "title": "OsFamily", + "type": "string" + }, + "VCpuCount": { + "$ref": "#/definitions/AWS::Deadline::Fleet.VCpuCountRange", + "markdownDescription": "The vCPU count for the customer manged worker capabilities.", + "title": "VCpuCount" + } + }, + "required": [ + "CpuArchitectureType", + "MemoryMiB", + "OsFamily", + "VCpuCount" + ], + "type": "object" + }, + "AWS::Deadline::Fleet.Ec2EbsVolume": { + "additionalProperties": false, + "properties": { + "Iops": { + "markdownDescription": "The IOPS per volume.", + "title": "Iops", + "type": "number" + }, + "SizeGiB": { + "markdownDescription": "The EBS volume size in GiB.", + "title": "SizeGiB", + "type": "number" + }, + "ThroughputMiB": { + "markdownDescription": "The throughput per volume in MiB.", + "title": "ThroughputMiB", + "type": "number" + } + }, + "type": "object" + }, + "AWS::Deadline::Fleet.FleetAmountCapability": { + "additionalProperties": false, + "properties": { + "Max": { + "markdownDescription": "The maximum amount of the fleet worker capability.", + "title": "Max", + "type": "number" + }, + "Min": { + "markdownDescription": "The minimum amount of fleet worker capability.", + "title": "Min", + "type": "number" + }, + "Name": { + "markdownDescription": "The name of the fleet capability.", + "title": "Name", + "type": "string" + } + }, + "required": [ + "Min", + "Name" + ], + "type": "object" + }, + "AWS::Deadline::Fleet.FleetAttributeCapability": { + "additionalProperties": false, + "properties": { + "Name": { + "markdownDescription": "The name of the fleet attribute capability for the worker.", + "title": "Name", + "type": "string" + }, + "Values": { + 
"items": { + "type": "string" + }, + "markdownDescription": "The number of fleet attribute capabilities.", + "title": "Values", + "type": "array" + } + }, + "required": [ + "Name", + "Values" + ], + "type": "object" + }, + "AWS::Deadline::Fleet.FleetCapabilities": { + "additionalProperties": false, + "properties": { + "Amounts": { + "items": { + "$ref": "#/definitions/AWS::Deadline::Fleet.FleetAmountCapability" + }, + "markdownDescription": "Amount capabilities of the fleet.", + "title": "Amounts", + "type": "array" + }, + "Attributes": { + "items": { + "$ref": "#/definitions/AWS::Deadline::Fleet.FleetAttributeCapability" + }, + "markdownDescription": "Attribute capabilities of the fleet.", + "title": "Attributes", + "type": "array" + } + }, + "type": "object" + }, + "AWS::Deadline::Fleet.FleetConfiguration": { + "additionalProperties": false, + "properties": { + "CustomerManaged": { + "$ref": "#/definitions/AWS::Deadline::Fleet.CustomerManagedFleetConfiguration", + "markdownDescription": "The customer managed fleets within a fleet configuration.", + "title": "CustomerManaged" + }, + "ServiceManagedEc2": { + "$ref": "#/definitions/AWS::Deadline::Fleet.ServiceManagedEc2FleetConfiguration", + "markdownDescription": "The service managed Amazon EC2 instances for a fleet configuration.", + "title": "ServiceManagedEc2" + } + }, + "type": "object" + }, + "AWS::Deadline::Fleet.MemoryMiBRange": { + "additionalProperties": false, + "properties": { + "Max": { + "markdownDescription": "The maximum amount of memory (in MiB).", + "title": "Max", + "type": "number" + }, + "Min": { + "markdownDescription": "The minimum amount of memory (in MiB).", + "title": "Min", + "type": "number" + } + }, + "required": [ + "Min" + ], + "type": "object" + }, + "AWS::Deadline::Fleet.ServiceManagedEc2FleetConfiguration": { + "additionalProperties": false, + "properties": { + "InstanceCapabilities": { + "$ref": "#/definitions/AWS::Deadline::Fleet.ServiceManagedEc2InstanceCapabilities", + "markdownDescription": "The Amazon EC2 instance capabilities.", + "title": "InstanceCapabilities" + }, + "InstanceMarketOptions": { + "$ref": "#/definitions/AWS::Deadline::Fleet.ServiceManagedEc2InstanceMarketOptions", + "markdownDescription": "The Amazon EC2 market type.", + "title": "InstanceMarketOptions" + } + }, + "required": [ + "InstanceCapabilities", + "InstanceMarketOptions" + ], + "type": "object" + }, + "AWS::Deadline::Fleet.ServiceManagedEc2InstanceCapabilities": { + "additionalProperties": false, + "properties": { + "AllowedInstanceTypes": { + "items": { + "type": "string" + }, + "markdownDescription": "The allowable Amazon EC2 instance types.", + "title": "AllowedInstanceTypes", + "type": "array" + }, + "CpuArchitectureType": { + "markdownDescription": "The CPU architecture type.", + "title": "CpuArchitectureType", + "type": "string" + }, + "CustomAmounts": { + "items": { + "$ref": "#/definitions/AWS::Deadline::Fleet.FleetAmountCapability" + }, + "markdownDescription": "The custom capability amounts to require for instances in this fleet.", + "title": "CustomAmounts", + "type": "array" + }, + "CustomAttributes": { + "items": { + "$ref": "#/definitions/AWS::Deadline::Fleet.FleetAttributeCapability" + }, + "markdownDescription": "The custom capability attributes to require for instances in this fleet.", + "title": "CustomAttributes", + "type": "array" + }, + "ExcludedInstanceTypes": { + "items": { + "type": "string" + }, + "markdownDescription": "The instance types to exclude from the fleet.", + "title": 
"ExcludedInstanceTypes", + "type": "array" + }, + "MemoryMiB": { + "$ref": "#/definitions/AWS::Deadline::Fleet.MemoryMiBRange", + "markdownDescription": "The memory, as MiB, for the Amazon EC2 instance type.", + "title": "MemoryMiB" + }, + "OsFamily": { + "markdownDescription": "The operating system (OS) family.", + "title": "OsFamily", + "type": "string" + }, + "RootEbsVolume": { + "$ref": "#/definitions/AWS::Deadline::Fleet.Ec2EbsVolume", + "markdownDescription": "The root EBS volume.", + "title": "RootEbsVolume" + }, + "VCpuCount": { + "$ref": "#/definitions/AWS::Deadline::Fleet.VCpuCountRange", + "markdownDescription": "The amount of vCPU to require for instances in this fleet.", + "title": "VCpuCount" + } + }, + "required": [ + "CpuArchitectureType", + "MemoryMiB", + "OsFamily", + "VCpuCount" + ], + "type": "object" + }, + "AWS::Deadline::Fleet.ServiceManagedEc2InstanceMarketOptions": { + "additionalProperties": false, + "properties": { + "Type": { + "markdownDescription": "The Amazon EC2 instance type.", + "title": "Type", + "type": "string" + } + }, + "required": [ + "Type" + ], + "type": "object" + }, + "AWS::Deadline::Fleet.VCpuCountRange": { + "additionalProperties": false, + "properties": { + "Max": { + "markdownDescription": "The maximum amount of vCPU.", + "title": "Max", + "type": "number" + }, + "Min": { + "markdownDescription": "The minimum amount of vCPU.", + "title": "Min", + "type": "number" + } + }, + "required": [ + "Min" + ], + "type": "object" + }, + "AWS::Deadline::LicenseEndpoint": { + "additionalProperties": false, + "properties": { + "Condition": { + "type": "string" + }, + "DeletionPolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + }, + "DependsOn": { + "anyOf": [ + { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + { + "items": { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + "type": "array" + } + ] + }, + "Metadata": { + "type": "object" + }, + "Properties": { + "additionalProperties": false, + "properties": { + "SecurityGroupIds": { + "items": { + "type": "string" + }, + "markdownDescription": "The identifier of the Amazon EC2 security group that controls access to the license endpoint.", + "title": "SecurityGroupIds", + "type": "array" + }, + "SubnetIds": { + "items": { + "type": "string" + }, + "markdownDescription": "Identifies the VPC subnets that can connect to a license endpoint.", + "title": "SubnetIds", + "type": "array" + }, + "VpcId": { + "markdownDescription": "The VCP(virtual private cloud) ID associated with the license endpoint.", + "title": "VpcId", + "type": "string" + } + }, + "required": [ + "SecurityGroupIds", + "SubnetIds", + "VpcId" + ], + "type": "object" + }, + "Type": { + "enum": [ + "AWS::Deadline::LicenseEndpoint" + ], + "type": "string" + }, + "UpdateReplacePolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + } + }, + "required": [ + "Type", + "Properties" + ], + "type": "object" + }, + "AWS::Deadline::MeteredProduct": { + "additionalProperties": false, + "properties": { + "Condition": { + "type": "string" + }, + "DeletionPolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + }, + "DependsOn": { + "anyOf": [ + { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + { + "items": { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + "type": "array" + } + ] + }, + "Metadata": { + "type": "object" + }, + "Properties": { + "additionalProperties": false, + "properties": { + "Family": { + "markdownDescription": "The 
family to which the metered product belongs.", + "title": "Family", + "type": "string" + }, + "LicenseEndpointId": { + "markdownDescription": "The Amazon EC2 identifier of the license endpoint.", + "title": "LicenseEndpointId", + "type": "string" + }, + "Port": { + "markdownDescription": "The port on which the metered product should run.", + "title": "Port", + "type": "number" + }, + "ProductId": { + "markdownDescription": "The product ID.", + "title": "ProductId", + "type": "string" + }, + "Vendor": { + "markdownDescription": "The vendor.", + "title": "Vendor", + "type": "string" + } + }, + "type": "object" + }, + "Type": { + "enum": [ + "AWS::Deadline::MeteredProduct" + ], + "type": "string" + }, + "UpdateReplacePolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + } + }, + "required": [ + "Type" + ], + "type": "object" + }, + "AWS::Deadline::Queue": { + "additionalProperties": false, + "properties": { + "Condition": { + "type": "string" + }, + "DeletionPolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + }, + "DependsOn": { + "anyOf": [ + { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + { + "items": { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + "type": "array" + } + ] + }, + "Metadata": { + "type": "object" + }, + "Properties": { + "additionalProperties": false, + "properties": { + "AllowedStorageProfileIds": { + "items": { + "type": "string" + }, + "markdownDescription": "The identifiers of the storage profiles that this queue can use to share assets between workers using different operating systems.", + "title": "AllowedStorageProfileIds", + "type": "array" + }, + "DefaultBudgetAction": { + "markdownDescription": "The default action taken on a queue summary if a budget wasn't configured.", + "title": "DefaultBudgetAction", + "type": "string" + }, + "Description": { + "markdownDescription": "A description of the queue that helps identify what the queue is used for.", + "title": "Description", + "type": "string" + }, + "DisplayName": { + "markdownDescription": "The display name of the queue summary to update.", + "title": "DisplayName", + "type": "string" + }, + "FarmId": { + "markdownDescription": "The farm ID.", + "title": "FarmId", + "type": "string" + }, + "JobAttachmentSettings": { + "$ref": "#/definitions/AWS::Deadline::Queue.JobAttachmentSettings", + "markdownDescription": "The job attachment settings. 
These are the Amazon S3 bucket name and the Amazon S3 prefix.", + "title": "JobAttachmentSettings" + }, + "JobRunAsUser": { + "$ref": "#/definitions/AWS::Deadline::Queue.JobRunAsUser", + "markdownDescription": "Identifies the user for a job.", + "title": "JobRunAsUser" + }, + "RequiredFileSystemLocationNames": { + "items": { + "type": "string" + }, + "markdownDescription": "The file system location that the queue uses.", + "title": "RequiredFileSystemLocationNames", + "type": "array" + }, + "RoleArn": { + "markdownDescription": "The Amazon Resource Name (ARN) of the IAM role that workers use when running jobs in this queue.", + "title": "RoleArn", + "type": "string" + } + }, + "required": [ + "DisplayName" + ], + "type": "object" + }, + "Type": { + "enum": [ + "AWS::Deadline::Queue" + ], + "type": "string" + }, + "UpdateReplacePolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + } + }, + "required": [ + "Type", + "Properties" + ], + "type": "object" + }, + "AWS::Deadline::Queue.JobAttachmentSettings": { + "additionalProperties": false, + "properties": { + "RootPrefix": { + "markdownDescription": "The root prefix.", + "title": "RootPrefix", + "type": "string" + }, + "S3BucketName": { + "markdownDescription": "The Amazon S3 bucket name.", + "title": "S3BucketName", + "type": "string" + } + }, + "required": [ + "RootPrefix", + "S3BucketName" + ], + "type": "object" + }, + "AWS::Deadline::Queue.JobRunAsUser": { + "additionalProperties": false, + "properties": { + "Posix": { + "$ref": "#/definitions/AWS::Deadline::Queue.PosixUser", + "markdownDescription": "The user and group that the jobs in the queue run as.", + "title": "Posix" + }, + "RunAs": { + "markdownDescription": "Specifies whether the job should run using the queue's system user or if the job should run using the worker agent system user.", + "title": "RunAs", + "type": "string" + }, + "Windows": { + "$ref": "#/definitions/AWS::Deadline::Queue.WindowsUser", + "markdownDescription": "Identifies a Microsoft Windows user.", + "title": "Windows" + } + }, + "required": [ + "RunAs" + ], + "type": "object" + }, + "AWS::Deadline::Queue.PosixUser": { + "additionalProperties": false, + "properties": { + "Group": { + "markdownDescription": "The name of the POSIX user's group.", + "title": "Group", + "type": "string" + }, + "User": { + "markdownDescription": "The name of the POSIX user.", + "title": "User", + "type": "string" + } + }, + "required": [ + "Group", + "User" + ], + "type": "object" + }, + "AWS::Deadline::Queue.WindowsUser": { + "additionalProperties": false, + "properties": { + "PasswordArn": { + "markdownDescription": "The password ARN for the Windows user.", + "title": "PasswordArn", + "type": "string" + }, + "User": { + "markdownDescription": "The user.", + "title": "User", + "type": "string" + } + }, + "required": [ + "PasswordArn", + "User" + ], + "type": "object" + }, + "AWS::Deadline::QueueEnvironment": { + "additionalProperties": false, + "properties": { + "Condition": { + "type": "string" + }, + "DeletionPolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + }, + "DependsOn": { + "anyOf": [ + { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + { + "items": { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + "type": "array" + } + ] + }, + "Metadata": { + "type": "object" + }, + "Properties": { + "additionalProperties": false, + "properties": { + "FarmId": { + "markdownDescription": "The identifier assigned to the farm that contains the queue.", + 
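For reference, a minimal `AWS::Deadline::Queue` sketch (YAML form) wiring together the `JobAttachmentSettings` and `JobRunAsUser` shapes defined above; the logical IDs, the bucket name, and the `RunAs` value are illustrative assumptions, not values taken from this schema:

```yaml
Resources:
  ExampleQueue:
    Type: AWS::Deadline::Queue
    Properties:
      DisplayName: example-queue               # the only property this schema marks required
      FarmId: !GetAtt ExampleFarm.FarmId       # assumes an AWS::Deadline::Farm named ExampleFarm
      JobAttachmentSettings:
        S3BucketName: example-job-attachments  # assumed bucket name; required by JobAttachmentSettings
        RootPrefix: DeadlineCloud              # assumed prefix; required by JobAttachmentSettings
      JobRunAsUser:
        RunAs: QUEUE_CONFIGURED_USER           # assumed enum value; RunAs is required
        Posix:
          User: deadline-jobs                  # PosixUser requires User
          Group: deadline-jobs                 # PosixUser requires Group
```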
"title": "FarmId", + "type": "string" + }, + "Priority": { + "markdownDescription": "The queue environment's priority.", + "title": "Priority", + "type": "number" + }, + "QueueId": { + "markdownDescription": "The unique identifier of the queue that contains the environment.", + "title": "QueueId", + "type": "string" + }, + "Template": { + "markdownDescription": "A JSON or YAML template the describes the processing environment for the queue.", + "title": "Template", + "type": "string" + }, + "TemplateType": { + "markdownDescription": "Specifies whether the template for the queue environment is JSON or YAML.", + "title": "TemplateType", + "type": "string" + } + }, + "required": [ + "FarmId", + "Priority", + "QueueId", + "Template", + "TemplateType" + ], + "type": "object" + }, + "Type": { + "enum": [ + "AWS::Deadline::QueueEnvironment" + ], + "type": "string" + }, + "UpdateReplacePolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + } + }, + "required": [ + "Type", + "Properties" + ], + "type": "object" + }, + "AWS::Deadline::QueueFleetAssociation": { + "additionalProperties": false, + "properties": { + "Condition": { + "type": "string" + }, + "DeletionPolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + }, + "DependsOn": { + "anyOf": [ + { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + { + "items": { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + "type": "array" + } + ] + }, + "Metadata": { + "type": "object" + }, + "Properties": { + "additionalProperties": false, + "properties": { + "FarmId": { + "markdownDescription": "The identifier of the farm that contains the queue and the fleet.", + "title": "FarmId", + "type": "string" + }, + "FleetId": { + "markdownDescription": "The fleet ID.", + "title": "FleetId", + "type": "string" + }, + "QueueId": { + "markdownDescription": "The queue ID.", + "title": "QueueId", + "type": "string" + } + }, + "required": [ + "FarmId", + "FleetId", + "QueueId" + ], + "type": "object" + }, + "Type": { + "enum": [ + "AWS::Deadline::QueueFleetAssociation" + ], + "type": "string" + }, + "UpdateReplacePolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + } + }, + "required": [ + "Type", + "Properties" + ], + "type": "object" + }, + "AWS::Deadline::StorageProfile": { + "additionalProperties": false, + "properties": { + "Condition": { + "type": "string" + }, + "DeletionPolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + }, + "DependsOn": { + "anyOf": [ + { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + { + "items": { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + "type": "array" + } + ] + }, + "Metadata": { + "type": "object" + }, + "Properties": { + "additionalProperties": false, + "properties": { + "DisplayName": { + "markdownDescription": "The display name of the storage profile summary to update.", + "title": "DisplayName", + "type": "string" + }, + "FarmId": { + "markdownDescription": "The unique identifier of the farm that contains the storage profile.", + "title": "FarmId", + "type": "string" + }, + "FileSystemLocations": { + "items": { + "$ref": "#/definitions/AWS::Deadline::StorageProfile.FileSystemLocation" + }, + "markdownDescription": "Operating system specific file system path to the storage location.", + "title": "FileSystemLocations", + "type": "array" + }, + "OsFamily": { + "markdownDescription": "The operating system (OS) family.", + "title": "OsFamily", + "type": "string" + } + }, + 
"required": [ + "DisplayName", + "OsFamily" + ], + "type": "object" + }, + "Type": { + "enum": [ + "AWS::Deadline::StorageProfile" + ], + "type": "string" + }, + "UpdateReplacePolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + } + }, + "required": [ + "Type", + "Properties" + ], + "type": "object" + }, + "AWS::Deadline::StorageProfile.FileSystemLocation": { + "additionalProperties": false, + "properties": { + "Name": { + "markdownDescription": "The location name.", + "title": "Name", + "type": "string" + }, + "Path": { + "markdownDescription": "The file path.", + "title": "Path", + "type": "string" + }, + "Type": { + "markdownDescription": "The type of file.", + "title": "Type", + "type": "string" + } + }, + "required": [ + "Name", + "Path", + "Type" + ], + "type": "object" + }, + "AWS::Detective::Graph": { + "additionalProperties": false, + "properties": { + "Condition": { + "type": "string" + }, + "DeletionPolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + }, + "DependsOn": { + "anyOf": [ + { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + { + "items": { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + "type": "array" + } + ] + }, + "Metadata": { + "type": "object" + }, + "Properties": { + "additionalProperties": false, + "properties": { + "AutoEnableMembers": { + "markdownDescription": "Indicates whether to automatically enable new organization accounts as member accounts in the organization behavior graph.\n\nBy default, this property is set to `false` . If you want to change the value of this property, you must be the Detective administrator for the organization. For more information on setting a Detective administrator account, see [AWS::Detective::OrganizationAdmin](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-detective-organizationadmin.html)", + "title": "AutoEnableMembers", + "type": "boolean" + }, + "Tags": { + "items": { + "$ref": "#/definitions/Tag" + }, + "markdownDescription": "The tag values to assign to the new behavior graph.", + "title": "Tags", + "type": "array" + } + }, + "type": "object" + }, + "Type": { + "enum": [ + "AWS::Detective::Graph" + ], + "type": "string" + }, + "UpdateReplacePolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + } + }, + "required": [ + "Type" + ], + "type": "object" + }, + "AWS::Detective::MemberInvitation": { + "additionalProperties": false, + "properties": { + "Condition": { + "type": "string" + }, + "DeletionPolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + }, + "DependsOn": { + "anyOf": [ + { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + { + "items": { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + "type": "array" + } + ] + }, + "Metadata": { + "type": "object" + }, + "Properties": { + "additionalProperties": false, + "properties": { + "DisableEmailNotification": { + "markdownDescription": "Whether to send an invitation email to the member account. If set to true, the member account does not receive an invitation email.", + "title": "DisableEmailNotification", + "type": "boolean" + }, + "GraphArn": { + "markdownDescription": "The ARN of the behavior graph to invite the account to contribute data to.", + "title": "GraphArn", + "type": "string" + }, + "MemberEmailAddress": { + "markdownDescription": "The root user email address of the invited account. 
If the email address provided is not the root user email address for the provided account, the invitation creation fails.", + "title": "MemberEmailAddress", + "type": "string" + }, + "MemberId": { + "markdownDescription": "The AWS account identifier of the invited account", + "title": "MemberId", + "type": "string" + }, + "Message": { + "markdownDescription": "Customized text to include in the invitation email message.", + "title": "Message", + "type": "string" + } + }, + "required": [ + "GraphArn", + "MemberEmailAddress", + "MemberId" + ], + "type": "object" + }, + "Type": { + "enum": [ + "AWS::Detective::MemberInvitation" + ], + "type": "string" + }, + "UpdateReplacePolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + } + }, + "required": [ + "Type", + "Properties" + ], + "type": "object" + }, + "AWS::Detective::OrganizationAdmin": { "additionalProperties": false, "properties": { "Condition": { @@ -66312,6 +68987,16 @@ "title": "BgpAsn", "type": "number" }, + "BgpAsnExtended": { + "markdownDescription": "", + "title": "BgpAsnExtended", + "type": "number" + }, + "CertificateArn": { + "markdownDescription": "The Amazon Resource Name (ARN) for the customer gateway certificate.", + "title": "CertificateArn", + "type": "string" + }, "DeviceName": { "markdownDescription": "The name of customer gateway device.", "title": "DeviceName", @@ -66337,7 +69022,6 @@ } }, "required": [ - "BgpAsn", "IpAddress", "Type" ], @@ -68630,7 +71314,7 @@ "items": { "$ref": "#/definitions/AWS::EC2::Instance.ElasticGpuSpecification" }, - "markdownDescription": "Deprecated.\n\n> Amazon Elastic Graphics reached end of life on January 8, 2024. For workloads that require graphics acceleration, we recommend that you use Amazon EC2 G4ad, G4dn, or G5 instances.", + "markdownDescription": "An elastic GPU to associate with the instance.\n\n> Amazon Elastic Graphics reached end of life on January 8, 2024.", "title": "ElasticGpuSpecifications", "type": "array" }, @@ -68638,7 +71322,7 @@ "items": { "$ref": "#/definitions/AWS::EC2::Instance.ElasticInferenceAccelerator" }, - "markdownDescription": "An elastic inference accelerator to associate with the instance. Elastic inference accelerators are a resource you can attach to your Amazon EC2 instances to accelerate your Deep Learning (DL) inference workloads.\n\nYou cannot specify accelerators from different generations in the same request.\n\n> Starting April 15, 2023, AWS will not onboard new customers to Amazon Elastic Inference (EI), and will help current customers migrate their workloads to options that offer better price and performance. After April 15, 2023, new customers will not be able to launch instances with Amazon EI accelerators in Amazon SageMaker, Amazon ECS, or Amazon EC2. However, customers who have used Amazon EI at least once during the past 30-day period are considered current customers and will be able to continue using the service.", + "markdownDescription": "An elastic inference accelerator to associate with the instance.\n\n> Amazon Elastic Inference (EI) is no longer available to new customers. For more information, see [Amazon Elastic Inference FAQs](https://docs.aws.amazon.com/machine-learning/elastic-inference/faqs/) .", "title": "ElasticInferenceAccelerators", "type": "array" }, @@ -68707,7 +71391,7 @@ }, "LaunchTemplate": { "$ref": "#/definitions/AWS::EC2::Instance.LaunchTemplateSpecification", - "markdownDescription": "The launch template to use to launch the instances. 
Any parameters that you specify in the AWS CloudFormation template override the same parameters in the launch template. You can specify either the name or ID of a launch template, but not both.", + "markdownDescription": "The launch template. Any additional parameters that you specify for the new instance overwrite the corresponding parameters included in the launch template.", "title": "LaunchTemplate" }, "LicenseSpecifications": { @@ -69032,17 +71716,17 @@ "additionalProperties": false, "properties": { "LaunchTemplateId": { - "markdownDescription": "The ID of the launch template.\n\nYou must specify the `LaunchTemplateId` or the `LaunchTemplateName` , but not both.", + "markdownDescription": "The ID of the launch template.\n\nYou must specify either the launch template ID or the launch template name, but not both.", "title": "LaunchTemplateId", "type": "string" }, "LaunchTemplateName": { - "markdownDescription": "The name of the launch template.\n\nYou must specify the `LaunchTemplateName` or the `LaunchTemplateId` , but not both.", + "markdownDescription": "The name of the launch template.\n\nYou must specify either the launch template ID or the launch template name, but not both.", "title": "LaunchTemplateName", "type": "string" }, "Version": { - "markdownDescription": "The version number of the launch template.\n\nSpecifying `$Latest` or `$Default` for the template version number is not supported. However, you can specify `LatestVersionNumber` or `DefaultVersionNumber` using the `Fn::GetAtt` intrinsic function. For more information, see [Fn::GetAtt](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ec2-launchtemplate.html#aws-resource-ec2-launchtemplate-return-values-fn--getatt) .", + "markdownDescription": "The version number of the launch template. You must specify this property.\n\nTo specify the default version of the template, use the `Fn::GetAtt` intrinsic function to retrieve the `DefaultVersionNumber` attribute of the launch template. To specify the latest version of the template, use `Fn::GetAtt` to retrieve the `LatestVersionNumber` attribute. For more information, see [AWS::EC2:LaunchTemplate return values for Fn::GetAtt](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ec2-launchtemplate.html#aws-resource-ec2-launchtemplate-return-values-fn--getatt) .", "title": "Version", "type": "string" } @@ -73672,11 +76356,6 @@ "title": "IpProtocol", "type": "string" }, - "SourceSecurityGroupId": { - "markdownDescription": "", - "title": "SourceSecurityGroupId", - "type": "string" - }, "ToPort": { "markdownDescription": "If the protocol is TCP or UDP, this is the end of the port range. If the protocol is ICMP or ICMPv6, this is the ICMP code or -1 (all ICMP codes). 
If the start port is -1 (all ICMP types), then the end port must be -1 (all ICMP codes).", "title": "ToPort", @@ -79762,7 +82441,7 @@ }, "type": "object" }, - "AWS::ECS::CapacityProvider": { + "AWS::ECR::RepositoryCreationTemplate": { "additionalProperties": false, "properties": { "Condition": { @@ -79797,33 +82476,62 @@ "Properties": { "additionalProperties": false, "properties": { - "AutoScalingGroupProvider": { - "$ref": "#/definitions/AWS::ECS::CapacityProvider.AutoScalingGroupProvider", - "markdownDescription": "The Auto Scaling group settings for the capacity provider.", - "title": "AutoScalingGroupProvider" + "AppliedFor": { + "items": { + "type": "string" + }, + "markdownDescription": "", + "title": "AppliedFor", + "type": "array" }, - "Name": { - "markdownDescription": "The name of the capacity provider. If a name is specified, it cannot start with `aws` , `ecs` , or `fargate` . If no name is specified, a default name in the `CFNStackName-CFNResourceName-RandomString` format is used.", - "title": "Name", + "Description": { + "markdownDescription": "", + "title": "Description", "type": "string" }, - "Tags": { + "EncryptionConfiguration": { + "$ref": "#/definitions/AWS::ECR::RepositoryCreationTemplate.EncryptionConfiguration", + "markdownDescription": "The encryption configuration for the repository. This determines how the contents of your repository are encrypted at rest.\n\nBy default, when no encryption configuration is set or the `AES256` encryption type is used, Amazon ECR uses server-side encryption with Amazon S3-managed encryption keys which encrypts your data at rest using an AES-256 encryption algorithm. This does not require any action on your part.\n\nFor more control over the encryption of the contents of your repository, you can use server-side encryption with AWS Key Management Service key stored in AWS Key Management Service ( AWS KMS ) to encrypt your images. For more information, see [Amazon ECR encryption at rest](https://docs.aws.amazon.com/AmazonECR/latest/userguide/encryption-at-rest.html) in the *Amazon Elastic Container Registry User Guide* .", + "title": "EncryptionConfiguration" + }, + "ImageTagMutability": { + "markdownDescription": "", + "title": "ImageTagMutability", + "type": "string" + }, + "LifecyclePolicy": { + "markdownDescription": "", + "title": "LifecyclePolicy", + "type": "string" + }, + "Prefix": { + "markdownDescription": "", + "title": "Prefix", + "type": "string" + }, + "RepositoryPolicy": { + "markdownDescription": "", + "title": "RepositoryPolicy", + "type": "string" + }, + "ResourceTags": { "items": { "$ref": "#/definitions/Tag" }, - "markdownDescription": "The metadata that you apply to the capacity provider to help you categorize and organize it. Each tag consists of a key and an optional value. You define both.\n\nThe following basic restrictions apply to tags:\n\n- Maximum number of tags per resource - 50\n- For each resource, each tag key must be unique, and each tag key can have only one value.\n- Maximum key length - 128 Unicode characters in UTF-8\n- Maximum value length - 256 Unicode characters in UTF-8\n- If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . 
_ : / @.\n- Tag keys and values are case-sensitive.\n- Do not use `aws:` , `AWS:` , or any upper or lowercase combination of such as a prefix for either keys or values as it is reserved for AWS use. You cannot edit or delete tag keys or values with this prefix. Tags with this prefix do not count against your tags per resource limit.", - "title": "Tags", + "markdownDescription": "The tags attached to the resource.", + "title": "ResourceTags", "type": "array" } }, "required": [ - "AutoScalingGroupProvider" + "AppliedFor", + "Prefix" ], "type": "object" }, "Type": { "enum": [ - "AWS::ECS::CapacityProvider" + "AWS::ECR::RepositoryCreationTemplate" ], "type": "string" }, @@ -79842,67 +82550,26 @@ ], "type": "object" }, - "AWS::ECS::CapacityProvider.AutoScalingGroupProvider": { + "AWS::ECR::RepositoryCreationTemplate.EncryptionConfiguration": { "additionalProperties": false, "properties": { - "AutoScalingGroupArn": { - "markdownDescription": "The Amazon Resource Name (ARN) that identifies the Auto Scaling group, or the Auto Scaling group name.", - "title": "AutoScalingGroupArn", - "type": "string" - }, - "ManagedDraining": { - "markdownDescription": "The managed draining option for the Auto Scaling group capacity provider. When you enable this, Amazon ECS manages and gracefully drains the EC2 container instances that are in the Auto Scaling group capacity provider.", - "title": "ManagedDraining", + "EncryptionType": { + "markdownDescription": "The encryption type to use.\n\nIf you use the `KMS` encryption type, the contents of the repository will be encrypted using server-side encryption with AWS Key Management Service key stored in AWS KMS . When you use AWS KMS to encrypt your data, you can either use the default AWS managed AWS KMS key for Amazon ECR, or specify your own AWS KMS key, which you already created. For more information, see [Protecting data using server-side encryption with an AWS KMS key stored in AWS Key Management Service (SSE-KMS)](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html) in the *Amazon Simple Storage Service Console Developer Guide* .\n\nIf you use the `AES256` encryption type, Amazon ECR uses server-side encryption with Amazon S3-managed encryption keys which encrypts the images in the repository using an AES-256 encryption algorithm. For more information, see [Protecting data using server-side encryption with Amazon S3-managed encryption keys (SSE-S3)](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html) in the *Amazon Simple Storage Service Console Developer Guide* .", + "title": "EncryptionType", "type": "string" }, - "ManagedScaling": { - "$ref": "#/definitions/AWS::ECS::CapacityProvider.ManagedScaling", - "markdownDescription": "The managed scaling settings for the Auto Scaling group capacity provider.", - "title": "ManagedScaling" - }, - "ManagedTerminationProtection": { - "markdownDescription": "The managed termination protection setting to use for the Auto Scaling group capacity provider. This determines whether the Auto Scaling group has managed termination protection. The default is off.\n\n> When using managed termination protection, managed scaling must also be used otherwise managed termination protection doesn't work. \n\nWhen managed termination protection is on, Amazon ECS prevents the Amazon EC2 instances in an Auto Scaling group that contain tasks from being terminated during a scale-in action. 
The Auto Scaling group and each instance in the Auto Scaling group must have instance protection from scale-in actions on as well. For more information, see [Instance Protection](https://docs.aws.amazon.com/autoscaling/ec2/userguide/as-instance-termination.html#instance-protection) in the *AWS Auto Scaling User Guide* .\n\nWhen managed termination protection is off, your Amazon EC2 instances aren't protected from termination when the Auto Scaling group scales in.", - "title": "ManagedTerminationProtection", + "KmsKey": { + "markdownDescription": "If you use the `KMS` encryption type, specify the AWS KMS key to use for encryption. The alias, key ID, or full ARN of the AWS KMS key can be specified. The key must exist in the same Region as the repository. If no key is specified, the default AWS managed AWS KMS key for Amazon ECR will be used.", + "title": "KmsKey", "type": "string" } }, "required": [ - "AutoScalingGroupArn" + "EncryptionType" ], "type": "object" }, - "AWS::ECS::CapacityProvider.ManagedScaling": { - "additionalProperties": false, - "properties": { - "InstanceWarmupPeriod": { - "markdownDescription": "The period of time, in seconds, after a newly launched Amazon EC2 instance can contribute to CloudWatch metrics for Auto Scaling group. If this parameter is omitted, the default value of `300` seconds is used.", - "title": "InstanceWarmupPeriod", - "type": "number" - }, - "MaximumScalingStepSize": { - "markdownDescription": "The maximum number of Amazon EC2 instances that Amazon ECS will scale out at one time. The scale in process is not affected by this parameter. If this parameter is omitted, the default value of `10000` is used.", - "title": "MaximumScalingStepSize", - "type": "number" - }, - "MinimumScalingStepSize": { - "markdownDescription": "The minimum number of Amazon EC2 instances that Amazon ECS will scale out at one time. The scale in process is not affected by this parameter If this parameter is omitted, the default value of `1` is used.\n\nWhen additional capacity is required, Amazon ECS will scale up the minimum scaling step size even if the actual demand is less than the minimum scaling step size.\n\nIf you use a capacity provider with an Auto Scaling group configured with more than one Amazon EC2 instance type or Availability Zone, Amazon ECS will scale up by the exact minimum scaling step size value and will ignore both the maximum scaling step size as well as the capacity demand.", - "title": "MinimumScalingStepSize", - "type": "number" - }, - "Status": { - "markdownDescription": "Determines whether to use managed scaling for the capacity provider.", - "title": "Status", - "type": "string" - }, - "TargetCapacity": { - "markdownDescription": "The target capacity utilization as a percentage for the capacity provider. The specified value must be greater than `0` and less than or equal to `100` . For example, if you want the capacity provider to maintain 10% spare capacity, then that means the utilization is 90%, so use a `targetCapacity` of `90` . 
The default value of `100` percent results in the Amazon EC2 instances in your Auto Scaling group being completely used.", - "title": "TargetCapacity", - "type": "number" - } - }, - "type": "object" - }, - "AWS::ECS::Cluster": { + "AWS::ECS::CapacityProvider": { "additionalProperties": false, "properties": { "Condition": { @@ -79937,59 +82604,33 @@ "Properties": { "additionalProperties": false, "properties": { - "CapacityProviders": { - "items": { - "type": "string" - }, - "markdownDescription": "The short name of one or more capacity providers to associate with the cluster. A capacity provider must be associated with a cluster before it can be included as part of the default capacity provider strategy of the cluster or used in a capacity provider strategy when calling the [CreateService](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_CreateService.html) or [RunTask](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_RunTask.html) actions.\n\nIf specifying a capacity provider that uses an Auto Scaling group, the capacity provider must be created but not associated with another cluster. New Auto Scaling group capacity providers can be created with the [CreateCapacityProvider](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_CreateCapacityProvider.html) API operation.\n\nTo use a AWS Fargate capacity provider, specify either the `FARGATE` or `FARGATE_SPOT` capacity providers. The AWS Fargate capacity providers are available to all accounts and only need to be associated with a cluster to be used.\n\nThe [PutCapacityProvider](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_PutCapacityProvider.html) API operation is used to update the list of available capacity providers for a cluster after the cluster is created.", - "title": "CapacityProviders", - "type": "array" + "AutoScalingGroupProvider": { + "$ref": "#/definitions/AWS::ECS::CapacityProvider.AutoScalingGroupProvider", + "markdownDescription": "The Auto Scaling group settings for the capacity provider.", + "title": "AutoScalingGroupProvider" }, - "ClusterName": { - "markdownDescription": "A user-generated string that you use to identify your cluster. If you don't specify a name, AWS CloudFormation generates a unique physical ID for the name.", - "title": "ClusterName", + "Name": { + "markdownDescription": "The name of the capacity provider. If a name is specified, it cannot start with `aws` , `ecs` , or `fargate` . If no name is specified, a default name in the `CFNStackName-CFNResourceName-RandomString` format is used.", + "title": "Name", "type": "string" }, - "ClusterSettings": { - "items": { - "$ref": "#/definitions/AWS::ECS::Cluster.ClusterSettings" - }, - "markdownDescription": "The settings to use when creating a cluster. This parameter is used to turn on CloudWatch Container Insights for a cluster.", - "title": "ClusterSettings", - "type": "array" - }, - "Configuration": { - "$ref": "#/definitions/AWS::ECS::Cluster.ClusterConfiguration", - "markdownDescription": "The execute command configuration for the cluster.", - "title": "Configuration" - }, - "DefaultCapacityProviderStrategy": { - "items": { - "$ref": "#/definitions/AWS::ECS::Cluster.CapacityProviderStrategyItem" - }, - "markdownDescription": "The default capacity provider strategy for the cluster. 
When services or tasks are run in the cluster with no launch type or capacity provider strategy specified, the default capacity provider strategy is used.", - "title": "DefaultCapacityProviderStrategy", - "type": "array" - }, - "ServiceConnectDefaults": { - "$ref": "#/definitions/AWS::ECS::Cluster.ServiceConnectDefaults", - "markdownDescription": "Use this parameter to set a default Service Connect namespace. After you set a default Service Connect namespace, any new services with Service Connect turned on that are created in the cluster are added as client services in the namespace. This setting only applies to new services that set the `enabled` parameter to `true` in the `ServiceConnectConfiguration` . You can set the namespace of each service individually in the `ServiceConnectConfiguration` to override this default parameter.\n\nTasks that run in a namespace can use short names to connect to services in the namespace. Tasks can connect to services across all of the clusters in the namespace. Tasks connect through a managed proxy container that collects logs and metrics for increased visibility. Only the tasks that Amazon ECS services create are supported with Service Connect. For more information, see [Service Connect](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/service-connect.html) in the *Amazon Elastic Container Service Developer Guide* .", - "title": "ServiceConnectDefaults" - }, "Tags": { "items": { "$ref": "#/definitions/Tag" }, - "markdownDescription": "The metadata that you apply to the cluster to help you categorize and organize them. Each tag consists of a key and an optional value. You define both.\n\nThe following basic restrictions apply to tags:\n\n- Maximum number of tags per resource - 50\n- For each resource, each tag key must be unique, and each tag key can have only one value.\n- Maximum key length - 128 Unicode characters in UTF-8\n- Maximum value length - 256 Unicode characters in UTF-8\n- If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . _ : / @.\n- Tag keys and values are case-sensitive.\n- Do not use `aws:` , `AWS:` , or any upper or lowercase combination of such as a prefix for either keys or values as it is reserved for AWS use. You cannot edit or delete tag keys or values with this prefix. Tags with this prefix do not count against your tags per resource limit.", + "markdownDescription": "The metadata that you apply to the capacity provider to help you categorize and organize it. Each tag consists of a key and an optional value. You define both.\n\nThe following basic restrictions apply to tags:\n\n- Maximum number of tags per resource - 50\n- For each resource, each tag key must be unique, and each tag key can have only one value.\n- Maximum key length - 128 Unicode characters in UTF-8\n- Maximum value length - 256 Unicode characters in UTF-8\n- If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . _ : / @.\n- Tag keys and values are case-sensitive.\n- Do not use `aws:` , `AWS:` , or any upper or lowercase combination of such as a prefix for either keys or values as it is reserved for AWS use. 
You cannot edit or delete tag keys or values with this prefix. Tags with this prefix do not count against your tags per resource limit.", "title": "Tags", "type": "array" } }, + "required": [ + "AutoScalingGroupProvider" + ], "type": "object" }, "Type": { "enum": [ - "AWS::ECS::Cluster" + "AWS::ECS::CapacityProvider" ], "type": "string" }, @@ -80003,122 +82644,288 @@ } }, "required": [ - "Type" + "Type", + "Properties" ], "type": "object" }, - "AWS::ECS::Cluster.CapacityProviderStrategyItem": { - "additionalProperties": false, - "properties": { - "Base": { - "markdownDescription": "The *base* value designates how many tasks, at a minimum, to run on the specified capacity provider. Only one capacity provider in a capacity provider strategy can have a *base* defined. If no value is specified, the default value of `0` is used.", - "title": "Base", - "type": "number" - }, - "CapacityProvider": { - "markdownDescription": "The short name of the capacity provider.", - "title": "CapacityProvider", - "type": "string" - }, - "Weight": { - "markdownDescription": "The *weight* value designates the relative percentage of the total number of tasks launched that should use the specified capacity provider. The `weight` value is taken into consideration after the `base` value, if defined, is satisfied.\n\nIf no `weight` value is specified, the default value of `0` is used. When multiple capacity providers are specified within a capacity provider strategy, at least one of the capacity providers must have a weight value greater than zero and any capacity providers with a weight of `0` can't be used to place tasks. If you specify multiple capacity providers in a strategy that all have a weight of `0` , any `RunTask` or `CreateService` actions using the capacity provider strategy will fail.\n\nAn example scenario for using weights is defining a strategy that contains two capacity providers and both have a weight of `1` , then when the `base` is satisfied, the tasks will be split evenly across the two capacity providers. Using that same logic, if you specify a weight of `1` for *capacityProviderA* and a weight of `4` for *capacityProviderB* , then for every one task that's run using *capacityProviderA* , four tasks would use *capacityProviderB* .", - "title": "Weight", - "type": "number" - } - }, - "type": "object" - }, - "AWS::ECS::Cluster.ClusterConfiguration": { - "additionalProperties": false, - "properties": { - "ExecuteCommandConfiguration": { - "$ref": "#/definitions/AWS::ECS::Cluster.ExecuteCommandConfiguration", - "markdownDescription": "The details of the execute command configuration.", - "title": "ExecuteCommandConfiguration" - } - }, - "type": "object" - }, - "AWS::ECS::Cluster.ClusterSettings": { + "AWS::ECS::CapacityProvider.AutoScalingGroupProvider": { "additionalProperties": false, "properties": { - "Name": { - "markdownDescription": "The name of the cluster setting. The value is `containerInsights` .", - "title": "Name", + "AutoScalingGroupArn": { + "markdownDescription": "The Amazon Resource Name (ARN) that identifies the Auto Scaling group, or the Auto Scaling group name.", + "title": "AutoScalingGroupArn", "type": "string" }, - "Value": { - "markdownDescription": "The value to set for the cluster setting. The supported values are `enabled` and `disabled` .\n\nIf you set `name` to `containerInsights` and `value` to `enabled` , CloudWatch Container Insights will be on for the cluster, otherwise it will be off unless the `containerInsights` account setting is turned on. 
If a cluster value is specified, it will override the `containerInsights` value set with [PutAccountSetting](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_PutAccountSetting.html) or [PutAccountSettingDefault](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_PutAccountSettingDefault.html) .", - "title": "Value", - "type": "string" - } - }, - "type": "object" - }, - "AWS::ECS::Cluster.ExecuteCommandConfiguration": { - "additionalProperties": false, - "properties": { - "KmsKeyId": { - "markdownDescription": "Specify an AWS Key Management Service key ID to encrypt the data between the local client and the container.", - "title": "KmsKeyId", + "ManagedDraining": { + "markdownDescription": "The managed draining option for the Auto Scaling group capacity provider. When you enable this, Amazon ECS manages and gracefully drains the EC2 container instances that are in the Auto Scaling group capacity provider.", + "title": "ManagedDraining", "type": "string" }, - "LogConfiguration": { - "$ref": "#/definitions/AWS::ECS::Cluster.ExecuteCommandLogConfiguration", - "markdownDescription": "The log configuration for the results of the execute command actions. The logs can be sent to CloudWatch Logs or an Amazon S3 bucket. When `logging=OVERRIDE` is specified, a `logConfiguration` must be provided.", - "title": "LogConfiguration" + "ManagedScaling": { + "$ref": "#/definitions/AWS::ECS::CapacityProvider.ManagedScaling", + "markdownDescription": "The managed scaling settings for the Auto Scaling group capacity provider.", + "title": "ManagedScaling" }, - "Logging": { - "markdownDescription": "The log setting to use for redirecting logs for your execute command results. The following log settings are available.\n\n- `NONE` : The execute command session is not logged.\n- `DEFAULT` : The `awslogs` configuration in the task definition is used. If no logging parameter is specified, it defaults to this value. If no `awslogs` log driver is configured in the task definition, the output won't be logged.\n- `OVERRIDE` : Specify the logging details as a part of `logConfiguration` . If the `OVERRIDE` logging option is specified, the `logConfiguration` is required.", - "title": "Logging", + "ManagedTerminationProtection": { + "markdownDescription": "The managed termination protection setting to use for the Auto Scaling group capacity provider. This determines whether the Auto Scaling group has managed termination protection. The default is off.\n\n> When using managed termination protection, managed scaling must also be used otherwise managed termination protection doesn't work. \n\nWhen managed termination protection is on, Amazon ECS prevents the Amazon EC2 instances in an Auto Scaling group that contain tasks from being terminated during a scale-in action. The Auto Scaling group and each instance in the Auto Scaling group must have instance protection from scale-in actions on as well. 
For more information, see [Instance Protection](https://docs.aws.amazon.com/autoscaling/ec2/userguide/as-instance-termination.html#instance-protection) in the *AWS Auto Scaling User Guide* .\n\nWhen managed termination protection is off, your Amazon EC2 instances aren't protected from termination when the Auto Scaling group scales in.",
          "title": "ManagedTerminationProtection",
          "type": "string"
        }
      },
      "required": [
        "AutoScalingGroupArn"
      ],
      "type": "object"
    },
    "AWS::ECS::CapacityProvider.ManagedScaling": {
      "additionalProperties": false,
      "properties": {
        "InstanceWarmupPeriod": {
          "markdownDescription": "The period of time, in seconds, after a newly launched Amazon EC2 instance can contribute to CloudWatch metrics for the Auto Scaling group. If this parameter is omitted, the default value of `300` seconds is used.",
          "title": "InstanceWarmupPeriod",
          "type": "number"
        },
        "MaximumScalingStepSize": {
          "markdownDescription": "The maximum number of Amazon EC2 instances that Amazon ECS will scale out at one time. The scale in process is not affected by this parameter. If this parameter is omitted, the default value of `10000` is used.",
          "title": "MaximumScalingStepSize",
          "type": "number"
        },
        "MinimumScalingStepSize": {
          "markdownDescription": "The minimum number of Amazon EC2 instances that Amazon ECS will scale out at one time. The scale in process is not affected by this parameter. If this parameter is omitted, the default value of `1` is used.\n\nWhen additional capacity is required, Amazon ECS will scale up the minimum scaling step size even if the actual demand is less than the minimum scaling step size.\n\nIf you use a capacity provider with an Auto Scaling group configured with more than one Amazon EC2 instance type or Availability Zone, Amazon ECS will scale up by the exact minimum scaling step size value and will ignore both the maximum scaling step size as well as the capacity demand.",
          "title": "MinimumScalingStepSize",
          "type": "number"
        },
        "Status": {
          "markdownDescription": "Determines whether to use managed scaling for the capacity provider.",
          "title": "Status",
          "type": "string"
        },
        "TargetCapacity": {
          "markdownDescription": "The target capacity utilization as a percentage for the capacity provider. The specified value must be greater than `0` and less than or equal to `100` . For example, if you want the capacity provider to maintain 10% spare capacity, then that means the utilization is 90%, so use a `targetCapacity` of `90` . The default value of `100` percent results in the Amazon EC2 instances in your Auto Scaling group being completely used.",
          "title": "TargetCapacity",
          "type": "number"
        }
      },
      "type": "object"
    },
    "AWS::ECS::Cluster.ServiceConnectDefaults": {
      "additionalProperties": false,
      "properties": {
        "Namespace": {
          "markdownDescription": "The namespace name or full Amazon Resource Name (ARN) of the AWS Cloud Map namespace that's used when you create a service and don't specify a Service Connect configuration. The namespace name can include up to 1024 characters. The name is case-sensitive. 
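As an illustration of the capacity provider schema above, here is a minimal, hypothetical template fragment (the logical ID and Auto Scaling group ARN are placeholders); `TargetCapacity: 90` keeps the 10% spare capacity described for `TargetCapacity`, and managed termination protection is turned on only together with managed scaling, as its description requires:

    "ExampleCapacityProvider": {
      "Type": "AWS::ECS::CapacityProvider",
      "Properties": {
        "AutoScalingGroupProvider": {
          "AutoScalingGroupArn": "arn:aws:autoscaling:us-east-1:111122223333:autoScalingGroup:example-asg",
          "ManagedScaling": {
            "Status": "ENABLED",
            "TargetCapacity": 90,
            "MinimumScalingStepSize": 1,
            "MaximumScalingStepSize": 100,
            "InstanceWarmupPeriod": 300
          },
          "ManagedTerminationProtection": "ENABLED"
        }
      }
    }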
The name can't include hyphens (-), tilde (~), greater than (>), less than (<), or slash (/).\n\nIf you enter an existing namespace name or ARN, then that namespace will be used. Any namespace type is supported. The namespace must be in this account and this AWS Region.\n\nIf you enter a new name, a AWS Cloud Map namespace will be created. Amazon ECS creates a AWS Cloud Map namespace with the \"API calls\" method of instance discovery only. This instance discovery method is the \"HTTP\" namespace type in the AWS Command Line Interface . Other types of instance discovery aren't used by Service Connect.\n\nIf you update the cluster with an empty string `\"\"` for the namespace name, the cluster configuration for Service Connect is removed. Note that the namespace will remain in AWS Cloud Map and must be deleted separately.\n\nFor more information about AWS Cloud Map , see [Working with Services](https://docs.aws.amazon.com/cloud-map/latest/dg/working-with-services.html) in the *AWS Cloud Map Developer Guide* .", - "title": "Namespace", + "Status": { + "markdownDescription": "Determines whether to use managed scaling for the capacity provider.", + "title": "Status", "type": "string" + }, + "TargetCapacity": { + "markdownDescription": "The target capacity utilization as a percentage for the capacity provider. The specified value must be greater than `0` and less than or equal to `100` . For example, if you want the capacity provider to maintain 10% spare capacity, then that means the utilization is 90%, so use a `targetCapacity` of `90` . The default value of `100` percent results in the Amazon EC2 instances in your Auto Scaling group being completely used.", + "title": "TargetCapacity", + "type": "number" } }, "type": "object" }, - "AWS::ECS::ClusterCapacityProviderAssociations": { + "AWS::ECS::Cluster": { + "additionalProperties": false, + "properties": { + "Condition": { + "type": "string" + }, + "DeletionPolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + }, + "DependsOn": { + "anyOf": [ + { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + { + "items": { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + "type": "array" + } + ] + }, + "Metadata": { + "type": "object" + }, + "Properties": { + "additionalProperties": false, + "properties": { + "CapacityProviders": { + "items": { + "type": "string" + }, + "markdownDescription": "The short name of one or more capacity providers to associate with the cluster. A capacity provider must be associated with a cluster before it can be included as part of the default capacity provider strategy of the cluster or used in a capacity provider strategy when calling the [CreateService](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_CreateService.html) or [RunTask](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_RunTask.html) actions.\n\nIf specifying a capacity provider that uses an Auto Scaling group, the capacity provider must be created but not associated with another cluster. New Auto Scaling group capacity providers can be created with the [CreateCapacityProvider](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_CreateCapacityProvider.html) API operation.\n\nTo use a AWS Fargate capacity provider, specify either the `FARGATE` or `FARGATE_SPOT` capacity providers. 
The AWS Fargate capacity providers are available to all accounts and only need to be associated with a cluster to be used.\n\nThe [PutCapacityProvider](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_PutCapacityProvider.html) API operation is used to update the list of available capacity providers for a cluster after the cluster is created.", + "title": "CapacityProviders", + "type": "array" + }, + "ClusterName": { + "markdownDescription": "A user-generated string that you use to identify your cluster. If you don't specify a name, AWS CloudFormation generates a unique physical ID for the name.", + "title": "ClusterName", + "type": "string" + }, + "ClusterSettings": { + "items": { + "$ref": "#/definitions/AWS::ECS::Cluster.ClusterSettings" + }, + "markdownDescription": "The settings to use when creating a cluster. This parameter is used to turn on CloudWatch Container Insights for a cluster.", + "title": "ClusterSettings", + "type": "array" + }, + "Configuration": { + "$ref": "#/definitions/AWS::ECS::Cluster.ClusterConfiguration", + "markdownDescription": "The execute command configuration for the cluster.", + "title": "Configuration" + }, + "DefaultCapacityProviderStrategy": { + "items": { + "$ref": "#/definitions/AWS::ECS::Cluster.CapacityProviderStrategyItem" + }, + "markdownDescription": "The default capacity provider strategy for the cluster. When services or tasks are run in the cluster with no launch type or capacity provider strategy specified, the default capacity provider strategy is used.", + "title": "DefaultCapacityProviderStrategy", + "type": "array" + }, + "ServiceConnectDefaults": { + "$ref": "#/definitions/AWS::ECS::Cluster.ServiceConnectDefaults", + "markdownDescription": "Use this parameter to set a default Service Connect namespace. After you set a default Service Connect namespace, any new services with Service Connect turned on that are created in the cluster are added as client services in the namespace. This setting only applies to new services that set the `enabled` parameter to `true` in the `ServiceConnectConfiguration` . You can set the namespace of each service individually in the `ServiceConnectConfiguration` to override this default parameter.\n\nTasks that run in a namespace can use short names to connect to services in the namespace. Tasks can connect to services across all of the clusters in the namespace. Tasks connect through a managed proxy container that collects logs and metrics for increased visibility. Only the tasks that Amazon ECS services create are supported with Service Connect. For more information, see [Service Connect](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/service-connect.html) in the *Amazon Elastic Container Service Developer Guide* .", + "title": "ServiceConnectDefaults" + }, + "Tags": { + "items": { + "$ref": "#/definitions/Tag" + }, + "markdownDescription": "The metadata that you apply to the cluster to help you categorize and organize them. Each tag consists of a key and an optional value. You define both.\n\nThe following basic restrictions apply to tags:\n\n- Maximum number of tags per resource - 50\n- For each resource, each tag key must be unique, and each tag key can have only one value.\n- Maximum key length - 128 Unicode characters in UTF-8\n- Maximum value length - 256 Unicode characters in UTF-8\n- If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. 
Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . _ : / @.\n- Tag keys and values are case-sensitive.\n- Do not use `aws:` , `AWS:` , or any upper or lowercase combination of such as a prefix for either keys or values as it is reserved for AWS use. You cannot edit or delete tag keys or values with this prefix. Tags with this prefix do not count against your tags per resource limit.", + "title": "Tags", + "type": "array" + } + }, + "type": "object" + }, + "Type": { + "enum": [ + "AWS::ECS::Cluster" + ], + "type": "string" + }, + "UpdateReplacePolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + } + }, + "required": [ + "Type" + ], + "type": "object" + }, + "AWS::ECS::Cluster.CapacityProviderStrategyItem": { + "additionalProperties": false, + "properties": { + "Base": { + "markdownDescription": "The *base* value designates how many tasks, at a minimum, to run on the specified capacity provider. Only one capacity provider in a capacity provider strategy can have a *base* defined. If no value is specified, the default value of `0` is used.", + "title": "Base", + "type": "number" + }, + "CapacityProvider": { + "markdownDescription": "The short name of the capacity provider.", + "title": "CapacityProvider", + "type": "string" + }, + "Weight": { + "markdownDescription": "The *weight* value designates the relative percentage of the total number of tasks launched that should use the specified capacity provider. The `weight` value is taken into consideration after the `base` value, if defined, is satisfied.\n\nIf no `weight` value is specified, the default value of `0` is used. When multiple capacity providers are specified within a capacity provider strategy, at least one of the capacity providers must have a weight value greater than zero and any capacity providers with a weight of `0` can't be used to place tasks. If you specify multiple capacity providers in a strategy that all have a weight of `0` , any `RunTask` or `CreateService` actions using the capacity provider strategy will fail.\n\nAn example scenario for using weights is defining a strategy that contains two capacity providers and both have a weight of `1` , then when the `base` is satisfied, the tasks will be split evenly across the two capacity providers. Using that same logic, if you specify a weight of `1` for *capacityProviderA* and a weight of `4` for *capacityProviderB* , then for every one task that's run using *capacityProviderA* , four tasks would use *capacityProviderB* .", + "title": "Weight", + "type": "number" + } + }, + "type": "object" + }, + "AWS::ECS::Cluster.ClusterConfiguration": { + "additionalProperties": false, + "properties": { + "ExecuteCommandConfiguration": { + "$ref": "#/definitions/AWS::ECS::Cluster.ExecuteCommandConfiguration", + "markdownDescription": "The details of the execute command configuration.", + "title": "ExecuteCommandConfiguration" + } + }, + "type": "object" + }, + "AWS::ECS::Cluster.ClusterSettings": { + "additionalProperties": false, + "properties": { + "Name": { + "markdownDescription": "The name of the cluster setting. The value is `containerInsights` .", + "title": "Name", + "type": "string" + }, + "Value": { + "markdownDescription": "The value to set for the cluster setting. 
The supported values are `enabled` and `disabled` .\n\nIf you set `name` to `containerInsights` and `value` to `enabled` , CloudWatch Container Insights will be on for the cluster, otherwise it will be off unless the `containerInsights` account setting is turned on. If a cluster value is specified, it will override the `containerInsights` value set with [PutAccountSetting](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_PutAccountSetting.html) or [PutAccountSettingDefault](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_PutAccountSettingDefault.html) .", + "title": "Value", + "type": "string" + } + }, + "type": "object" + }, + "AWS::ECS::Cluster.ExecuteCommandConfiguration": { + "additionalProperties": false, + "properties": { + "KmsKeyId": { + "markdownDescription": "Specify an AWS Key Management Service key ID to encrypt the data between the local client and the container.", + "title": "KmsKeyId", + "type": "string" + }, + "LogConfiguration": { + "$ref": "#/definitions/AWS::ECS::Cluster.ExecuteCommandLogConfiguration", + "markdownDescription": "The log configuration for the results of the execute command actions. The logs can be sent to CloudWatch Logs or an Amazon S3 bucket. When `logging=OVERRIDE` is specified, a `logConfiguration` must be provided.", + "title": "LogConfiguration" + }, + "Logging": { + "markdownDescription": "The log setting to use for redirecting logs for your execute command results. The following log settings are available.\n\n- `NONE` : The execute command session is not logged.\n- `DEFAULT` : The `awslogs` configuration in the task definition is used. If no logging parameter is specified, it defaults to this value. If no `awslogs` log driver is configured in the task definition, the output won't be logged.\n- `OVERRIDE` : Specify the logging details as a part of `logConfiguration` . If the `OVERRIDE` logging option is specified, the `logConfiguration` is required.", + "title": "Logging", + "type": "string" + } + }, + "type": "object" + }, + "AWS::ECS::Cluster.ExecuteCommandLogConfiguration": { + "additionalProperties": false, + "properties": { + "CloudWatchEncryptionEnabled": { + "markdownDescription": "Determines whether to use encryption on the CloudWatch logs. If not specified, encryption will be off.", + "title": "CloudWatchEncryptionEnabled", + "type": "boolean" + }, + "CloudWatchLogGroupName": { + "markdownDescription": "The name of the CloudWatch log group to send logs to.\n\n> The CloudWatch log group must already be created.", + "title": "CloudWatchLogGroupName", + "type": "string" + }, + "S3BucketName": { + "markdownDescription": "The name of the S3 bucket to send logs to.\n\n> The S3 bucket must already be created.", + "title": "S3BucketName", + "type": "string" + }, + "S3EncryptionEnabled": { + "markdownDescription": "Determines whether to use encryption on the S3 logs. If not specified, encryption is not used.", + "title": "S3EncryptionEnabled", + "type": "boolean" + }, + "S3KeyPrefix": { + "markdownDescription": "An optional folder in the S3 bucket to place logs in.", + "title": "S3KeyPrefix", + "type": "string" + } + }, + "type": "object" + }, + "AWS::ECS::Cluster.ServiceConnectDefaults": { + "additionalProperties": false, + "properties": { + "Namespace": { + "markdownDescription": "The namespace name or full Amazon Resource Name (ARN) of the AWS Cloud Map namespace that's used when you create a service and don't specify a Service Connect configuration. The namespace name can include up to 1024 characters. 
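For a sketch of how these cluster-level settings compose (all names and values illustrative): the strategy below runs the first task on `FARGATE` and thereafter places tasks at roughly a 1:4 `FARGATE` to `FARGATE_SPOT` ratio per the `Base`/`Weight` semantics above, with Container Insights turned on:

    "ExampleCluster": {
      "Type": "AWS::ECS::Cluster",
      "Properties": {
        "ClusterName": "example-cluster",
        "CapacityProviders": ["FARGATE", "FARGATE_SPOT"],
        "DefaultCapacityProviderStrategy": [
          { "CapacityProvider": "FARGATE", "Base": 1, "Weight": 1 },
          { "CapacityProvider": "FARGATE_SPOT", "Weight": 4 }
        ],
        "ClusterSettings": [
          { "Name": "containerInsights", "Value": "enabled" }
        ]
      }
    }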
The name is case-sensitive. The name can't include hyphens (-), tilde (~), greater than (>), less than (<), or slash (/).\n\nIf you enter an existing namespace name or ARN, then that namespace will be used. Any namespace type is supported. The namespace must be in this account and this AWS Region.\n\nIf you enter a new name, a AWS Cloud Map namespace will be created. Amazon ECS creates a AWS Cloud Map namespace with the \"API calls\" method of instance discovery only. This instance discovery method is the \"HTTP\" namespace type in the AWS Command Line Interface . Other types of instance discovery aren't used by Service Connect.\n\nIf you update the cluster with an empty string `\"\"` for the namespace name, the cluster configuration for Service Connect is removed. Note that the namespace will remain in AWS Cloud Map and must be deleted separately.\n\nFor more information about AWS Cloud Map , see [Working with Services](https://docs.aws.amazon.com/cloud-map/latest/dg/working-with-services.html) in the *AWS Cloud Map Developer Guide* .", + "title": "Namespace", + "type": "string" + } + }, + "type": "object" + }, + "AWS::ECS::ClusterCapacityProviderAssociations": { "additionalProperties": false, "properties": { "Condition": { @@ -81151,7 +83958,7 @@ }, "RuntimePlatform": { "$ref": "#/definitions/AWS::ECS::TaskDefinition.RuntimePlatform", - "markdownDescription": "The operating system that your tasks definitions run on. A platform family is specified only for tasks using the Fargate launch type.\n\nWhen you specify a task definition in a service, this value must match the `runtimePlatform` value of the service.", + "markdownDescription": "The operating system that your tasks definitions run on. A platform family is specified only for tasks using the Fargate launch type.", "title": "RuntimePlatform" }, "Tags": { @@ -81628,6 +84435,51 @@ }, "type": "object" }, + "AWS::ECS::TaskDefinition.FSxAuthorizationConfig": { + "additionalProperties": false, + "properties": { + "CredentialsParameter": { + "markdownDescription": "", + "title": "CredentialsParameter", + "type": "string" + }, + "Domain": { + "markdownDescription": "", + "title": "Domain", + "type": "string" + } + }, + "required": [ + "CredentialsParameter", + "Domain" + ], + "type": "object" + }, + "AWS::ECS::TaskDefinition.FSxWindowsFileServerVolumeConfiguration": { + "additionalProperties": false, + "properties": { + "AuthorizationConfig": { + "$ref": "#/definitions/AWS::ECS::TaskDefinition.FSxAuthorizationConfig", + "markdownDescription": "The authorization configuration details for the Amazon FSx for Windows File Server file system.", + "title": "AuthorizationConfig" + }, + "FileSystemId": { + "markdownDescription": "The Amazon FSx for Windows File Server file system ID to use.", + "title": "FileSystemId", + "type": "string" + }, + "RootDirectory": { + "markdownDescription": "The directory within the Amazon FSx for Windows File Server file system to mount as the root directory inside the host.", + "title": "RootDirectory", + "type": "string" + } + }, + "required": [ + "FileSystemId", + "RootDirectory" + ], + "type": "object" + }, "AWS::ECS::TaskDefinition.FirelensConfiguration": { "additionalProperties": false, "properties": { @@ -82102,6 +84954,11 @@ "markdownDescription": "This parameter is specified when you use an Amazon Elastic File System file system for task storage.", "title": "EFSVolumeConfiguration" }, + "FSxWindowsFileServerVolumeConfiguration": { + "$ref": 
"#/definitions/AWS::ECS::TaskDefinition.FSxWindowsFileServerVolumeConfiguration", + "markdownDescription": "This parameter is specified when you use Amazon FSx for Windows File Server file system for task storage.", + "title": "FSxWindowsFileServerVolumeConfiguration" + }, "Host": { "$ref": "#/definitions/AWS::ECS::TaskDefinition.HostVolumeProperties", "markdownDescription": "This parameter is specified when you use bind mount host volumes. The contents of the `host` parameter determine whether your bind mount host volume persists on the host container instance and where it's stored. If the `host` parameter is empty, then the Docker daemon assigns a host path for your data volume. However, the data isn't guaranteed to persist after the containers that are associated with it stop running.\n\nWindows containers can mount whole directories on the same drive as `$env:ProgramData` . Windows containers can't mount directories on a different drive, and mount point can't be across drives. For example, you can mount `C:\\my\\path:C:\\my\\path` and `D:\\:D:\\` , but not `D:\\my\\path:C:\\my\\path` or `D:\\:C:\\my\\path` .", @@ -82752,7 +85609,7 @@ "additionalProperties": false, "properties": { "AvailabilityZoneName": { - "markdownDescription": "The AWS For One Zone file systems, the replication configuration must specify the Availability Zone in which the destination file system is located.\n\nUse the format `us-east-1a` to specify the Availability Zone. For more information about One Zone file systems, see [EFS file system types](https://docs.aws.amazon.com/efs/latest/ug/storage-classes.html) in the *Amazon EFS User Guide* .\n\n> One Zone file system type is not available in all Availability Zones in AWS Regions where Amazon EFS is available.", + "markdownDescription": "For One Zone file systems, the replication configuration must specify the Availability Zone in which the destination file system is located.\n\nUse the format `us-east-1a` to specify the Availability Zone. For more information about One Zone file systems, see [EFS file system types](https://docs.aws.amazon.com/efs/latest/ug/storage-classes.html) in the *Amazon EFS User Guide* .\n\n> One Zone file system type is not available in all Availability Zones in AWS Regions where Amazon EFS is available.", "title": "AvailabilityZoneName", "type": "string" }, @@ -84817,7 +87674,7 @@ "type": "string" }, "KeepJobFlowAliveWhenNoSteps": { - "markdownDescription": "Specifies whether the cluster should remain available after completing all steps. Defaults to `true` . For more information about configuring cluster termination, see [Control Cluster Termination](https://docs.aws.amazon.com/emr/latest/ManagementGuide/emr-plan-termination.html) in the *EMR Management Guide* .", + "markdownDescription": "Specifies whether the cluster should remain available after completing all steps. Defaults to `false` . 
For more information about configuring cluster termination, see [Control Cluster Termination](https://docs.aws.amazon.com/emr/latest/ManagementGuide/emr-plan-termination.html) in the *EMR Management Guide* .",
               "title": "KeepJobFlowAliveWhenNoSteps",
               "type": "boolean"
             },
@@ -88336,6 +91193,11 @@
           "title": "Maximum",
           "type": "number"
         },
+        "Minimum": {
+          "markdownDescription": "The lower limit for data storage the cache is set to use.",
+          "title": "Minimum",
+          "type": "number"
+        },
         "Unit": {
           "markdownDescription": "The unit that the storage is measured in, in GB.",
           "title": "Unit",
@@ -88343,7 +91205,6 @@
         }
       },
       "required": [
-        "Maximum",
         "Unit"
       ],
       "type": "object"
@@ -88355,11 +91216,13 @@
           "markdownDescription": "The configuration for the maximum number of ECPUs the cache can consume per second.",
           "title": "Maximum",
           "type": "number"
+        },
+        "Minimum": {
+          "markdownDescription": "The configuration for the minimum number of ECPUs the cache should be able to consume per second.",
+          "title": "Minimum",
+          "type": "number"
        }
      },
-      "required": [
-        "Maximum"
-      ],
      "type": "object"
    },
    "AWS::ElastiCache::ServerlessCache.Endpoint": {
@@ -91902,7 +94765,7 @@
          "type": "string"
        },
        "Type": {
-          "markdownDescription": "",
+          "markdownDescription": "The type of ID namespace. There are two types: `SOURCE` and `TARGET` .\n\nThe `SOURCE` contains configurations for `sourceId` data that will be processed in an ID mapping workflow.\n\nThe `TARGET` contains a configuration of `targetId` to which all `sourceIds` will resolve.",
          "title": "Type",
          "type": "string"
        }
@@ -91975,6 +94838,176 @@
      ],
      "type": "object"
    },
+    "AWS::EntityResolution::IdNamespace": {
+      "additionalProperties": false,
+      "properties": {
+        "Condition": {
+          "type": "string"
+        },
+        "DeletionPolicy": {
+          "enum": [
+            "Delete",
+            "Retain",
+            "Snapshot"
+          ],
+          "type": "string"
+        },
+        "DependsOn": {
+          "anyOf": [
+            {
+              "pattern": "^[a-zA-Z0-9]+$",
+              "type": "string"
+            },
+            {
+              "items": {
+                "pattern": "^[a-zA-Z0-9]+$",
+                "type": "string"
+              },
+              "type": "array"
+            }
+          ]
+        },
+        "Metadata": {
+          "type": "object"
+        },
+        "Properties": {
+          "additionalProperties": false,
+          "properties": {
+            "Description": {
+              "markdownDescription": "The description of the ID namespace.",
+              "title": "Description",
+              "type": "string"
+            },
+            "IdMappingWorkflowProperties": {
+              "items": {
+                "$ref": "#/definitions/AWS::EntityResolution::IdNamespace.IdNamespaceIdMappingWorkflowProperties"
+              },
+              "markdownDescription": "Determines the properties of `IdMappingWorkflow` where this `IdNamespace` can be used as a `Source` or a `Target` .",
+              "title": "IdMappingWorkflowProperties",
+              "type": "array"
+            },
+            "IdNamespaceName": {
+              "markdownDescription": "The name of the ID namespace.",
+              "title": "IdNamespaceName",
+              "type": "string"
+            },
+            "InputSourceConfig": {
+              "items": {
+                "$ref": "#/definitions/AWS::EntityResolution::IdNamespace.IdNamespaceInputSource"
+              },
+              "markdownDescription": "A list of `InputSource` objects, which have the fields `InputSourceARN` and `SchemaName` .",
+              "title": "InputSourceConfig",
+              "type": "array"
+            },
+            "RoleArn": {
+              "markdownDescription": "The Amazon Resource Name (ARN) of the IAM role. 
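Looking back at the ElastiCache hunk above: with `Maximum` no longer required, either bound may be set on its own. A sketch, assuming these definitions back the `DataStorage` and `ECPUPerSecond` properties of the serverless cache's `CacheUsageLimits` (those wrapper names are not shown in this hunk):

    "CacheUsageLimits": {
      "DataStorage": { "Minimum": 1, "Maximum": 10, "Unit": "GB" },
      "ECPUPerSecond": { "Minimum": 1000, "Maximum": 5000 }
    }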
AWS Entity Resolution assumes this role to access the resources defined in this `IdNamespace` on your behalf as part of the workflow run.",
+              "title": "RoleArn",
+              "type": "string"
+            },
+            "Tags": {
+              "items": {
+                "$ref": "#/definitions/Tag"
+              },
+              "markdownDescription": "The tags used to organize, track, or control access for this resource.",
+              "title": "Tags",
+              "type": "array"
+            },
+            "Type": {
+              "markdownDescription": "The type of ID namespace. There are two types: `SOURCE` and `TARGET` .\n\nThe `SOURCE` contains configurations for `sourceId` data that will be processed in an ID mapping workflow.\n\nThe `TARGET` contains a configuration of `targetId` to which all `sourceIds` will resolve.",
+              "title": "Type",
+              "type": "string"
+            }
+          },
+          "required": [
+            "IdNamespaceName",
+            "Type"
+          ],
+          "type": "object"
+        },
+        "Type": {
+          "enum": [
+            "AWS::EntityResolution::IdNamespace"
+          ],
+          "type": "string"
+        },
+        "UpdateReplacePolicy": {
+          "enum": [
+            "Delete",
+            "Retain",
+            "Snapshot"
+          ],
+          "type": "string"
+        }
+      },
+      "required": [
+        "Type",
+        "Properties"
+      ],
+      "type": "object"
+    },
+    "AWS::EntityResolution::IdNamespace.IdNamespaceIdMappingWorkflowProperties": {
+      "additionalProperties": false,
+      "properties": {
+        "IdMappingType": {
+          "markdownDescription": "The type of ID mapping.",
+          "title": "IdMappingType",
+          "type": "string"
+        },
+        "ProviderProperties": {
+          "$ref": "#/definitions/AWS::EntityResolution::IdNamespace.NamespaceProviderProperties",
+          "markdownDescription": "An object which defines any additional configurations required by the provider service.",
+          "title": "ProviderProperties"
+        }
+      },
+      "required": [
+        "IdMappingType"
+      ],
+      "type": "object"
+    },
+    "AWS::EntityResolution::IdNamespace.IdNamespaceInputSource": {
+      "additionalProperties": false,
+      "properties": {
+        "InputSourceARN": {
+          "markdownDescription": "An AWS Glue table ARN for the input source table.",
+          "title": "InputSourceARN",
+          "type": "string"
+        },
+        "SchemaName": {
+          "markdownDescription": "The name of the schema.",
+          "title": "SchemaName",
+          "type": "string"
+        }
+      },
+      "required": [
+        "InputSourceARN"
+      ],
+      "type": "object"
+    },
+    "AWS::EntityResolution::IdNamespace.NamespaceProviderProperties": {
+      "additionalProperties": false,
+      "properties": {
+        "ProviderConfiguration": {
+          "additionalProperties": true,
+          "markdownDescription": "An object which defines any additional configurations required by the provider service.",
+          "patternProperties": {
+            "^[a-zA-Z0-9]+$": {
+              "type": "string"
+            }
+          },
+          "title": "ProviderConfiguration",
+          "type": "object"
+        },
+        "ProviderServiceArn": {
+          "markdownDescription": "The Amazon Resource Name (ARN) of the provider service.",
+          "title": "ProviderServiceArn",
+          "type": "string"
+        }
+      },
+      "required": [
+        "ProviderServiceArn"
+      ],
+      "type": "object"
+    },
    "AWS::EntityResolution::MatchingWorkflow": {
      "additionalProperties": false,
      "properties": {
@@ -92273,7 +95306,7 @@
      ],
      "type": "object"
    },
-    "AWS::EntityResolution::SchemaMapping": {
+    "AWS::EntityResolution::PolicyStatement": {
      "additionalProperties": false,
      "properties": {
        "Condition": {
@@ -92308,42 +95341,52 @@
        "Properties": {
          "additionalProperties": false,
          "properties": {
-            "Description": {
-              "markdownDescription": "A description of the schema.",
-              "title": "Description",
-              "type": "string"
-            },
-            "MappedInputFields": {
+            "Action": {
              "items": {
-                "$ref": "#/definitions/AWS::EntityResolution::SchemaMapping.SchemaInputAttribute"
+                "type": "string"
              },
-              "markdownDescription": "A list of `MappedInputFields` . 
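A minimal `AWS::EntityResolution::IdNamespace` fragment that satisfies the `IdNamespaceName` and `Type` requirements above (ARNs and names are hypothetical, and `PROVIDER` is assumed as one valid `IdMappingType`):

    "ExampleIdNamespace": {
      "Type": "AWS::EntityResolution::IdNamespace",
      "Properties": {
        "IdNamespaceName": "example-source-namespace",
        "Type": "SOURCE",
        "RoleArn": "arn:aws:iam::111122223333:role/example-entity-resolution-role",
        "InputSourceConfig": [
          {
            "InputSourceARN": "arn:aws:glue:us-east-1:111122223333:table/example_db/example_table",
            "SchemaName": "example-schema"
          }
        ],
        "IdMappingWorkflowProperties": [
          { "IdMappingType": "PROVIDER" }
        ]
      }
    }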
Each `MappedInputField` corresponds to a column the source data table, and contains column name plus additional information that AWS Entity Resolution uses for matching.", - "title": "MappedInputFields", + "markdownDescription": "The action that the principal can use on the resource.\n\nFor example, `entityresolution:GetIdMappingJob` , `entityresolution:GetMatchingJob` .", + "title": "Action", "type": "array" }, - "SchemaName": { - "markdownDescription": "The name of the schema. There can't be multiple `SchemaMappings` with the same name.", - "title": "SchemaName", + "Arn": { + "markdownDescription": "The Amazon Resource Name (ARN) of the resource that will be accessed by the principal.", + "title": "Arn", "type": "string" }, - "Tags": { + "Condition": { + "markdownDescription": "A set of condition keys that you can use in key policies.", + "title": "Condition", + "type": "string" + }, + "Effect": { + "markdownDescription": "Determines whether the permissions specified in the policy are to be allowed ( `Allow` ) or denied ( `Deny` ).", + "title": "Effect", + "type": "string" + }, + "Principal": { "items": { - "$ref": "#/definitions/Tag" + "type": "string" }, - "markdownDescription": "The tags used to organize, track, or control access for this resource.", - "title": "Tags", + "markdownDescription": "The AWS service or AWS account that can access the resource defined as ARN.", + "title": "Principal", "type": "array" + }, + "StatementId": { + "markdownDescription": "A statement identifier that differentiates the statement from others in the same policy.", + "title": "StatementId", + "type": "string" } }, "required": [ - "MappedInputFields", - "SchemaName" + "Arn", + "StatementId" ], "type": "object" }, "Type": { "enum": [ - "AWS::EntityResolution::SchemaMapping" + "AWS::EntityResolution::PolicyStatement" ], "type": "string" }, @@ -92362,42 +95405,7 @@ ], "type": "object" }, - "AWS::EntityResolution::SchemaMapping.SchemaInputAttribute": { - "additionalProperties": false, - "properties": { - "FieldName": { - "markdownDescription": "A string containing the field name.", - "title": "FieldName", - "type": "string" - }, - "GroupName": { - "markdownDescription": "Instruct AWS Entity Resolution to combine several columns into a unified column with the identical attribute type. For example, when working with columns such as first_name, middle_name, and last_name, assigning them a common `GroupName` will prompt AWS Entity Resolution to concatenate them into a single value.", - "title": "GroupName", - "type": "string" - }, - "MatchKey": { - "markdownDescription": "A key that allows grouping of multiple input attributes into a unified matching group. For example, let's consider a scenario where the source table contains various addresses, such as `business_address` and `shipping_address` . By assigning the `MatchKey` *Address* to both attributes, AWS Entity Resolution will match records across these fields to create a consolidated matching group. 
If no `MatchKey` is specified for a column, it won't be utilized for matching purposes but will still be included in the output table.",
-        "title": "MatchKey",
-        "type": "string"
-      },
-      "SubType": {
-        "markdownDescription": "The subtype of the attribute, selected from a list of values.",
-        "title": "SubType",
-        "type": "string"
-      },
-      "Type": {
-        "markdownDescription": "The type of the attribute, selected from a list of values.",
-        "title": "Type",
-        "type": "string"
-      }
-    },
-    "required": [
-      "FieldName",
-      "Type"
-    ],
-    "type": "object"
-  },
-  "AWS::EventSchemas::Discoverer": {
+  "AWS::EntityResolution::SchemaMapping": {
    "additionalProperties": false,
    "properties": {
      "Condition": {
        "type": "string"
      },
      "DeletionPolicy": {
        "enum": [
          "Delete",
          "Retain",
          "Snapshot"
        ],
        "type": "string"
      },
      "DependsOn": {
        "anyOf": [
          {
            "pattern": "^[a-zA-Z0-9]+$",
            "type": "string"
          },
          {
            "items": {
              "pattern": "^[a-zA-Z0-9]+$",
              "type": "string"
            },
            "type": "array"
          }
        ]
      },
      "Metadata": {
        "type": "object"
      },
      "Properties": {
        "additionalProperties": false,
        "properties": {
-          "CrossAccount": {
-            "markdownDescription": "Allows for the discovery of the event schemas that are sent to the event bus from another account.",
-            "title": "CrossAccount",
-            "type": "boolean"
-          },
          "Description": {
-            "markdownDescription": "A description for the discoverer.",
+            "markdownDescription": "A description of the schema.",
            "title": "Description",
            "type": "string"
          },
-          "SourceArn": {
-            "markdownDescription": "The ARN of the event bus.",
-            "title": "SourceArn",
+          "MappedInputFields": {
+            "items": {
+              "$ref": "#/definitions/AWS::EntityResolution::SchemaMapping.SchemaInputAttribute"
+            },
+            "markdownDescription": "A list of `MappedInputFields` . Each `MappedInputField` corresponds to a column in the source data table, and contains the column name plus additional information that AWS Entity Resolution uses for matching.",
+            "title": "MappedInputFields",
+            "type": "array"
+          },
+          "SchemaName": {
+            "markdownDescription": "The name of the schema. There can't be multiple `SchemaMappings` with the same name.",
+            "title": "SchemaName",
            "type": "string"
          },
          "Tags": {
            "items": {
-              "$ref": "#/definitions/AWS::EventSchemas::Discoverer.TagsEntry"
+              "$ref": "#/definitions/Tag"
            },
-            "markdownDescription": "Tags associated with the resource.",
+            "markdownDescription": "The tags used to organize, track, or control access for this resource.",
            "title": "Tags",
            "type": "array"
          }
        },
        "required": [
-          "SourceArn"
+          "MappedInputFields",
+          "SchemaName"
        ],
        "type": "object"
      },
      "Type": {
        "enum": [
-          "AWS::EventSchemas::Discoverer"
+          "AWS::EntityResolution::SchemaMapping"
        ],
        "type": "string"
      },
@@ -92482,27 +95494,147 @@
      ],
      "type": "object"
    },
-    "AWS::EventSchemas::Discoverer.TagsEntry": {
+    "AWS::EntityResolution::SchemaMapping.SchemaInputAttribute": {
      "additionalProperties": false,
      "properties": {
-        "Key": {
-          "markdownDescription": "The key of a key-value pair.",
-          "title": "Key",
+        "FieldName": {
+          "markdownDescription": "A string containing the field name.",
+          "title": "FieldName",
          "type": "string"
        },
-        "Value": {
-          "markdownDescription": "The value of a key-value pair.",
-          "title": "Value",
+        "GroupName": {
+          "markdownDescription": "A string that instructs AWS Entity Resolution to combine several columns into a unified column with the identical attribute type.\n\nFor example, when working with columns such as `first_name` , `middle_name` , and `last_name` , assigning them a common `groupName` will prompt AWS Entity Resolution to concatenate them into a single value.",
+          "title": "GroupName",
+          "type": "string"
+        },
+        "MatchKey": {
+          "markdownDescription": "A key that allows grouping of multiple input attributes into a unified matching group. 
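A sketch of a `MappedInputFields` list that exercises `GroupName` and `MatchKey` as described here — the field names are illustrative, and the `Type` values (`NAME_FIRST`, `NAME_LAST`, `ADDRESS`) are assumed from the AWS Entity Resolution attribute-type list rather than from this hunk:

    "MappedInputFields": [
      { "FieldName": "first_name", "Type": "NAME_FIRST", "GroupName": "full_name", "MatchKey": "name" },
      { "FieldName": "last_name", "Type": "NAME_LAST", "GroupName": "full_name", "MatchKey": "name" },
      { "FieldName": "business_address", "Type": "ADDRESS", "MatchKey": "address" }
    ]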
For example, consider a scenario where the source table contains various addresses, such as `business_address` and `shipping_address` . By assigning a `matchKey` called `address` to both attributes, AWS Entity Resolution will match records across these fields to create a consolidated matching group. If no `matchKey` is specified for a column, it won't be utilized for matching purposes but will still be included in the output table.", + "title": "MatchKey", + "type": "string" + }, + "SubType": { + "markdownDescription": "The subtype of the attribute, selected from a list of values.", + "title": "SubType", + "type": "string" + }, + "Type": { + "markdownDescription": "The type of the attribute, selected from a list of values.", + "title": "Type", "type": "string" } }, "required": [ - "Key", - "Value" + "FieldName", + "Type" ], "type": "object" }, - "AWS::EventSchemas::Registry": { + "AWS::EventSchemas::Discoverer": { + "additionalProperties": false, + "properties": { + "Condition": { + "type": "string" + }, + "DeletionPolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + }, + "DependsOn": { + "anyOf": [ + { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + { + "items": { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + "type": "array" + } + ] + }, + "Metadata": { + "type": "object" + }, + "Properties": { + "additionalProperties": false, + "properties": { + "CrossAccount": { + "markdownDescription": "Allows for the discovery of the event schemas that are sent to the event bus from another account.", + "title": "CrossAccount", + "type": "boolean" + }, + "Description": { + "markdownDescription": "A description for the discoverer.", + "title": "Description", + "type": "string" + }, + "SourceArn": { + "markdownDescription": "The ARN of the event bus.", + "title": "SourceArn", + "type": "string" + }, + "Tags": { + "items": { + "$ref": "#/definitions/AWS::EventSchemas::Discoverer.TagsEntry" + }, + "markdownDescription": "Tags associated with the resource.", + "title": "Tags", + "type": "array" + } + }, + "required": [ + "SourceArn" + ], + "type": "object" + }, + "Type": { + "enum": [ + "AWS::EventSchemas::Discoverer" + ], + "type": "string" + }, + "UpdateReplacePolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + } + }, + "required": [ + "Type", + "Properties" + ], + "type": "object" + }, + "AWS::EventSchemas::Discoverer.TagsEntry": { + "additionalProperties": false, + "properties": { + "Key": { + "markdownDescription": "The key of a key-value pair.", + "title": "Key", + "type": "string" + }, + "Value": { + "markdownDescription": "The value of a key-value pair.", + "title": "Value", + "type": "string" + } + }, + "required": [ + "Key", + "Value" + ], + "type": "object" + }, + "AWS::EventSchemas::Registry": { "additionalProperties": false, "properties": { "Condition": { @@ -100773,6 +103905,116 @@ ], "type": "object" }, + "AWS::GlobalAccelerator::CrossAccountAttachment": { + "additionalProperties": false, + "properties": { + "Condition": { + "type": "string" + }, + "DeletionPolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + }, + "DependsOn": { + "anyOf": [ + { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + { + "items": { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + "type": "array" + } + ] + }, + "Metadata": { + "type": "object" + }, + "Properties": { + "additionalProperties": false, + "properties": { + "Name": { + "markdownDescription": "", + "title": "Name", + 
"type": "string" + }, + "Principals": { + "items": { + "type": "string" + }, + "markdownDescription": "", + "title": "Principals", + "type": "array" + }, + "Resources": { + "items": { + "$ref": "#/definitions/AWS::GlobalAccelerator::CrossAccountAttachment.Resource" + }, + "markdownDescription": "", + "title": "Resources", + "type": "array" + }, + "Tags": { + "items": { + "$ref": "#/definitions/Tag" + }, + "markdownDescription": "", + "title": "Tags", + "type": "array" + } + }, + "required": [ + "Name" + ], + "type": "object" + }, + "Type": { + "enum": [ + "AWS::GlobalAccelerator::CrossAccountAttachment" + ], + "type": "string" + }, + "UpdateReplacePolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + } + }, + "required": [ + "Type", + "Properties" + ], + "type": "object" + }, + "AWS::GlobalAccelerator::CrossAccountAttachment.Resource": { + "additionalProperties": false, + "properties": { + "EndpointId": { + "markdownDescription": "The endpoint ID for the endpoint that is specified as a AWS resource.\n\nAn endpoint ID for the cross-account feature is the ARN of an AWS resource, such as a Network Load Balancer, that Global Accelerator supports as an endpoint for an accelerator.", + "title": "EndpointId", + "type": "string" + }, + "Region": { + "markdownDescription": "The AWS Region where a shared endpoint resource is located.", + "title": "Region", + "type": "string" + } + }, + "required": [ + "EndpointId" + ], + "type": "object" + }, "AWS::GlobalAccelerator::EndpointGroup": { "additionalProperties": false, "properties": { @@ -111659,6 +114901,108 @@ ], "type": "object" }, + "AWS::IVS::EncoderConfiguration": { + "additionalProperties": false, + "properties": { + "Condition": { + "type": "string" + }, + "DeletionPolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + }, + "DependsOn": { + "anyOf": [ + { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + { + "items": { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + "type": "array" + } + ] + }, + "Metadata": { + "type": "object" + }, + "Properties": { + "additionalProperties": false, + "properties": { + "Name": { + "markdownDescription": "Encoder cnfiguration name.", + "title": "Name", + "type": "string" + }, + "Tags": { + "items": { + "$ref": "#/definitions/Tag" + }, + "markdownDescription": "An array of key-value pairs to apply to this resource.\n\nFor more information, see [Tag](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ivs-encoderconfiguration-tag.html) .", + "title": "Tags", + "type": "array" + }, + "Video": { + "$ref": "#/definitions/AWS::IVS::EncoderConfiguration.Video", + "markdownDescription": "Video configuration. Default: video resolution 1280x720, bitrate 2500 kbps, 30 fps. See the [Video](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ivs-encoderconfiguration-video.html) property type for more information.", + "title": "Video" + } + }, + "type": "object" + }, + "Type": { + "enum": [ + "AWS::IVS::EncoderConfiguration" + ], + "type": "string" + }, + "UpdateReplacePolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + } + }, + "required": [ + "Type" + ], + "type": "object" + }, + "AWS::IVS::EncoderConfiguration.Video": { + "additionalProperties": false, + "properties": { + "Bitrate": { + "markdownDescription": "Bitrate for generated output, in bps. 
Default: 2500000.",
+          "title": "Bitrate",
+          "type": "number"
+        },
+        "Framerate": {
+          "markdownDescription": "Video frame rate, in fps. Default: 30.",
+          "title": "Framerate",
+          "type": "number"
+        },
+        "Height": {
+          "markdownDescription": "Video-resolution height. Note that the maximum value is determined by width times height, such that the maximum total pixels is 2073600 (1920x1080 or 1080x1920). Default: 720.",
+          "title": "Height",
+          "type": "number"
+        },
+        "Width": {
+          "markdownDescription": "Video-resolution width. Note that the maximum value is determined by width times height, such that the maximum total pixels is 2073600 (1920x1080 or 1080x1920). Default: 1280.",
+          "title": "Width",
+          "type": "number"
+        }
+      },
+      "type": "object"
+    },
  "AWS::IVS::PlaybackKeyPair": {
    "additionalProperties": false,
    "properties": {
      "Condition": {
@@ -111735,6 +115079,103 @@
      ],
      "type": "object"
    },
+    "AWS::IVS::PlaybackRestrictionPolicy": {
+      "additionalProperties": false,
+      "properties": {
+        "Condition": {
+          "type": "string"
+        },
+        "DeletionPolicy": {
+          "enum": [
+            "Delete",
+            "Retain",
+            "Snapshot"
+          ],
+          "type": "string"
+        },
+        "DependsOn": {
+          "anyOf": [
+            {
+              "pattern": "^[a-zA-Z0-9]+$",
+              "type": "string"
+            },
+            {
+              "items": {
+                "pattern": "^[a-zA-Z0-9]+$",
+                "type": "string"
+              },
+              "type": "array"
+            }
+          ]
+        },
+        "Metadata": {
+          "type": "object"
+        },
+        "Properties": {
+          "additionalProperties": false,
+          "properties": {
+            "AllowedCountries": {
+              "items": {
+                "type": "string"
+              },
+              "markdownDescription": "A list of country codes that control geoblocking restrictions. Allowed values are the officially assigned ISO 3166-1 alpha-2 codes. Default: All countries (an empty array).",
+              "title": "AllowedCountries",
+              "type": "array"
+            },
+            "AllowedOrigins": {
+              "items": {
+                "type": "string"
+              },
+              "markdownDescription": "A list of origin sites that control CORS restriction. Allowed values are the same as valid values of the Origin header defined at [https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Origin](https://docs.aws.amazon.com/https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Origin)",
+              "title": "AllowedOrigins",
+              "type": "array"
+            },
+            "EnableStrictOriginEnforcement": {
+              "markdownDescription": "Whether channel playback is constrained by the origin site.",
+              "title": "EnableStrictOriginEnforcement",
+              "type": "boolean"
+            },
+            "Name": {
+              "markdownDescription": "Playback-restriction-policy name.",
+              "title": "Name",
+              "type": "string"
+            },
+            "Tags": {
+              "items": {
+                "$ref": "#/definitions/Tag"
+              },
+              "markdownDescription": "An array of key-value pairs to apply to this resource.\n\nFor more information, see [Tag](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ivs-playbackrestrictionpolicy-tag.html) .",
+              "title": "Tags",
+              "type": "array"
+            }
+          },
+          "required": [
+            "AllowedCountries",
+            "AllowedOrigins"
+          ],
+          "type": "object"
+        },
+        "Type": {
+          "enum": [
+            "AWS::IVS::PlaybackRestrictionPolicy"
+          ],
+          "type": "string"
+        },
+        "UpdateReplacePolicy": {
+          "enum": [
+            "Delete",
+            "Retain",
+            "Snapshot"
+          ],
+          "type": "string"
+        }
+      },
+      "required": [
+        "Type",
+        "Properties"
+      ],
+      "type": "object"
+    },
  "AWS::IVS::RecordingConfiguration": {
    "additionalProperties": false,
    "properties": {
@@ -111772,7 +115213,7 @@
        "properties": {
          "DestinationConfiguration": {
            "$ref": "#/definitions/AWS::IVS::RecordingConfiguration.DestinationConfiguration",
-            "markdownDescription": "A destination configuration contains information about where recorded video will be stored. 
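A small `AWS::IVS::EncoderConfiguration` fragment consistent with the `Video` limits above — 1280x720 is 921,600 pixels, under the 2,073,600 maximum (all values illustrative):

    "ExampleEncoderConfiguration": {
      "Type": "AWS::IVS::EncoderConfiguration",
      "Properties": {
        "Name": "example-encoder",
        "Video": {
          "Width": 1280,
          "Height": 720,
          "Bitrate": 2500000,
          "Framerate": 30
        }
      }
    }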
See the DestinationConfiguration property type for more information.",
+            "markdownDescription": "A destination configuration describes an S3 bucket where recorded video will be stored. See the DestinationConfiguration property type for more information.",
            "title": "DestinationConfiguration"
          },
          "Name": {
@@ -111974,6 +115415,100 @@
      ],
      "type": "object"
    },
+    "AWS::IVS::StorageConfiguration": {
+      "additionalProperties": false,
+      "properties": {
+        "Condition": {
+          "type": "string"
+        },
+        "DeletionPolicy": {
+          "enum": [
+            "Delete",
+            "Retain",
+            "Snapshot"
+          ],
+          "type": "string"
+        },
+        "DependsOn": {
+          "anyOf": [
+            {
+              "pattern": "^[a-zA-Z0-9]+$",
+              "type": "string"
+            },
+            {
+              "items": {
+                "pattern": "^[a-zA-Z0-9]+$",
+                "type": "string"
+              },
+              "type": "array"
+            }
+          ]
+        },
+        "Metadata": {
+          "type": "object"
+        },
+        "Properties": {
+          "additionalProperties": false,
+          "properties": {
+            "Name": {
+              "markdownDescription": "Storage configuration name.",
+              "title": "Name",
+              "type": "string"
+            },
+            "S3": {
+              "$ref": "#/definitions/AWS::IVS::StorageConfiguration.S3StorageConfiguration",
+              "markdownDescription": "An S3 storage configuration contains information about where recorded video will be stored. See the [S3StorageConfiguration](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ivs-storageconfiguration-s3storageconfiguration.html) property type for more information.",
+              "title": "S3"
+            },
+            "Tags": {
+              "items": {
+                "$ref": "#/definitions/Tag"
+              },
+              "markdownDescription": "An array of key-value pairs to apply to this resource.\n\nFor more information, see [Tag](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ivs-storageconfiguration-tag.html) .",
+              "title": "Tags",
+              "type": "array"
+            }
+          },
+          "required": [
+            "S3"
+          ],
+          "type": "object"
+        },
+        "Type": {
+          "enum": [
+            "AWS::IVS::StorageConfiguration"
+          ],
+          "type": "string"
+        },
+        "UpdateReplacePolicy": {
+          "enum": [
+            "Delete",
+            "Retain",
+            "Snapshot"
+          ],
+          "type": "string"
+        }
+      },
+      "required": [
+        "Type",
+        "Properties"
+      ],
+      "type": "object"
+    },
+    "AWS::IVS::StorageConfiguration.S3StorageConfiguration": {
+      "additionalProperties": false,
+      "properties": {
+        "BucketName": {
+          "markdownDescription": "Name of the S3 bucket where recorded video will be stored.",
+          "title": "BucketName",
+          "type": "string"
+        }
+      },
+      "required": [
+        "BucketName"
+      ],
+      "type": "object"
+    },
  "AWS::IVS::StreamKey": {
    "additionalProperties": false,
    "properties": {
      "Condition": {
@@ -129411,6 +132946,11 @@
        "title": "PendingWindowInDays",
        "type": "number"
      },
+      "RotationPeriodInDays": {
+        "markdownDescription": "The number of days between each automatic rotation. 
The default value is 365 days.",
+        "title": "RotationPeriodInDays",
+        "type": "number"
+      },
      "Tags": {
        "items": {
          "$ref": "#/definitions/Tag"
        },
@@ -160004,6 +163544,14 @@
        "Properties": {
          "additionalProperties": false,
          "properties": {
+            "Audiences": {
+              "items": {
+                "type": "string"
+              },
+              "markdownDescription": "The list of audiences defined in the channel.",
+              "title": "Audiences",
+              "type": "array"
+            },
            "ChannelName": {
              "markdownDescription": "The name of the channel.",
              "title": "ChannelName",
              "type": "string"
            },
@@ -166435,6 +169983,11 @@
            "title": "LabelTemplate",
            "type": "string"
          },
+          "LinkConfiguration": {
+            "$ref": "#/definitions/AWS::Oam::Link.LinkConfiguration",
+            "markdownDescription": "",
+            "title": "LinkConfiguration"
+          },
          "ResourceTypes": {
            "items": {
              "type": "string"
            },
@@ -166487,6 +170040,36 @@
      ],
      "type": "object"
    },
+    "AWS::Oam::Link.LinkConfiguration": {
+      "additionalProperties": false,
+      "properties": {
+        "LogGroupConfiguration": {
+          "$ref": "#/definitions/AWS::Oam::Link.LinkFilter",
+          "markdownDescription": "",
+          "title": "LogGroupConfiguration"
+        },
+        "MetricConfiguration": {
+          "$ref": "#/definitions/AWS::Oam::Link.LinkFilter",
+          "markdownDescription": "",
+          "title": "MetricConfiguration"
+        }
+      },
+      "type": "object"
+    },
+    "AWS::Oam::Link.LinkFilter": {
+      "additionalProperties": false,
+      "properties": {
+        "Filter": {
+          "markdownDescription": "",
+          "title": "Filter",
+          "type": "string"
+        }
+      },
+      "required": [
+        "Filter"
+      ],
+      "type": "object"
+    },
    "AWS::Oam::Sink": {
      "additionalProperties": false,
      "properties": {
        "Condition": {
@@ -172334,7 +175917,7 @@
          "title": "DatasetImportJob"
        },
        "DatasetType": {
-          "markdownDescription": "One of the following values:\n\n- Interactions\n- Items\n- Users\n- Actions\n- Action_Interactions",
+          "markdownDescription": "One of the following values:\n\n- Interactions\n- Items\n- Users\n\n> You can't use CloudFormation to create an Action Interactions or Actions dataset.",
          "title": "DatasetType",
          "type": "string"
        },
@@ -174325,7 +177908,7 @@
        },
        "EmailMessage": {
          "$ref": "#/definitions/AWS::Pinpoint::Campaign.CampaignEmailMessage",
-          "markdownDescription": "The message that the campaign sends through the email channel. If specified, this message overrides the default message.",
+          "markdownDescription": "The message that the campaign sends through the email channel. If specified, this message overrides the default message.\n\n> The maximum email message size is 200KB. You can use email templates to send larger email messages.",
          "title": "EmailMessage"
        },
        "GCMMessage": {
@@ -182294,7 +185877,7 @@
          "type": "string"
        },
        "Type": {
-          "markdownDescription": "The date time picker type of a `FilterDateTimePickerControl` . Choose one of the following options:\n\n- `SINGLE_VALUED` : The filter condition is a fixed date.\n- `DATE_RANGE` : The filter condition is a date time range.",
+          "markdownDescription": "The type of the `FilterDropDownControl` . Choose one of the following options:\n\n- `MULTI_SELECT` : The user can select multiple entries from a dropdown menu.\n- `SINGLE_SELECT` : The user can select a single entry from a dropdown menu.",
          "title": "Type",
          "type": "string"
        }
@@ -182458,7 +186041,7 @@
          "type": "string"
        },
        "Type": {
-          "markdownDescription": "The type of `FilterListControl` . Choose one of the following options:\n\n- `MULTI_SELECT` : The user can select multiple entries from the list.\n- `SINGLE_SELECT` : The user can select a single entry from the list.",
+          "markdownDescription": "The type of the `FilterListControl` . 
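Returning to the `AWS::Oam::Link` additions above, a hedged sketch of a `LinkConfiguration`; the filter expressions are assumptions based on the CloudWatch cross-account observability filter grammar (`Namespace` and `LogGroupName` with `LIKE`/`IN`), which this schema does not itself define:

    "LinkConfiguration": {
      "MetricConfiguration": { "Filter": "Namespace IN ('AWS/EC2', 'AWS/ELB')" },
      "LogGroupConfiguration": { "Filter": "LogGroupName LIKE 'aws/lambda/%'" }
    }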
Choose one of the following options:\n\n- `MULTI_SELECT` : The user can select multiple entries from the list.\n- `SINGLE_SELECT` : The user can select a single entry from the list.", "title": "Type", "type": "string" } @@ -182543,7 +186126,7 @@ "additionalProperties": false, "properties": { "AllSheets": { - "markdownDescription": "The configuration for applying a filter to all sheets.", + "markdownDescription": "The configuration that applies a filter to all sheets. When you choose `AllSheets` as the value for a `FilterScopeConfiguration` , this filter is applied to all visuals of all sheets in an Analysis, Dashboard, or Template. The `AllSheetsFilterScopeConfiguration` is chosen.", "title": "AllSheets", "type": "object" }, @@ -182583,12 +186166,12 @@ "type": "string" }, "MaximumValue": { - "markdownDescription": "The smaller value that is displayed at the left of the slider.", + "markdownDescription": "The larger value that is displayed at the right of the slider.", "title": "MaximumValue", "type": "number" }, "MinimumValue": { - "markdownDescription": "The larger value that is displayed at the right of the slider.", + "markdownDescription": "The smaller value that is displayed at the left of the slider.", "title": "MinimumValue", "type": "number" }, @@ -182608,7 +186191,7 @@ "type": "string" }, "Type": { - "markdownDescription": "The type of `FilterSliderControl` . Choose one of the following options:\n\n- `SINGLE_POINT` : Filter against(equals) a single data point.\n- `RANGE` : Filter data that is in a specified range.", + "markdownDescription": "The type of the `FilterSliderControl` . Choose one of the following options:\n\n- `SINGLE_POINT` : Filter against(equals) a single data point.\n- `RANGE` : Filter data that is in a specified range.", "title": "Type", "type": "string" } @@ -185973,12 +189556,12 @@ "title": "DisplayOptions" }, "MaximumValue": { - "markdownDescription": "The smaller value that is displayed at the left of the slider.", + "markdownDescription": "The larger value that is displayed at the right of the slider.", "title": "MaximumValue", "type": "number" }, "MinimumValue": { - "markdownDescription": "The larger value that is displayed at the right of the slider.", + "markdownDescription": "The smaller value that is displayed at the left of the slider.", "title": "MinimumValue", "type": "number" }, @@ -194555,7 +198138,7 @@ "type": "string" }, "Type": { - "markdownDescription": "The date time picker type of a `FilterDateTimePickerControl` . Choose one of the following options:\n\n- `SINGLE_VALUED` : The filter condition is a fixed date.\n- `DATE_RANGE` : The filter condition is a date time range.", + "markdownDescription": "The type of the `FilterDropDownControl` . Choose one of the following options:\n\n- `MULTI_SELECT` : The user can select multiple entries from a dropdown menu.\n- `SINGLE_SELECT` : The user can select a single entry from a dropdown menu.", "title": "Type", "type": "string" } @@ -194719,7 +198302,7 @@ "type": "string" }, "Type": { - "markdownDescription": "The type of `FilterListControl` . Choose one of the following options:\n\n- `MULTI_SELECT` : The user can select multiple entries from the list.\n- `SINGLE_SELECT` : The user can select a single entry from the list.", + "markdownDescription": "The type of the `FilterListControl` . 
Choose one of the following options:\n\n- `MULTI_SELECT` : The user can select multiple entries from the list.\n- `SINGLE_SELECT` : The user can select a single entry from the list.", "title": "Type", "type": "string" } @@ -194804,7 +198387,7 @@ "additionalProperties": false, "properties": { "AllSheets": { - "markdownDescription": "The configuration for applying a filter to all sheets.", + "markdownDescription": "The configuration that applies a filter to all sheets. When you choose `AllSheets` as the value for a `FilterScopeConfiguration` , this filter is applied to all visuals of all sheets in an Analysis, Dashboard, or Template. The `AllSheetsFilterScopeConfiguration` is chosen.", "title": "AllSheets", "type": "object" }, @@ -194844,12 +198427,12 @@ "type": "string" }, "MaximumValue": { - "markdownDescription": "The smaller value that is displayed at the left of the slider.", + "markdownDescription": "The larger value that is displayed at the right of the slider.", "title": "MaximumValue", "type": "number" }, "MinimumValue": { - "markdownDescription": "The larger value that is displayed at the right of the slider.", + "markdownDescription": "The smaller value that is displayed at the left of the slider.", "title": "MinimumValue", "type": "number" }, @@ -194869,7 +198452,7 @@ "type": "string" }, "Type": { - "markdownDescription": "The type of `FilterSliderControl` . Choose one of the following options:\n\n- `SINGLE_POINT` : Filter against(equals) a single data point.\n- `RANGE` : Filter data that is in a specified range.", + "markdownDescription": "The type of the `FilterSliderControl` . Choose one of the following options:\n\n- `SINGLE_POINT` : Filter against(equals) a single data point.\n- `RANGE` : Filter data that is in a specified range.", "title": "Type", "type": "string" } @@ -198248,12 +201831,12 @@ "title": "DisplayOptions" }, "MaximumValue": { - "markdownDescription": "The smaller value that is displayed at the left of the slider.", + "markdownDescription": "The larger value that is displayed at the right of the slider.", "title": "MaximumValue", "type": "number" }, "MinimumValue": { - "markdownDescription": "The larger value that is displayed at the right of the slider.", + "markdownDescription": "The smaller value that is displayed at the left of the slider.", "title": "MinimumValue", "type": "number" }, @@ -208763,7 +212346,7 @@ "type": "string" }, "Type": { - "markdownDescription": "The date time picker type of a `FilterDateTimePickerControl` . Choose one of the following options:\n\n- `SINGLE_VALUED` : The filter condition is a fixed date.\n- `DATE_RANGE` : The filter condition is a date time range.", + "markdownDescription": "The type of the `FilterDropDownControl` . Choose one of the following options:\n\n- `MULTI_SELECT` : The user can select multiple entries from a dropdown menu.\n- `SINGLE_SELECT` : The user can select a single entry from a dropdown menu.", "title": "Type", "type": "string" } @@ -208927,7 +212510,7 @@ "type": "string" }, "Type": { - "markdownDescription": "The type of `FilterListControl` . Choose one of the following options:\n\n- `MULTI_SELECT` : The user can select multiple entries from the list.\n- `SINGLE_SELECT` : The user can select a single entry from the list.", + "markdownDescription": "The type of the `FilterListControl` . 
Choose one of the following options:\n\n- `MULTI_SELECT` : The user can select multiple entries from the list.\n- `SINGLE_SELECT` : The user can select a single entry from the list.", "title": "Type", "type": "string" } @@ -209012,7 +212595,7 @@ "additionalProperties": false, "properties": { "AllSheets": { - "markdownDescription": "The configuration for applying a filter to all sheets.", + "markdownDescription": "The configuration that applies a filter to all sheets. When you choose `AllSheets` as the value for a `FilterScopeConfiguration` , this filter is applied to all visuals of all sheets in an Analysis, Dashboard, or Template. The `AllSheetsFilterScopeConfiguration` is chosen.", "title": "AllSheets", "type": "object" }, @@ -209052,12 +212635,12 @@ "type": "string" }, "MaximumValue": { - "markdownDescription": "The smaller value that is displayed at the left of the slider.", + "markdownDescription": "The larger value that is displayed at the right of the slider.", "title": "MaximumValue", "type": "number" }, "MinimumValue": { - "markdownDescription": "The larger value that is displayed at the right of the slider.", + "markdownDescription": "The smaller value that is displayed at the left of the slider.", "title": "MinimumValue", "type": "number" }, @@ -209077,7 +212660,7 @@ "type": "string" }, "Type": { - "markdownDescription": "The type of `FilterSliderControl` . Choose one of the following options:\n\n- `SINGLE_POINT` : Filter against(equals) a single data point.\n- `RANGE` : Filter data that is in a specified range.", + "markdownDescription": "The type of the `FilterSliderControl` . Choose one of the following options:\n\n- `SINGLE_POINT` : Filter against(equals) a single data point.\n- `RANGE` : Filter data that is in a specified range.", "title": "Type", "type": "string" } @@ -212419,12 +216002,12 @@ "title": "DisplayOptions" }, "MaximumValue": { - "markdownDescription": "The smaller value that is displayed at the left of the slider.", + "markdownDescription": "The larger value that is displayed at the right of the slider.", "title": "MaximumValue", "type": "number" }, "MinimumValue": { - "markdownDescription": "The larger value that is displayed at the right of the slider.", + "markdownDescription": "The smaller value that is displayed at the left of the slider.", "title": "MinimumValue", "type": "number" }, @@ -218968,6 +222551,11 @@ "title": "EngineVersion", "type": "string" }, + "ImageId": { + "markdownDescription": "A value that indicates the ID of the AMI.", + "title": "ImageId", + "type": "string" + }, "KMSKeyId": { "markdownDescription": "The AWS KMS key identifier for an encrypted CEV. A symmetric encryption KMS key is required for RDS Custom, but optional for Amazon RDS.\n\nIf you have an existing symmetric encryption KMS key in your account, you can use it with RDS Custom. No further action is necessary. If you don't already have a symmetric encryption KMS key in your account, follow the instructions in [Creating a symmetric encryption KMS key](https://docs.aws.amazon.com/kms/latest/developerguide/create-keys.html#create-symmetric-cmk) in the *AWS Key Management Service Developer Guide* .\n\nYou can choose the same symmetric encryption key when you create a CEV and a DB instance, or choose different keys.", "title": "KMSKeyId", @@ -218978,6 +222566,11 @@ "title": "Manifest", "type": "string" }, + "SourceCustomDbEngineVersionIdentifier": { + "markdownDescription": "The ARN of a CEV to use as a source for creating a new CEV. 
You can specify a different Amazon Machine Image (AMI) by using either `Source` or `UseAwsProvidedLatestImage` . You can't specify a different JSON manifest when you specify `SourceCustomDbEngineVersionIdentifier` .",
+ "title": "SourceCustomDbEngineVersionIdentifier",
+ "type": "string"
+ },
"Status": {
"markdownDescription": "A value that indicates the status of a custom engine version (CEV).",
"title": "Status",
"type": "string"
},
@@ -218990,10 +222583,14 @@
"markdownDescription": "A list of tags. For more information, see [Tagging Amazon RDS Resources](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Tagging.html) in the *Amazon RDS User Guide.*",
"title": "Tags",
"type": "array"
+ },
+ "UseAwsProvidedLatestImage": {
+ "markdownDescription": "Specifies whether to use the latest service-provided Amazon Machine Image (AMI) for the CEV. If you specify `UseAwsProvidedLatestImage` , you can't also specify `ImageId` .",
+ "title": "UseAwsProvidedLatestImage",
+ "type": "boolean"
+ }
},
"required": [
- "DatabaseInstallationFilesS3BucketName",
"Engine",
"EngineVersion"
],
@@ -219112,7 +222709,7 @@
"type": "string"
},
"DBInstanceParameterGroupName": {
- "markdownDescription": "The name of the DB parameter group to apply to all instances of the DB cluster.\n\n> When you apply a parameter group using the `DBInstanceParameterGroupName` parameter, the DB cluster isn't rebooted automatically. Also, parameter changes are applied immediately rather than during the next maintenance window. \n\nDefault: The existing name setting\n\nConstraints:\n\n- The DB parameter group must be in the same DB parameter group family as this DB cluster.",
+ "markdownDescription": "The name of the DB parameter group to apply to all instances of the DB cluster.\n\n> When you apply a parameter group using the `DBInstanceParameterGroupName` parameter, the DB cluster isn't rebooted automatically. Also, parameter changes are applied immediately rather than during the next maintenance window. \n\nValid for Cluster Type: Aurora DB clusters only\n\nDefault: The existing name setting\n\nConstraints:\n\n- The DB parameter group must be in the same DB parameter group family as this DB cluster.\n- The `DBInstanceParameterGroupName` parameter is valid in combination with the `AllowMajorVersionUpgrade` parameter for a major version upgrade only.",
"title": "DBInstanceParameterGroupName",
"type": "string"
},
@@ -219175,7 +222772,7 @@
"type": "string"
},
"EngineMode": {
- "markdownDescription": "The DB engine mode of the DB cluster, either `provisioned` or `serverless` .\n\nThe `serverless` engine mode only applies for Aurora Serverless v1 DB clusters.\n\nFor information about limitations and requirements for Serverless DB clusters, see the following sections in the *Amazon Aurora User Guide* :\n\n- [Limitations of Aurora Serverless v1](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-serverless.html#aurora-serverless.limitations)\n- [Requirements for Aurora Serverless v2](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-serverless-v2.requirements.html)\n\nValid for Cluster Type: Aurora DB clusters only",
+ "markdownDescription": "The DB engine mode of the DB cluster, either `provisioned` or `serverless` .\n\nThe `serverless` engine mode only applies for Aurora Serverless v1 DB clusters. 
Aurora Serverless v2 DB clusters use the `provisioned` engine mode.\n\nFor information about limitations and requirements for Serverless DB clusters, see the following sections in the *Amazon Aurora User Guide* :\n\n- [Limitations of Aurora Serverless v1](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-serverless.html#aurora-serverless.limitations)\n- [Requirements for Aurora Serverless v2](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-serverless-v2.requirements.html)\n\nValid for Cluster Type: Aurora DB clusters only", "title": "EngineMode", "type": "string" }, @@ -219792,7 +223389,7 @@ "title": "Endpoint" }, "Engine": { - "markdownDescription": "The name of the database engine to use for this DB instance. Not every database engine is available in every AWS Region.\n\nThis property is required when creating a DB instance.\n\n> You can change the architecture of an Oracle database from the non-container database (CDB) architecture to the CDB architecture by updating the `Engine` value in your templates from `oracle-ee` or `oracle-ee-cdb` to `oracle-se2-cdb` . Converting to the CDB architecture requires an interruption. \n\nValid Values:\n\n- `aurora-mysql` (for Aurora MySQL DB instances)\n- `aurora-postgresql` (for Aurora PostgreSQL DB instances)\n- `custom-oracle-ee` (for RDS Custom for Oracle DB instances)\n- `custom-oracle-ee-cdb` (for RDS Custom for Oracle DB instances)\n- `custom-sqlserver-ee` (for RDS Custom for SQL Server DB instances)\n- `custom-sqlserver-se` (for RDS Custom for SQL Server DB instances)\n- `custom-sqlserver-web` (for RDS Custom for SQL Server DB instances)\n- `db2-ae`\n- `db2-se`\n- `mariadb`\n- `mysql`\n- `oracle-ee`\n- `oracle-ee-cdb`\n- `oracle-se2`\n- `oracle-se2-cdb`\n- `postgres`\n- `sqlserver-ee`\n- `sqlserver-se`\n- `sqlserver-ex`\n- `sqlserver-web`", + "markdownDescription": "The name of the database engine to use for this DB instance. Not every database engine is available in every AWS Region.\n\nThis property is required when creating a DB instance.\n\n> You can convert an Oracle database from the non-CDB architecture to the container database (CDB) architecture by updating the `Engine` value in your templates from `oracle-ee` to `oracle-ee-cdb` or from `oracle-se2` to `oracle-se2-cdb` . Converting to the CDB architecture requires an interruption. \n\nValid Values:\n\n- `aurora-mysql` (for Aurora MySQL DB instances)\n- `aurora-postgresql` (for Aurora PostgreSQL DB instances)\n- `custom-oracle-ee` (for RDS Custom for Oracle DB instances)\n- `custom-oracle-ee-cdb` (for RDS Custom for Oracle DB instances)\n- `custom-sqlserver-ee` (for RDS Custom for SQL Server DB instances)\n- `custom-sqlserver-se` (for RDS Custom for SQL Server DB instances)\n- `custom-sqlserver-web` (for RDS Custom for SQL Server DB instances)\n- `db2-ae`\n- `db2-se`\n- `mariadb`\n- `mysql`\n- `oracle-ee`\n- `oracle-ee-cdb`\n- `oracle-se2`\n- `oracle-se2-cdb`\n- `postgres`\n- `sqlserver-ee`\n- `sqlserver-se`\n- `sqlserver-ex`\n- `sqlserver-web`", "title": "Engine", "type": "string" }, @@ -219807,7 +223404,7 @@ "type": "number" }, "KmsKeyId": { - "markdownDescription": "The ARN of the AWS KMS key that's used to encrypt the DB instance, such as `arn:aws:kms:us-east-1:012345678910:key/abcd1234-a123-456a-a12b-a123b4cd56ef` . If you enable the StorageEncrypted property but don't specify this property, AWS CloudFormation uses the default KMS key. 
If you specify this property, you must set the StorageEncrypted property to true.\n\nIf you specify the `SourceDBInstanceIdentifier` property, the value is inherited from the source DB instance if the read replica is created in the same region.\n\nIf you create an encrypted read replica in a different AWS Region, then you must specify a KMS key for the destination AWS Region. KMS encryption keys are specific to the region that they're created in, and you can't use encryption keys from one region in another region.\n\nIf you specify the `SnapshotIdentifier` property, the `StorageEncrypted` property value is inherited from the snapshot, and if the DB instance is encrypted, the specified `KmsKeyId` property is used.\n\nIf you specify `DBSecurityGroups` , AWS CloudFormation ignores this property. To specify both a security group and this property, you must use a VPC security group. For more information about Amazon RDS and VPC, see [Using Amazon RDS with Amazon VPC](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_VPC.html) in the *Amazon RDS User Guide* .\n\n*Amazon Aurora*\n\nNot applicable. The KMS key identifier is managed by the DB cluster.", + "markdownDescription": "The ARN of the AWS KMS key that's used to encrypt the DB instance, such as `arn:aws:kms:us-east-1:012345678910:key/abcd1234-a123-456a-a12b-a123b4cd56ef` . If you enable the StorageEncrypted property but don't specify this property, AWS CloudFormation uses the default KMS key. If you specify this property, you must set the StorageEncrypted property to true.\n\nIf you specify the `SourceDBInstanceIdentifier` property, the value is inherited from the source DB instance if the read replica is created in the same region.\n\nIf you create an encrypted read replica in a different AWS Region, then you must specify a KMS key for the destination AWS Region. KMS encryption keys are specific to the region that they're created in, and you can't use encryption keys from one region in another region.\n\nIf you specify the `DBSnapshotIdentifier` property, don't specify this property. The `StorageEncrypted` property value is inherited from the snapshot. If the DB instance is encrypted, the specified `KmsKeyId` property is also inherited from the snapshot.\n\nIf you specify `DBSecurityGroups` , AWS CloudFormation ignores this property. To specify both a security group and this property, you must use a VPC security group. For more information about Amazon RDS and VPC, see [Using Amazon RDS with Amazon VPC](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_VPC.html) in the *Amazon RDS User Guide* .\n\n*Amazon Aurora*\n\nNot applicable. The KMS key identifier is managed by the DB cluster.", "title": "KmsKeyId", "type": "string" }, @@ -219950,7 +223547,7 @@ "type": "string" }, "StorageEncrypted": { - "markdownDescription": "A value that indicates whether the DB instance is encrypted. By default, it isn't encrypted.\n\nIf you specify the `KmsKeyId` property, then you must enable encryption.\n\nIf you specify the `SourceDBInstanceIdentifier` property, don't specify this property. The value is inherited from the source DB instance, and if the DB instance is encrypted, the specified `KmsKeyId` property is used.\n\nIf you specify the `DBSnapshotIdentifier` and the specified snapshot is encrypted, don't specify this property. 
The value is inherited from the snapshot, and the specified `KmsKeyId` property is used.\n\nIf you specify the `DBSnapshotIdentifier` and the specified snapshot isn't encrypted, you can use this property to specify that the restored DB instance is encrypted. Specify the `KmsKeyId` property for the KMS key to use for encryption. If you don't want the restored DB instance to be encrypted, then don't set this property or set it to `false` .\n\n*Amazon Aurora*\n\nNot applicable. The encryption for DB instances is managed by the DB cluster.",
+ "markdownDescription": "A value that indicates whether the DB instance is encrypted. By default, it isn't encrypted.\n\nIf you specify the `KmsKeyId` property, then you must enable encryption.\n\nIf you specify the `SourceDBInstanceIdentifier` property, don't specify this property. The value is inherited from the source DB instance, and if the DB instance is encrypted, the specified `KmsKeyId` property is used.\n\nIf you specify the `DBSnapshotIdentifier` property, don't specify this property. The value is inherited from the snapshot.\n\n*Amazon Aurora*\n\nNot applicable. The encryption for DB instances is managed by the DB cluster.",
"title": "StorageEncrypted",
"type": "boolean"
},
@@ -223012,6 +226609,14 @@
"title": "RedshiftIdcApplicationArn",
"type": "string"
},
+ "SnapshotCopyConfigurations": {
+ "items": {
+ "$ref": "#/definitions/AWS::RedshiftServerless::Namespace.SnapshotCopyConfiguration"
+ },
+ "markdownDescription": "The snapshot copy configurations for the namespace.",
+ "title": "SnapshotCopyConfigurations",
+ "type": "array"
+ },
"Tags": {
"items": {
"$ref": "#/definitions/Tag"
},
@@ -223124,6 +226729,30 @@
},
"type": "object"
},
+ "AWS::RedshiftServerless::Namespace.SnapshotCopyConfiguration": {
+ "additionalProperties": false,
+ "properties": {
+ "DestinationKmsKeyId": {
+ "markdownDescription": "The ID of the KMS key to use to encrypt your snapshots in the destination AWS Region .",
+ "title": "DestinationKmsKeyId",
+ "type": "string"
+ },
+ "DestinationRegion": {
+ "markdownDescription": "The destination AWS Region to copy snapshots to.",
+ "title": "DestinationRegion",
+ "type": "string"
+ },
+ "SnapshotRetentionPeriod": {
+ "markdownDescription": "The retention period of snapshots that are copied to the destination AWS Region .",
+ "title": "SnapshotRetentionPeriod",
+ "type": "number"
+ }
+ },
+ "required": [
+ "DestinationRegion"
+ ],
+ "type": "object"
+ },
"AWS::RedshiftServerless::Workgroup": {
"additionalProperties": false,
"properties": {
@@ -226664,7 +230293,7 @@
},
"QueryLoggingConfig": {
"$ref": "#/definitions/AWS::Route53::HostedZone.QueryLoggingConfig",
- "markdownDescription": "Creates a configuration for DNS query logging. After you create a query logging configuration, Amazon Route 53 begins to publish log data to an Amazon CloudWatch Logs log group.\n\nDNS query logs contain information about the queries that Route 53 receives for a specified public hosted zone, such as the following:\n\n- Route 53 edge location that responded to the DNS query\n- Domain or subdomain that was requested\n- DNS record type, such as A or AAAA\n- DNS response code, such as `NoError` or `ServFail`\n\n- **Log Group and Resource Policy** - Before you create a query logging configuration, perform the following operations.\n\n> If you create a query logging configuration using the Route 53 console, Route 53 performs these operations automatically. \n\n- Create a CloudWatch Logs log group, and make note of the ARN, which you specify when you create a query logging configuration. 
Note the following:\n\n- You must create the log group in the us-east-1 region.\n- You must use the same AWS account to create the log group and the hosted zone that you want to configure query logging for.\n- When you create log groups for query logging, we recommend that you use a consistent prefix, for example:\n\n`/aws/route53/ *hosted zone name*`\n\nIn the next step, you'll create a resource policy, which controls access to one or more log groups and the associated AWS resources, such as Route 53 hosted zones. There's a limit on the number of resource policies that you can create, so we recommend that you use a consistent prefix so you can use the same resource policy for all the log groups that you create for query logging.\n- Create a CloudWatch Logs resource policy, and give it the permissions that Route 53 needs to create log streams and to send query logs to log streams. For the value of `Resource` , specify the ARN for the log group that you created in the previous step. To use the same resource policy for all the CloudWatch Logs log groups that you created for query logging configurations, replace the hosted zone name with `*` , for example:\n\n`arn:aws:logs:us-east-1:123412341234:log-group:/aws/route53/*`\n\nTo avoid the confused deputy problem, a security issue where an entity without a permission for an action can coerce a more-privileged entity to perform it, you can optionally limit the permissions that a service has to a resource in a resource-based policy by supplying the following values:\n\n- For `aws:SourceArn` , supply the hosted zone ARN used in creating the query logging configuration. For example, `aws:SourceArn: arn:aws:route53:::hostedzone/hosted zone ID` .\n- For `aws:SourceAccount` , supply the account ID for the account that creates the query logging configuration. For example, `aws:SourceAccount:111111111111` .\n\nFor more information, see [The confused deputy problem](https://docs.aws.amazon.com/IAM/latest/UserGuide/confused-deputy.html) in the *AWS IAM User Guide* .\n\n> You can't use the CloudWatch console to create or edit a resource policy. You must use the CloudWatch API, one of the AWS SDKs, or the AWS CLI .\n- **Log Streams and Edge Locations** - When Route 53 finishes creating the configuration for DNS query logging, it does the following:\n\n- Creates a log stream for an edge location the first time that the edge location responds to DNS queries for the specified hosted zone. That log stream is used to log all queries that Route 53 responds to for that edge location.\n- Begins to send query logs to the applicable log stream.\n\nThe name of each log stream is in the following format:\n\n`*hosted zone ID* / *edge location code*`\n\nThe edge location code is a three-letter code and an arbitrarily assigned number, for example, DFW3. The three-letter code typically corresponds with the International Air Transport Association airport code for an airport near the edge location. (These abbreviations might change in the future.) For a list of edge locations, see \"The Route 53 Global Network\" on the [Route 53 Product Details](https://docs.aws.amazon.com/route53/details/) page.\n- **Queries That Are Logged** - Query logs contain only the queries that DNS resolvers forward to Route 53. If a DNS resolver has already cached the response to a query (such as the IP address for a load balancer for example.com), the resolver will continue to return the cached response. 
It doesn't forward another query to Route 53 until the TTL for the corresponding resource record set expires. Depending on how many DNS queries are submitted for a resource record set, and depending on the TTL for that resource record set, query logs might contain information about only one query out of every several thousand queries that are submitted to DNS. For more information about how DNS works, see [Routing Internet Traffic to Your Website or Web Application](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/welcome-dns-service.html) in the *Amazon Route 53 Developer Guide* .\n- **Log File Format** - For a list of the values in each query log and the format of each value, see [Logging DNS Queries](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/query-logs.html) in the *Amazon Route 53 Developer Guide* .\n- **Pricing** - For information about charges for query logs, see [Amazon CloudWatch Pricing](https://docs.aws.amazon.com/cloudwatch/pricing/) .\n- **How to Stop Logging** - If you want Route 53 to stop sending query logs to CloudWatch Logs, delete the query logging configuration. For more information, see [DeleteQueryLoggingConfig](https://docs.aws.amazon.com/Route53/latest/APIReference/API_DeleteQueryLoggingConfig.html) .", + "markdownDescription": "Creates a configuration for DNS query logging. After you create a query logging configuration, Amazon Route 53 begins to publish log data to an Amazon CloudWatch Logs log group.\n\nDNS query logs contain information about the queries that Route 53 receives for a specified public hosted zone, such as the following:\n\n- Route 53 edge location that responded to the DNS query\n- Domain or subdomain that was requested\n- DNS record type, such as A or AAAA\n- DNS response code, such as `NoError` or `ServFail`\n\n- **Log Group and Resource Policy** - Before you create a query logging configuration, perform the following operations.\n\n> If you create a query logging configuration using the Route 53 console, Route 53 performs these operations automatically. \n\n- Create a CloudWatch Logs log group, and make note of the ARN, which you specify when you create a query logging configuration. Note the following:\n\n- You must create the log group in the us-east-1 region.\n- You must use the same AWS account to create the log group and the hosted zone that you want to configure query logging for.\n- When you create log groups for query logging, we recommend that you use a consistent prefix, for example:\n\n`/aws/route53/ *hosted zone name*`\n\nIn the next step, you'll create a resource policy, which controls access to one or more log groups and the associated AWS resources, such as Route 53 hosted zones. There's a limit on the number of resource policies that you can create, so we recommend that you use a consistent prefix so you can use the same resource policy for all the log groups that you create for query logging.\n- Create a CloudWatch Logs resource policy, and give it the permissions that Route 53 needs to create log streams and to send query logs to log streams. You must create the CloudWatch Logs resource policy in the us-east-1 region. For the value of `Resource` , specify the ARN for the log group that you created in the previous step. 
To use the same resource policy for all the CloudWatch Logs log groups that you created for query logging configurations, replace the hosted zone name with `*` , for example:\n\n`arn:aws:logs:us-east-1:123412341234:log-group:/aws/route53/*`\n\nTo avoid the confused deputy problem, a security issue where an entity without a permission for an action can coerce a more-privileged entity to perform it, you can optionally limit the permissions that a service has to a resource in a resource-based policy by supplying the following values:\n\n- For `aws:SourceArn` , supply the hosted zone ARN used in creating the query logging configuration. For example, `aws:SourceArn: arn:aws:route53:::hostedzone/hosted zone ID` .\n- For `aws:SourceAccount` , supply the account ID for the account that creates the query logging configuration. For example, `aws:SourceAccount:111111111111` .\n\nFor more information, see [The confused deputy problem](https://docs.aws.amazon.com/IAM/latest/UserGuide/confused-deputy.html) in the *AWS IAM User Guide* .\n\n> You can't use the CloudWatch console to create or edit a resource policy. You must use the CloudWatch API, one of the AWS SDKs, or the AWS CLI .\n- **Log Streams and Edge Locations** - When Route 53 finishes creating the configuration for DNS query logging, it does the following:\n\n- Creates a log stream for an edge location the first time that the edge location responds to DNS queries for the specified hosted zone. That log stream is used to log all queries that Route 53 responds to for that edge location.\n- Begins to send query logs to the applicable log stream.\n\nThe name of each log stream is in the following format:\n\n`*hosted zone ID* / *edge location code*`\n\nThe edge location code is a three-letter code and an arbitrarily assigned number, for example, DFW3. The three-letter code typically corresponds with the International Air Transport Association airport code for an airport near the edge location. (These abbreviations might change in the future.) For a list of edge locations, see \"The Route 53 Global Network\" on the [Route 53 Product Details](https://docs.aws.amazon.com/route53/details/) page.\n- **Queries That Are Logged** - Query logs contain only the queries that DNS resolvers forward to Route 53. If a DNS resolver has already cached the response to a query (such as the IP address for a load balancer for example.com), the resolver will continue to return the cached response. It doesn't forward another query to Route 53 until the TTL for the corresponding resource record set expires. Depending on how many DNS queries are submitted for a resource record set, and depending on the TTL for that resource record set, query logs might contain information about only one query out of every several thousand queries that are submitted to DNS. 
For more information about how DNS works, see [Routing Internet Traffic to Your Website or Web Application](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/welcome-dns-service.html) in the *Amazon Route 53 Developer Guide* .\n- **Log File Format** - For a list of the values in each query log and the format of each value, see [Logging DNS Queries](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/query-logs.html) in the *Amazon Route 53 Developer Guide* .\n- **Pricing** - For information about charges for query logs, see [Amazon CloudWatch Pricing](https://docs.aws.amazon.com/cloudwatch/pricing/) .\n- **How to Stop Logging** - If you want Route 53 to stop sending query logs to CloudWatch Logs, delete the query logging configuration. For more information, see [DeleteQueryLoggingConfig](https://docs.aws.amazon.com/Route53/latest/APIReference/API_DeleteQueryLoggingConfig.html) .", "title": "QueryLoggingConfig" }, "VPCs": { @@ -226962,7 +230591,7 @@ "type": "string" }, "Type": { - "markdownDescription": "The DNS record type. For information about different record types and how data is encoded for them, see [Supported DNS Resource Record Types](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/ResourceRecordTypes.html) in the *Amazon Route 53 Developer Guide* .\n\nValid values for basic resource record sets: `A` | `AAAA` | `CAA` | `CNAME` | `DS` | `MX` | `NAPTR` | `NS` | `PTR` | `SOA` | `SPF` | `SRV` | `TXT`\n\nValues for weighted, latency, geolocation, and failover resource record sets: `A` | `AAAA` | `CAA` | `CNAME` | `MX` | `NAPTR` | `PTR` | `SPF` | `SRV` | `TXT` . When creating a group of weighted, latency, geolocation, or failover resource record sets, specify the same value for all of the resource record sets in the group.\n\nValid values for multivalue answer resource record sets: `A` | `AAAA` | `MX` | `NAPTR` | `PTR` | `SPF` | `SRV` | `TXT`\n\n> SPF records were formerly used to verify the identity of the sender of email messages. However, we no longer recommend that you create resource record sets for which the value of `Type` is `SPF` . RFC 7208, *Sender Policy Framework (SPF) for Authorizing Use of Domains in Email, Version 1* , has been updated to say, \"...[I]ts existence and mechanism defined in [RFC4408] have led to some interoperability issues. Accordingly, its use is no longer appropriate for SPF version 1; implementations are not to use it.\" In RFC 7208, see section 14.1, [The SPF DNS Record Type](https://docs.aws.amazon.com/http://tools.ietf.org/html/rfc7208#section-14.1) . \n\nValues for alias resource record sets:\n\n- *Amazon API Gateway custom regional APIs and edge-optimized APIs:* `A`\n- *CloudFront distributions:* `A`\n\nIf IPv6 is enabled for the distribution, create two resource record sets to route traffic to your distribution, one with a value of `A` and one with a value of `AAAA` .\n- *Amazon API Gateway environment that has a regionalized subdomain* : `A`\n- *ELB load balancers:* `A` | `AAAA`\n- *Amazon S3 buckets:* `A`\n- *Amazon Virtual Private Cloud interface VPC endpoints* `A`\n- *Another resource record set in this hosted zone:* Specify the type of the resource record set that you're creating the alias for. All values are supported except `NS` and `SOA` .\n\n> If you're creating an alias record that has the same name as the hosted zone (known as the zone apex), you can't route traffic to a record for which the value of `Type` is `CNAME` . 
This is because the alias record must have the same type as the record you're routing traffic to, and creating a CNAME record for the zone apex isn't supported even for an alias record.", + "markdownDescription": "The DNS record type. For information about different record types and how data is encoded for them, see [Supported DNS Resource Record Types](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/ResourceRecordTypes.html) in the *Amazon Route 53 Developer Guide* .\n\nValid values for basic resource record sets: `A` | `AAAA` | `CAA` | `CNAME` | `DS` | `MX` | `NAPTR` | `NS` | `PTR` | `SOA` | `SPF` | `SRV` | `TXT`\n\nValues for weighted, latency, geolocation, and failover resource record sets: `A` | `AAAA` | `CAA` | `CNAME` | `MX` | `NAPTR` | `PTR` | `SPF` | `SRV` | `TXT` . When creating a group of weighted, latency, geolocation, or failover resource record sets, specify the same value for all of the resource record sets in the group.\n\nValid values for multivalue answer resource record sets: `A` | `AAAA` | `MX` | `NAPTR` | `PTR` | `SPF` | `SRV` | `TXT` | `CAA`\n\n> SPF records were formerly used to verify the identity of the sender of email messages. However, we no longer recommend that you create resource record sets for which the value of `Type` is `SPF` . RFC 7208, *Sender Policy Framework (SPF) for Authorizing Use of Domains in Email, Version 1* , has been updated to say, \"...[I]ts existence and mechanism defined in [RFC4408] have led to some interoperability issues. Accordingly, its use is no longer appropriate for SPF version 1; implementations are not to use it.\" In RFC 7208, see section 14.1, [The SPF DNS Record Type](https://docs.aws.amazon.com/http://tools.ietf.org/html/rfc7208#section-14.1) . \n\nValues for alias resource record sets:\n\n- *Amazon API Gateway custom regional APIs and edge-optimized APIs:* `A`\n- *CloudFront distributions:* `A`\n\nIf IPv6 is enabled for the distribution, create two resource record sets to route traffic to your distribution, one with a value of `A` and one with a value of `AAAA` .\n- *Amazon API Gateway environment that has a regionalized subdomain* : `A`\n- *ELB load balancers:* `A` | `AAAA`\n- *Amazon S3 buckets:* `A`\n- *Amazon Virtual Private Cloud interface VPC endpoints* `A`\n- *Another resource record set in this hosted zone:* Specify the type of the resource record set that you're creating the alias for. All values are supported except `NS` and `SOA` .\n\n> If you're creating an alias record that has the same name as the hosted zone (known as the zone apex), you can't route traffic to a record for which the value of `Type` is `CNAME` . This is because the alias record must have the same type as the record you're routing traffic to, and creating a CNAME record for the zone apex isn't supported even for an alias record.", "title": "Type", "type": "string" }, @@ -227381,7 +231010,7 @@ "type": "string" }, "Type": { - "markdownDescription": "The DNS record type. For information about different record types and how data is encoded for them, see [Supported DNS Resource Record Types](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/ResourceRecordTypes.html) in the *Amazon Route 53 Developer Guide* .\n\nValid values for basic resource record sets: `A` | `AAAA` | `CAA` | `CNAME` | `DS` | `MX` | `NAPTR` | `NS` | `PTR` | `SOA` | `SPF` | `SRV` | `TXT`\n\nValues for weighted, latency, geolocation, and failover resource record sets: `A` | `AAAA` | `CAA` | `CNAME` | `MX` | `NAPTR` | `PTR` | `SPF` | `SRV` | `TXT` . 
When creating a group of weighted, latency, geolocation, or failover resource record sets, specify the same value for all of the resource record sets in the group.\n\nValid values for multivalue answer resource record sets: `A` | `AAAA` | `MX` | `NAPTR` | `PTR` | `SPF` | `SRV` | `TXT`\n\n> SPF records were formerly used to verify the identity of the sender of email messages. However, we no longer recommend that you create resource record sets for which the value of `Type` is `SPF` . RFC 7208, *Sender Policy Framework (SPF) for Authorizing Use of Domains in Email, Version 1* , has been updated to say, \"...[I]ts existence and mechanism defined in [RFC4408] have led to some interoperability issues. Accordingly, its use is no longer appropriate for SPF version 1; implementations are not to use it.\" In RFC 7208, see section 14.1, [The SPF DNS Record Type](https://docs.aws.amazon.com/http://tools.ietf.org/html/rfc7208#section-14.1) . \n\nValues for alias resource record sets:\n\n- *Amazon API Gateway custom regional APIs and edge-optimized APIs:* `A`\n- *CloudFront distributions:* `A`\n\nIf IPv6 is enabled for the distribution, create two resource record sets to route traffic to your distribution, one with a value of `A` and one with a value of `AAAA` .\n- *Amazon API Gateway environment that has a regionalized subdomain* : `A`\n- *ELB load balancers:* `A` | `AAAA`\n- *Amazon S3 buckets:* `A`\n- *Amazon Virtual Private Cloud interface VPC endpoints* `A`\n- *Another resource record set in this hosted zone:* Specify the type of the resource record set that you're creating the alias for. All values are supported except `NS` and `SOA` .\n\n> If you're creating an alias record that has the same name as the hosted zone (known as the zone apex), you can't route traffic to a record for which the value of `Type` is `CNAME` . This is because the alias record must have the same type as the record you're routing traffic to, and creating a CNAME record for the zone apex isn't supported even for an alias record.", + "markdownDescription": "The DNS record type. For information about different record types and how data is encoded for them, see [Supported DNS Resource Record Types](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/ResourceRecordTypes.html) in the *Amazon Route 53 Developer Guide* .\n\nValid values for basic resource record sets: `A` | `AAAA` | `CAA` | `CNAME` | `DS` | `MX` | `NAPTR` | `NS` | `PTR` | `SOA` | `SPF` | `SRV` | `TXT`\n\nValues for weighted, latency, geolocation, and failover resource record sets: `A` | `AAAA` | `CAA` | `CNAME` | `MX` | `NAPTR` | `PTR` | `SPF` | `SRV` | `TXT` . When creating a group of weighted, latency, geolocation, or failover resource record sets, specify the same value for all of the resource record sets in the group.\n\nValid values for multivalue answer resource record sets: `A` | `AAAA` | `MX` | `NAPTR` | `PTR` | `SPF` | `SRV` | `TXT` | `CAA`\n\n> SPF records were formerly used to verify the identity of the sender of email messages. However, we no longer recommend that you create resource record sets for which the value of `Type` is `SPF` . RFC 7208, *Sender Policy Framework (SPF) for Authorizing Use of Domains in Email, Version 1* , has been updated to say, \"...[I]ts existence and mechanism defined in [RFC4408] have led to some interoperability issues. 
Accordingly, its use is no longer appropriate for SPF version 1; implementations are not to use it.\" In RFC 7208, see section 14.1, [The SPF DNS Record Type](https://docs.aws.amazon.com/http://tools.ietf.org/html/rfc7208#section-14.1) . \n\nValues for alias resource record sets:\n\n- *Amazon API Gateway custom regional APIs and edge-optimized APIs:* `A`\n- *CloudFront distributions:* `A`\n\nIf IPv6 is enabled for the distribution, create two resource record sets to route traffic to your distribution, one with a value of `A` and one with a value of `AAAA` .\n- *Amazon API Gateway environment that has a regionalized subdomain* : `A`\n- *ELB load balancers:* `A` | `AAAA`\n- *Amazon S3 buckets:* `A`\n- *Amazon Virtual Private Cloud interface VPC endpoints* `A`\n- *Another resource record set in this hosted zone:* Specify the type of the resource record set that you're creating the alias for. All values are supported except `NS` and `SOA` .\n\n> If you're creating an alias record that has the same name as the hosted zone (known as the zone apex), you can't route traffic to a record for which the value of `Type` is `CNAME` . This is because the alias record must have the same type as the record you're routing traffic to, and creating a CNAME record for the zone apex isn't supported even for an alias record.",
"title": "Type",
"type": "string"
},
@@ -235621,7 +239250,7 @@
"type": "string"
},
"Name": {
- "markdownDescription": "A name for the SSM document.\n\n> You can't use the following strings as document name prefixes. These are reserved by AWS for use as document name prefixes:\n> \n> - `aws`\n> - `amazon`\n> - `amzn`",
+ "markdownDescription": "A name for the SSM document.\n\n> You can't use the following strings as document name prefixes. These are reserved by AWS for use as document name prefixes:\n> \n> - `aws`\n> - `amazon`\n> - `amzn`\n> - `AWSEC2`\n> - `AWSConfigRemediation`\n> - `AWSSupport`",
"title": "Name",
"type": "string"
},
@@ -238613,6 +242242,11 @@
"title": "AppImageConfigName",
"type": "string"
},
+ "CodeEditorAppImageConfig": {
+ "$ref": "#/definitions/AWS::SageMaker::AppImageConfig.CodeEditorAppImageConfig",
+ "markdownDescription": "The configuration for the file system and the runtime, such as the environment variables and entry point.",
+ "title": "CodeEditorAppImageConfig"
+ },
"JupyterLabAppImageConfig": {
"$ref": "#/definitions/AWS::SageMaker::AppImageConfig.JupyterLabAppImageConfig",
"markdownDescription": "The configuration for the file system and the runtime, such as the environment variables and entry point.",
"title": "JupyterLabAppImageConfig"
@@ -238658,6 +242292,17 @@
],
"type": "object"
},
+ "AWS::SageMaker::AppImageConfig.CodeEditorAppImageConfig": {
+ "additionalProperties": false,
+ "properties": {
+ "ContainerConfig": {
+ "$ref": "#/definitions/AWS::SageMaker::AppImageConfig.ContainerConfig",
+ "markdownDescription": "The configuration used to run the application image container.",
+ "title": "ContainerConfig"
+ }
+ },
+ "type": "object"
+ },
"AWS::SageMaker::AppImageConfig.ContainerConfig": {
"additionalProperties": false,
"properties": {
@@ -239750,6 +243395,14 @@
"AWS::SageMaker::Domain.CodeEditorAppSettings": {
"additionalProperties": false,
"properties": {
+ "CustomImages": {
+ "items": {
+ "$ref": "#/definitions/AWS::SageMaker::Domain.CustomImage"
+ },
+ "markdownDescription": "A list of custom SageMaker images that are configured to run as a Code Editor app.",
+ "title": "CustomImages",
+ "type": "array"
+ },
"DefaultResourceSpec": {
"$ref": "#/definitions/AWS::SageMaker::Domain.ResourceSpec",
"markdownDescription": "The default instance type 
and the Amazon Resource Name (ARN) of the default SageMaker image used by the Code Editor app.", @@ -241253,7 +244906,7 @@ "additionalProperties": false, "properties": { "FeatureName": { - "markdownDescription": "The name of a feature. The type must be a string. `FeatureName` cannot be any of the following: `is_deleted` , `write_time` , `api_invocation_time` .\n\nThe name:\n\n- Must start and end with an alphanumeric character.\n- Can only include alphanumeric characters, underscores, and hyphens. Spaces are not allowed.", + "markdownDescription": "The name of a feature. The type must be a string. `FeatureName` cannot be any of the following: `is_deleted` , `write_time` , `api_invocation_time` .\n\nThe name:\n\n- Must start with an alphanumeric character.\n- Can only include alphanumeric characters, underscores, and hyphens. Spaces are not allowed.", "title": "FeatureName", "type": "string" }, @@ -247561,6 +251214,14 @@ "AWS::SageMaker::UserProfile.CodeEditorAppSettings": { "additionalProperties": false, "properties": { + "CustomImages": { + "items": { + "$ref": "#/definitions/AWS::SageMaker::UserProfile.CustomImage" + }, + "markdownDescription": "A list of custom SageMaker images that are configured to run as a Code Editor app.", + "title": "CustomImages", + "type": "array" + }, "DefaultResourceSpec": { "$ref": "#/definitions/AWS::SageMaker::UserProfile.ResourceSpec", "markdownDescription": "The default instance type and the Amazon Resource Name (ARN) of the default SageMaker image used by the Code Editor app.", @@ -249786,6 +253447,73 @@ ], "type": "object" }, + "AWS::SecurityHub::DelegatedAdmin": { + "additionalProperties": false, + "properties": { + "Condition": { + "type": "string" + }, + "DeletionPolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + }, + "DependsOn": { + "anyOf": [ + { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + { + "items": { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + "type": "array" + } + ] + }, + "Metadata": { + "type": "object" + }, + "Properties": { + "additionalProperties": false, + "properties": { + "AdminAccountId": { + "markdownDescription": "The AWS account identifier of the account to designate as the Security Hub administrator account.", + "title": "AdminAccountId", + "type": "string" + } + }, + "required": [ + "AdminAccountId" + ], + "type": "object" + }, + "Type": { + "enum": [ + "AWS::SecurityHub::DelegatedAdmin" + ], + "type": "string" + }, + "UpdateReplacePolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + } + }, + "required": [ + "Type", + "Properties" + ], + "type": "object" + }, "AWS::SecurityHub::Hub": { "additionalProperties": false, "properties": { @@ -249870,6 +253598,1094 @@ ], "type": "object" }, + "AWS::SecurityHub::Insight": { + "additionalProperties": false, + "properties": { + "Condition": { + "type": "string" + }, + "DeletionPolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + }, + "DependsOn": { + "anyOf": [ + { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + { + "items": { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + "type": "array" + } + ] + }, + "Metadata": { + "type": "object" + }, + "Properties": { + "additionalProperties": false, + "properties": { + "Filters": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.AwsSecurityFindingFilters", + "markdownDescription": "One or more attributes used to filter the findings included in the insight. 
The insight only includes findings that match the criteria defined in the filters. You can filter by up to ten finding attributes. For each attribute, you can provide up to 20 filter values.", + "title": "Filters" + }, + "GroupByAttribute": { + "markdownDescription": "The grouping attribute for the insight's findings. Indicates how to group the matching findings, and identifies the type of item that the insight applies to. For example, if an insight is grouped by resource identifier, then the insight produces a list of resource identifiers.", + "title": "GroupByAttribute", + "type": "string" + }, + "Name": { + "markdownDescription": "The name of a Security Hub insight.", + "title": "Name", + "type": "string" + } + }, + "required": [ + "Filters", + "GroupByAttribute", + "Name" + ], + "type": "object" + }, + "Type": { + "enum": [ + "AWS::SecurityHub::Insight" + ], + "type": "string" + }, + "UpdateReplacePolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + } + }, + "required": [ + "Type", + "Properties" + ], + "type": "object" + }, + "AWS::SecurityHub::Insight.AwsSecurityFindingFilters": { + "additionalProperties": false, + "properties": { + "AwsAccountId": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.StringFilter" + }, + "markdownDescription": "The AWS account ID in which a finding is generated.", + "title": "AwsAccountId", + "type": "array" + }, + "AwsAccountName": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.StringFilter" + }, + "markdownDescription": "The name of the AWS account in which a finding is generated.", + "title": "AwsAccountName", + "type": "array" + }, + "CompanyName": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.StringFilter" + }, + "markdownDescription": "The name of the findings provider (company) that owns the solution (product) that generates findings.", + "title": "CompanyName", + "type": "array" + }, + "ComplianceAssociatedStandardsId": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.StringFilter" + }, + "markdownDescription": "The unique identifier of a standard in which a control is enabled. This field consists of the resource portion of the Amazon Resource Name (ARN) returned for a standard in the [DescribeStandards](https://docs.aws.amazon.com/securityhub/1.0/APIReference/API_DescribeStandards.html) API response.", + "title": "ComplianceAssociatedStandardsId", + "type": "array" + }, + "ComplianceSecurityControlId": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.StringFilter" + }, + "markdownDescription": "The unique identifier of a control across standards. 
Values for this field typically consist of an AWS service and a number, such as APIGateway.5.", + "title": "ComplianceSecurityControlId", + "type": "array" + }, + "ComplianceSecurityControlParametersName": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.StringFilter" + }, + "markdownDescription": "The name of a security control parameter.", + "title": "ComplianceSecurityControlParametersName", + "type": "array" + }, + "ComplianceSecurityControlParametersValue": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.StringFilter" + }, + "markdownDescription": "The current value of a security control parameter.", + "title": "ComplianceSecurityControlParametersValue", + "type": "array" + }, + "ComplianceStatus": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.StringFilter" + }, + "markdownDescription": "Exclusive to findings that are generated as the result of a check run against a specific rule in a supported standard, such as CIS AWS Foundations. Contains security standard-related finding details.", + "title": "ComplianceStatus", + "type": "array" + }, + "Confidence": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.NumberFilter" + }, + "markdownDescription": "A finding's confidence. Confidence is defined as the likelihood that a finding accurately identifies the behavior or issue that it was intended to identify.\n\nConfidence is scored on a 0-100 basis using a ratio scale, where 0 means zero percent confidence and 100 means 100 percent confidence.", + "title": "Confidence", + "type": "array" + }, + "CreatedAt": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.DateFilter" + }, + "markdownDescription": "A timestamp that indicates when the security findings provider created the potential security issue that a finding reflects.\n\nThis field accepts only the specified formats. Timestamps can end with `Z` or `(\"+\" / \"-\") time-hour [\":\" time-minute]` . The time-secfrac after seconds is limited to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:\n\n- `YYYY-MM-DDTHH:MM:SSZ` (for example, `2019-01-31T23:00:00Z` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ` (for example, `2019-01-31T23:00:00.123456789Z` )\n- `YYYY-MM-DDTHH:MM:SS+HH:MM` (for example, `2024-01-04T15:25:10+17:59` )\n- `YYYY-MM-DDTHH:MM:SS-HHMM` (for example, `2024-01-04T15:25:10-1759` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM` (for example, `2024-01-04T15:25:10.123456789+17:59` )", + "title": "CreatedAt", + "type": "array" + }, + "Criticality": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.NumberFilter" + }, + "markdownDescription": "The level of importance assigned to the resources associated with the finding.\n\nA score of 0 means that the underlying resources have no criticality, and a score of 100 is reserved for the most critical resources.", + "title": "Criticality", + "type": "array" + }, + "Description": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.StringFilter" + }, + "markdownDescription": "A finding's description.", + "title": "Description", + "type": "array" + }, + "FindingProviderFieldsConfidence": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.NumberFilter" + }, + "markdownDescription": "The finding provider value for the finding confidence. 
Confidence is defined as the likelihood that a finding accurately identifies the behavior or issue that it was intended to identify.\n\nConfidence is scored on a 0-100 basis using a ratio scale, where 0 means zero percent confidence and 100 means 100 percent confidence.", + "title": "FindingProviderFieldsConfidence", + "type": "array" + }, + "FindingProviderFieldsCriticality": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.NumberFilter" + }, + "markdownDescription": "The finding provider value for the level of importance assigned to the resources associated with the findings.\n\nA score of 0 means that the underlying resources have no criticality, and a score of 100 is reserved for the most critical resources.", + "title": "FindingProviderFieldsCriticality", + "type": "array" + }, + "FindingProviderFieldsRelatedFindingsId": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.StringFilter" + }, + "markdownDescription": "The finding identifier of a related finding that is identified by the finding provider.", + "title": "FindingProviderFieldsRelatedFindingsId", + "type": "array" + }, + "FindingProviderFieldsRelatedFindingsProductArn": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.StringFilter" + }, + "markdownDescription": "The ARN of the solution that generated a related finding that is identified by the finding provider.", + "title": "FindingProviderFieldsRelatedFindingsProductArn", + "type": "array" + }, + "FindingProviderFieldsSeverityLabel": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.StringFilter" + }, + "markdownDescription": "The finding provider value for the severity label.", + "title": "FindingProviderFieldsSeverityLabel", + "type": "array" + }, + "FindingProviderFieldsSeverityOriginal": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.StringFilter" + }, + "markdownDescription": "The finding provider's original value for the severity.", + "title": "FindingProviderFieldsSeverityOriginal", + "type": "array" + }, + "FindingProviderFieldsTypes": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.StringFilter" + }, + "markdownDescription": "One or more finding types that the finding provider assigned to the finding. Uses the format of `namespace/category/classifier` that classify a finding.\n\nValid namespace values are: Software and Configuration Checks | TTPs | Effects | Unusual Behaviors | Sensitive Data Identifications", + "title": "FindingProviderFieldsTypes", + "type": "array" + }, + "FirstObservedAt": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.DateFilter" + }, + "markdownDescription": "A timestamp that indicates when the security findings provider first observed the potential security issue that a finding captured.\n\nThis field accepts only the specified formats. Timestamps can end with `Z` or `(\"+\" / \"-\") time-hour [\":\" time-minute]` . The time-secfrac after seconds is limited to a maximum of 9 digits. The offset is bounded by +/-18:00. 
Here are valid timestamp formats with examples:\n\n- `YYYY-MM-DDTHH:MM:SSZ` (for example, `2019-01-31T23:00:00Z` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ` (for example, `2019-01-31T23:00:00.123456789Z` )\n- `YYYY-MM-DDTHH:MM:SS+HH:MM` (for example, `2024-01-04T15:25:10+17:59` )\n- `YYYY-MM-DDTHH:MM:SS-HHMM` (for example, `2024-01-04T15:25:10-1759` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM` (for example, `2024-01-04T15:25:10.123456789+17:59` )", + "title": "FirstObservedAt", + "type": "array" + }, + "GeneratorId": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.StringFilter" + }, + "markdownDescription": "The identifier for the solution-specific component (a discrete unit of logic) that generated a finding. In various security findings providers' solutions, this generator can be called a rule, a check, a detector, a plugin, etc.", + "title": "GeneratorId", + "type": "array" + }, + "Id": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.StringFilter" + }, + "markdownDescription": "The security findings provider-specific identifier for a finding.", + "title": "Id", + "type": "array" + }, + "LastObservedAt": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.DateFilter" + }, + "markdownDescription": "A timestamp that indicates when the security findings provider most recently observed the potential security issue that a finding captured.\n\nThis field accepts only the specified formats. Timestamps can end with `Z` or `(\"+\" / \"-\") time-hour [\":\" time-minute]` . The time-secfrac after seconds is limited to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:\n\n- `YYYY-MM-DDTHH:MM:SSZ` (for example, `2019-01-31T23:00:00Z` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ` (for example, `2019-01-31T23:00:00.123456789Z` )\n- `YYYY-MM-DDTHH:MM:SS+HH:MM` (for example, `2024-01-04T15:25:10+17:59` )\n- `YYYY-MM-DDTHH:MM:SS-HHMM` (for example, `2024-01-04T15:25:10-1759` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM` (for example, `2024-01-04T15:25:10.123456789+17:59` )", + "title": "LastObservedAt", + "type": "array" + }, + "MalwareName": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.StringFilter" + }, + "markdownDescription": "The name of the malware that was observed.", + "title": "MalwareName", + "type": "array" + }, + "MalwarePath": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.StringFilter" + }, + "markdownDescription": "The filesystem path of the malware that was observed.", + "title": "MalwarePath", + "type": "array" + }, + "MalwareState": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.StringFilter" + }, + "markdownDescription": "The state of the malware that was observed.", + "title": "MalwareState", + "type": "array" + }, + "MalwareType": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.StringFilter" + }, + "markdownDescription": "The type of the malware that was observed.", + "title": "MalwareType", + "type": "array" + }, + "NetworkDestinationDomain": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.StringFilter" + }, + "markdownDescription": "The destination domain of network-related information about a finding.", + "title": "NetworkDestinationDomain", + "type": "array" + }, + "NetworkDestinationIpV4": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.IpFilter" + }, + "markdownDescription": "The destination IPv4 address of network-related information about a finding.", + "title": 
"NetworkDestinationIpV4", + "type": "array" + }, + "NetworkDestinationIpV6": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.IpFilter" + }, + "markdownDescription": "The destination IPv6 address of network-related information about a finding.", + "title": "NetworkDestinationIpV6", + "type": "array" + }, + "NetworkDestinationPort": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.NumberFilter" + }, + "markdownDescription": "The destination port of network-related information about a finding.", + "title": "NetworkDestinationPort", + "type": "array" + }, + "NetworkDirection": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.StringFilter" + }, + "markdownDescription": "Indicates the direction of network traffic associated with a finding.", + "title": "NetworkDirection", + "type": "array" + }, + "NetworkProtocol": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.StringFilter" + }, + "markdownDescription": "The protocol of network-related information about a finding.", + "title": "NetworkProtocol", + "type": "array" + }, + "NetworkSourceDomain": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.StringFilter" + }, + "markdownDescription": "The source domain of network-related information about a finding.", + "title": "NetworkSourceDomain", + "type": "array" + }, + "NetworkSourceIpV4": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.IpFilter" + }, + "markdownDescription": "The source IPv4 address of network-related information about a finding.", + "title": "NetworkSourceIpV4", + "type": "array" + }, + "NetworkSourceIpV6": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.IpFilter" + }, + "markdownDescription": "The source IPv6 address of network-related information about a finding.", + "title": "NetworkSourceIpV6", + "type": "array" + }, + "NetworkSourceMac": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.StringFilter" + }, + "markdownDescription": "The source media access control (MAC) address of network-related information about a finding.", + "title": "NetworkSourceMac", + "type": "array" + }, + "NetworkSourcePort": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.NumberFilter" + }, + "markdownDescription": "The source port of network-related information about a finding.", + "title": "NetworkSourcePort", + "type": "array" + }, + "NoteText": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.StringFilter" + }, + "markdownDescription": "The text of a note.", + "title": "NoteText", + "type": "array" + }, + "NoteUpdatedAt": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.DateFilter" + }, + "markdownDescription": "The timestamp of when the note was updated.", + "title": "NoteUpdatedAt", + "type": "array" + }, + "NoteUpdatedBy": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.StringFilter" + }, + "markdownDescription": "The principal that created a note.", + "title": "NoteUpdatedBy", + "type": "array" + }, + "ProcessLaunchedAt": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.DateFilter" + }, + "markdownDescription": "A timestamp that identifies when the process was launched.\n\nThis field accepts only the specified formats. Timestamps can end with `Z` or `(\"+\" / \"-\") time-hour [\":\" time-minute]` . The time-secfrac after seconds is limited to a maximum of 9 digits. The offset is bounded by +/-18:00. 
Here are valid timestamp formats with examples:\n\n- `YYYY-MM-DDTHH:MM:SSZ` (for example, `2019-01-31T23:00:00Z` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ` (for example, `2019-01-31T23:00:00.123456789Z` )\n- `YYYY-MM-DDTHH:MM:SS+HH:MM` (for example, `2024-01-04T15:25:10+17:59` )\n- `YYYY-MM-DDTHH:MM:SS-HHMM` (for example, `2024-01-04T15:25:10-1759` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM` (for example, `2024-01-04T15:25:10.123456789+17:59` )", + "title": "ProcessLaunchedAt", + "type": "array" + }, + "ProcessName": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.StringFilter" + }, + "markdownDescription": "The name of the process.", + "title": "ProcessName", + "type": "array" + }, + "ProcessParentPid": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.NumberFilter" + }, + "markdownDescription": "The parent process ID. This field accepts positive integers between `0` and `2147483647` .", + "title": "ProcessParentPid", + "type": "array" + }, + "ProcessPath": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.StringFilter" + }, + "markdownDescription": "The path to the process executable.", + "title": "ProcessPath", + "type": "array" + }, + "ProcessPid": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.NumberFilter" + }, + "markdownDescription": "The process ID.", + "title": "ProcessPid", + "type": "array" + }, + "ProcessTerminatedAt": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.DateFilter" + }, + "markdownDescription": "A timestamp that identifies when the process was terminated.\n\nThis field accepts only the specified formats. Timestamps can end with `Z` or `(\"+\" / \"-\") time-hour [\":\" time-minute]` . The time-secfrac after seconds is limited to a maximum of 9 digits. The offset is bounded by +/-18:00. 
Here are valid timestamp formats with examples:\n\n- `YYYY-MM-DDTHH:MM:SSZ` (for example, `2019-01-31T23:00:00Z` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ` (for example, `2019-01-31T23:00:00.123456789Z` )\n- `YYYY-MM-DDTHH:MM:SS+HH:MM` (for example, `2024-01-04T15:25:10+17:59` )\n- `YYYY-MM-DDTHH:MM:SS-HHMM` (for example, `2024-01-04T15:25:10-1759` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM` (for example, `2024-01-04T15:25:10.123456789+17:59` )", + "title": "ProcessTerminatedAt", + "type": "array" + }, + "ProductArn": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.StringFilter" + }, + "markdownDescription": "The ARN generated by Security Hub that uniquely identifies a third-party company (security findings provider) after this provider's product (solution that generates findings) is registered with Security Hub.", + "title": "ProductArn", + "type": "array" + }, + "ProductFields": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.MapFilter" + }, + "markdownDescription": "A data type where security findings providers can include additional solution-specific details that aren't part of the defined `AwsSecurityFinding` format.", + "title": "ProductFields", + "type": "array" + }, + "ProductName": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.StringFilter" + }, + "markdownDescription": "The name of the solution (product) that generates findings.", + "title": "ProductName", + "type": "array" + }, + "RecommendationText": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.StringFilter" + }, + "markdownDescription": "The recommendation of what to do about the issue described in a finding.", + "title": "RecommendationText", + "type": "array" + }, + "RecordState": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.StringFilter" + }, + "markdownDescription": "The updated record state for the finding.", + "title": "RecordState", + "type": "array" + }, + "Region": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.StringFilter" + }, + "markdownDescription": "The Region from which the finding was generated.", + "title": "Region", + "type": "array" + }, + "RelatedFindingsId": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.StringFilter" + }, + "markdownDescription": "The solution-generated identifier for a related finding.", + "title": "RelatedFindingsId", + "type": "array" + }, + "RelatedFindingsProductArn": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.StringFilter" + }, + "markdownDescription": "The ARN of the solution that generated a related finding.", + "title": "RelatedFindingsProductArn", + "type": "array" + }, + "ResourceApplicationArn": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.StringFilter" + }, + "markdownDescription": "The ARN of the application that is related to a finding.", + "title": "ResourceApplicationArn", + "type": "array" + }, + "ResourceApplicationName": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.StringFilter" + }, + "markdownDescription": "The name of the application that is related to a finding.", + "title": "ResourceApplicationName", + "type": "array" + }, + "ResourceAwsEc2InstanceIamInstanceProfileArn": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.StringFilter" + }, + "markdownDescription": "The IAM profile ARN of the instance.", + "title": "ResourceAwsEc2InstanceIamInstanceProfileArn", + "type": "array" + }, + "ResourceAwsEc2InstanceImageId": { + "items": { + "$ref": 
"#/definitions/AWS::SecurityHub::Insight.StringFilter" + }, + "markdownDescription": "The Amazon Machine Image (AMI) ID of the instance.", + "title": "ResourceAwsEc2InstanceImageId", + "type": "array" + }, + "ResourceAwsEc2InstanceIpV4Addresses": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.IpFilter" + }, + "markdownDescription": "The IPv4 addresses associated with the instance.", + "title": "ResourceAwsEc2InstanceIpV4Addresses", + "type": "array" + }, + "ResourceAwsEc2InstanceIpV6Addresses": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.IpFilter" + }, + "markdownDescription": "The IPv6 addresses associated with the instance.", + "title": "ResourceAwsEc2InstanceIpV6Addresses", + "type": "array" + }, + "ResourceAwsEc2InstanceKeyName": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.StringFilter" + }, + "markdownDescription": "The key name associated with the instance.", + "title": "ResourceAwsEc2InstanceKeyName", + "type": "array" + }, + "ResourceAwsEc2InstanceLaunchedAt": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.DateFilter" + }, + "markdownDescription": "The date and time the instance was launched.", + "title": "ResourceAwsEc2InstanceLaunchedAt", + "type": "array" + }, + "ResourceAwsEc2InstanceSubnetId": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.StringFilter" + }, + "markdownDescription": "The identifier of the subnet that the instance was launched in.", + "title": "ResourceAwsEc2InstanceSubnetId", + "type": "array" + }, + "ResourceAwsEc2InstanceType": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.StringFilter" + }, + "markdownDescription": "The instance type of the instance.", + "title": "ResourceAwsEc2InstanceType", + "type": "array" + }, + "ResourceAwsEc2InstanceVpcId": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.StringFilter" + }, + "markdownDescription": "The identifier of the VPC that the instance was launched in.", + "title": "ResourceAwsEc2InstanceVpcId", + "type": "array" + }, + "ResourceAwsIamAccessKeyCreatedAt": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.DateFilter" + }, + "markdownDescription": "The creation date/time of the IAM access key related to a finding.", + "title": "ResourceAwsIamAccessKeyCreatedAt", + "type": "array" + }, + "ResourceAwsIamAccessKeyPrincipalName": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.StringFilter" + }, + "markdownDescription": "The name of the principal that is associated with an IAM access key.", + "title": "ResourceAwsIamAccessKeyPrincipalName", + "type": "array" + }, + "ResourceAwsIamAccessKeyStatus": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.StringFilter" + }, + "markdownDescription": "The status of the IAM access key related to a finding.", + "title": "ResourceAwsIamAccessKeyStatus", + "type": "array" + }, + "ResourceAwsIamUserUserName": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.StringFilter" + }, + "markdownDescription": "The name of an IAM user.", + "title": "ResourceAwsIamUserUserName", + "type": "array" + }, + "ResourceAwsS3BucketOwnerId": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.StringFilter" + }, + "markdownDescription": "The canonical user ID of the owner of the S3 bucket.", + "title": "ResourceAwsS3BucketOwnerId", + "type": "array" + }, + "ResourceAwsS3BucketOwnerName": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.StringFilter" + }, + 
"markdownDescription": "The display name of the owner of the S3 bucket.", + "title": "ResourceAwsS3BucketOwnerName", + "type": "array" + }, + "ResourceContainerImageId": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.StringFilter" + }, + "markdownDescription": "The identifier of the image related to a finding.", + "title": "ResourceContainerImageId", + "type": "array" + }, + "ResourceContainerImageName": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.StringFilter" + }, + "markdownDescription": "The name of the image related to a finding.", + "title": "ResourceContainerImageName", + "type": "array" + }, + "ResourceContainerLaunchedAt": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.DateFilter" + }, + "markdownDescription": "A timestamp that identifies when the container was started.\n\nThis field accepts only the specified formats. Timestamps can end with `Z` or `(\"+\" / \"-\") time-hour [\":\" time-minute]` . The time-secfrac after seconds is limited to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:\n\n- `YYYY-MM-DDTHH:MM:SSZ` (for example, `2019-01-31T23:00:00Z` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ` (for example, `2019-01-31T23:00:00.123456789Z` )\n- `YYYY-MM-DDTHH:MM:SS+HH:MM` (for example, `2024-01-04T15:25:10+17:59` )\n- `YYYY-MM-DDTHH:MM:SS-HHMM` (for example, `2024-01-04T15:25:10-1759` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM` (for example, `2024-01-04T15:25:10.123456789+17:59` )", + "title": "ResourceContainerLaunchedAt", + "type": "array" + }, + "ResourceContainerName": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.StringFilter" + }, + "markdownDescription": "The name of the container related to a finding.", + "title": "ResourceContainerName", + "type": "array" + }, + "ResourceDetailsOther": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.MapFilter" + }, + "markdownDescription": "The details of a resource that doesn't have a specific subfield for the resource type defined.", + "title": "ResourceDetailsOther", + "type": "array" + }, + "ResourceId": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.StringFilter" + }, + "markdownDescription": "The canonical identifier for the given resource type.", + "title": "ResourceId", + "type": "array" + }, + "ResourcePartition": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.StringFilter" + }, + "markdownDescription": "The canonical AWS partition name that the Region is assigned to.", + "title": "ResourcePartition", + "type": "array" + }, + "ResourceRegion": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.StringFilter" + }, + "markdownDescription": "The canonical AWS external Region name where this resource is located.", + "title": "ResourceRegion", + "type": "array" + }, + "ResourceTags": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.MapFilter" + }, + "markdownDescription": "A list of AWS tags associated with a resource at the time the finding was processed.", + "title": "ResourceTags", + "type": "array" + }, + "ResourceType": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.StringFilter" + }, + "markdownDescription": "Specifies the type of the resource that details are provided for.", + "title": "ResourceType", + "type": "array" + }, + "Sample": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.BooleanFilter" + }, + "markdownDescription": "Indicates whether or not sample findings are 
included in the filter results.", + "title": "Sample", + "type": "array" + }, + "SeverityLabel": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.StringFilter" + }, + "markdownDescription": "The label of a finding's severity.", + "title": "SeverityLabel", + "type": "array" + }, + "SourceUrl": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.StringFilter" + }, + "markdownDescription": "A URL that links to a page about the current finding in the security findings provider's solution.", + "title": "SourceUrl", + "type": "array" + }, + "ThreatIntelIndicatorCategory": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.StringFilter" + }, + "markdownDescription": "The category of a threat intelligence indicator.", + "title": "ThreatIntelIndicatorCategory", + "type": "array" + }, + "ThreatIntelIndicatorLastObservedAt": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.DateFilter" + }, + "markdownDescription": "A timestamp that identifies the last observation of a threat intelligence indicator.", + "title": "ThreatIntelIndicatorLastObservedAt", + "type": "array" + }, + "ThreatIntelIndicatorSource": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.StringFilter" + }, + "markdownDescription": "The source of the threat intelligence.", + "title": "ThreatIntelIndicatorSource", + "type": "array" + }, + "ThreatIntelIndicatorSourceUrl": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.StringFilter" + }, + "markdownDescription": "The URL for more details from the source of the threat intelligence.", + "title": "ThreatIntelIndicatorSourceUrl", + "type": "array" + }, + "ThreatIntelIndicatorType": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.StringFilter" + }, + "markdownDescription": "The type of a threat intelligence indicator.", + "title": "ThreatIntelIndicatorType", + "type": "array" + }, + "ThreatIntelIndicatorValue": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.StringFilter" + }, + "markdownDescription": "The value of a threat intelligence indicator.", + "title": "ThreatIntelIndicatorValue", + "type": "array" + }, + "Title": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.StringFilter" + }, + "markdownDescription": "A finding's title.", + "title": "Title", + "type": "array" + }, + "Type": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.StringFilter" + }, + "markdownDescription": "A finding type in the format of `namespace/category/classifier` that classifies a finding.", + "title": "Type", + "type": "array" + }, + "UpdatedAt": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.DateFilter" + }, + "markdownDescription": "A timestamp that indicates when the security findings provider last updated the finding record.\n\nThis field accepts only the specified formats. Timestamps can end with `Z` or `(\"+\" / \"-\") time-hour [\":\" time-minute]` . The time-secfrac after seconds is limited to a maximum of 9 digits. The offset is bounded by +/-18:00. 
Here are valid timestamp formats with examples:\n\n- `YYYY-MM-DDTHH:MM:SSZ` (for example, `2019-01-31T23:00:00Z` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ` (for example, `2019-01-31T23:00:00.123456789Z` )\n- `YYYY-MM-DDTHH:MM:SS+HH:MM` (for example, `2024-01-04T15:25:10+17:59` )\n- `YYYY-MM-DDTHH:MM:SS-HHMM` (for example, `2024-01-04T15:25:10-1759` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM` (for example, `2024-01-04T15:25:10.123456789+17:59` )", + "title": "UpdatedAt", + "type": "array" + }, + "UserDefinedFields": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.MapFilter" + }, + "markdownDescription": "A list of name/value string pairs associated with the finding. These are custom, user-defined fields added to a finding.", + "title": "UserDefinedFields", + "type": "array" + }, + "VerificationState": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.StringFilter" + }, + "markdownDescription": "The veracity of a finding.", + "title": "VerificationState", + "type": "array" + }, + "VulnerabilitiesExploitAvailable": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.StringFilter" + }, + "markdownDescription": "Indicates whether a software vulnerability in your environment has a known exploit. You can filter findings by this field only if you use Security Hub and Amazon Inspector.", + "title": "VulnerabilitiesExploitAvailable", + "type": "array" + }, + "VulnerabilitiesFixAvailable": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.StringFilter" + }, + "markdownDescription": "Indicates whether a vulnerability is fixed in a newer version of the affected software packages. You can filter findings by this field only if you use Security Hub and Amazon Inspector.", + "title": "VulnerabilitiesFixAvailable", + "type": "array" + }, + "WorkflowState": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.StringFilter" + }, + "markdownDescription": "The workflow state of a finding.\n\nNote that this field is deprecated. To search for a finding based on its workflow status, use `WorkflowStatus` .", + "title": "WorkflowState", + "type": "array" + }, + "WorkflowStatus": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.StringFilter" + }, + "markdownDescription": "The status of the investigation into a finding. Allowed values are the following.\n\n- `NEW` - The initial state of a finding, before it is reviewed.\n\nSecurity Hub also resets the workflow status from `NOTIFIED` or `RESOLVED` to `NEW` in the following cases:\n\n- `RecordState` changes from `ARCHIVED` to `ACTIVE` .\n- `Compliance.Status` changes from `PASSED` to either `WARNING` , `FAILED` , or `NOT_AVAILABLE` .\n- `NOTIFIED` - Indicates that the resource owner has been notified about the security issue. 
Used when the initial reviewer is not the resource owner, and needs intervention from the resource owner.\n\nIf one of the following occurs, the workflow status is changed automatically from `NOTIFIED` to `NEW` :\n\n- `RecordState` changes from `ARCHIVED` to `ACTIVE` .\n- `Compliance.Status` changes from `PASSED` to `FAILED` , `WARNING` , or `NOT_AVAILABLE` .\n- `SUPPRESSED` - Indicates that you reviewed the finding and do not believe that any action is needed.\n\nThe workflow status of a `SUPPRESSED` finding does not change if `RecordState` changes from `ARCHIVED` to `ACTIVE` .\n- `RESOLVED` - The finding was reviewed and remediated and is now considered resolved.\n\nThe finding remains `RESOLVED` unless one of the following occurs:\n\n- `RecordState` changes from `ARCHIVED` to `ACTIVE` .\n- `Compliance.Status` changes from `PASSED` to `FAILED` , `WARNING` , or `NOT_AVAILABLE` .\n\nIn those cases, the workflow status is automatically reset to `NEW` .\n\nFor findings from controls, if `Compliance.Status` is `PASSED` , then Security Hub automatically sets the workflow status to `RESOLVED` .", + "title": "WorkflowStatus", + "type": "array" + } + }, + "type": "object" + }, + "AWS::SecurityHub::Insight.BooleanFilter": { + "additionalProperties": false, + "properties": { + "Value": { + "markdownDescription": "The value of the boolean.", + "title": "Value", + "type": "boolean" + } + }, + "required": [ + "Value" + ], + "type": "object" + }, + "AWS::SecurityHub::Insight.DateFilter": { + "additionalProperties": false, + "properties": { + "DateRange": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.DateRange", + "markdownDescription": "A date range for the date filter.", + "title": "DateRange" + }, + "End": { + "markdownDescription": "A timestamp that provides the end date for the date filter.\n\nThis field accepts only the specified formats. Timestamps can end with `Z` or `(\"+\" / \"-\") time-hour [\":\" time-minute]` . The time-secfrac after seconds is limited to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:\n\n- `YYYY-MM-DDTHH:MM:SSZ` (for example, `2019-01-31T23:00:00Z` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ` (for example, `2019-01-31T23:00:00.123456789Z` )\n- `YYYY-MM-DDTHH:MM:SS+HH:MM` (for example, `2024-01-04T15:25:10+17:59` )\n- `YYYY-MM-DDTHH:MM:SS-HHMM` (for example, `2024-01-04T15:25:10-1759` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM` (for example, `2024-01-04T15:25:10.123456789+17:59` )", + "title": "End", + "type": "string" + }, + "Start": { + "markdownDescription": "A timestamp that provides the start date for the date filter.\n\nThis field accepts only the specified formats. Timestamps can end with `Z` or `(\"+\" / \"-\") time-hour [\":\" time-minute]` . The time-secfrac after seconds is limited to a maximum of 9 digits. The offset is bounded by +/-18:00. 
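In template form, the filter properties above sit inside an insight's `Filters` block. Below is a minimal, illustrative `AWS::SecurityHub::Insight` fragment in CloudFormation JSON combining the `WorkflowStatus` string filter and the `Sample` boolean filter just described; the top-level `Name`, `GroupByAttribute`, and `Filters` property names come from the wider Insight resource definition and are assumptions here, since this hunk shows only the filter types.

```json
{
  "NewFindingsInsight": {
    "Type": "AWS::SecurityHub::Insight",
    "Properties": {
      "Name": "Unreviewed findings, samples excluded",
      "GroupByAttribute": "ResourceId",
      "Filters": {
        "WorkflowStatus": [
          { "Comparison": "EQUALS", "Value": "NEW" }
        ],
        "Sample": [
          { "Value": false }
        ]
      }
    }
  }
}
```

Per the `WorkflowStatus` description, findings re-enter `NEW` whenever `RecordState` flips back to `ACTIVE` or `Compliance.Status` degrades from `PASSED`, so an insight like this keeps surfacing regressions without further maintenance.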
Here are valid timestamp formats with examples:\n\n- `YYYY-MM-DDTHH:MM:SSZ` (for example, `2019-01-31T23:00:00Z` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ` (for example, `2019-01-31T23:00:00.123456789Z` )\n- `YYYY-MM-DDTHH:MM:SS+HH:MM` (for example, `2024-01-04T15:25:10+17:59` )\n- `YYYY-MM-DDTHH:MM:SS-HHMM` (for example, `2024-01-04T15:25:10-1759` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM` (for example, `2024-01-04T15:25:10.123456789+17:59` )", + "title": "Start", + "type": "string" + } + }, + "type": "object" + }, + "AWS::SecurityHub::Insight.DateRange": { + "additionalProperties": false, + "properties": { + "Unit": { + "markdownDescription": "A date range unit for the date filter.", + "title": "Unit", + "type": "string" + }, + "Value": { + "markdownDescription": "A date range value for the date filter.", + "title": "Value", + "type": "number" + } + }, + "required": [ + "Unit", + "Value" + ], + "type": "object" + }, + "AWS::SecurityHub::Insight.IpFilter": { + "additionalProperties": false, + "properties": { + "Cidr": { + "markdownDescription": "A finding's CIDR value.", + "title": "Cidr", + "type": "string" + } + }, + "required": [ + "Cidr" + ], + "type": "object" + }, + "AWS::SecurityHub::Insight.MapFilter": { + "additionalProperties": false, + "properties": { + "Comparison": { + "markdownDescription": "The condition to apply to the key value when filtering Security Hub findings with a map filter.\n\nTo search for values that have the filter value, use one of the following comparison operators:\n\n- To search for values that include the filter value, use `CONTAINS` . For example, for the `ResourceTags` field, the filter `Department CONTAINS Security` matches findings that include the value `Security` for the `Department` tag. In the same example, a finding with a value of `Security team` for the `Department` tag is a match.\n- To search for values that exactly match the filter value, use `EQUALS` . For example, for the `ResourceTags` field, the filter `Department EQUALS Security` matches findings that have the value `Security` for the `Department` tag.\n\n`CONTAINS` and `EQUALS` filters on the same field are joined by `OR` . A finding matches if it matches any one of those filters. For example, the filters `Department CONTAINS Security OR Department CONTAINS Finance` match a finding that includes either `Security` , `Finance` , or both values.\n\nTo search for values that don't have the filter value, use one of the following comparison operators:\n\n- To search for values that exclude the filter value, use `NOT_CONTAINS` . For example, for the `ResourceTags` field, the filter `Department NOT_CONTAINS Finance` matches findings that exclude the value `Finance` for the `Department` tag.\n- To search for values other than the filter value, use `NOT_EQUALS` . For example, for the `ResourceTags` field, the filter `Department NOT_EQUALS Finance` matches findings that don\u2019t have the value `Finance` for the `Department` tag.\n\n`NOT_CONTAINS` and `NOT_EQUALS` filters on the same field are joined by `AND` . A finding matches only if it matches all of those filters. For example, the filters `Department NOT_CONTAINS Security AND Department NOT_CONTAINS Finance` match a finding that excludes both the `Security` and `Finance` values.\n\n`CONTAINS` filters can only be used with other `CONTAINS` filters. `NOT_CONTAINS` filters can only be used with other `NOT_CONTAINS` filters.\n\nYou can\u2019t have both a `CONTAINS` filter and a `NOT_CONTAINS` filter on the same field. 
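A `DateFilter` can be expressed either as a fixed window with `Start`/`End` timestamps in the formats listed above, or as a rolling window with `DateRange`. Here is a sketch of both forms; `DAYS` as the `DateRange` unit is an assumption drawn from the Security Hub API, since the schema types `Unit` only as a string.

```json
{
  "CreatedAt": [
    {
      "Start": "2024-01-01T00:00:00Z",
      "End": "2024-06-30T23:59:59Z"
    }
  ],
  "LastObservedAt": [
    {
      "DateRange": { "Unit": "DAYS", "Value": 30 }
    }
  ]
}
```

The rolling form is evaluated relative to the current time, so `"Value": 30` always means the trailing 30 days.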
Similarly, you can\u2019t have both an `EQUALS` filter and a `NOT_EQUALS` filter on the same field. Combining filters in this way returns an error.\n\n`CONTAINS` and `NOT_CONTAINS` operators can be used only with automation rules. For more information, see [Automation rules](https://docs.aws.amazon.com/securityhub/latest/userguide/automation-rules.html) in the *AWS Security Hub User Guide* .", + "title": "Comparison", + "type": "string" + }, + "Key": { + "markdownDescription": "The key of the map filter. For example, for `ResourceTags` , `Key` identifies the name of the tag. For `UserDefinedFields` , `Key` is the name of the field.", + "title": "Key", + "type": "string" + }, + "Value": { + "markdownDescription": "The value for the key in the map filter. Filter values are case sensitive. For example, one of the values for a tag called `Department` might be `Security` . If you provide `security` as the filter value, then there's no match.", + "title": "Value", + "type": "string" + } + }, + "required": [ + "Comparison", + "Key", + "Value" + ], + "type": "object" + }, + "AWS::SecurityHub::Insight.NumberFilter": { + "additionalProperties": false, + "properties": { + "Eq": { + "markdownDescription": "The equal-to condition to be applied to a single field when querying for findings.", + "title": "Eq", + "type": "number" + }, + "Gte": { + "markdownDescription": "The greater-than-equal condition to be applied to a single field when querying for findings.", + "title": "Gte", + "type": "number" + }, + "Lte": { + "markdownDescription": "The less-than-equal condition to be applied to a single field when querying for findings.", + "title": "Lte", + "type": "number" + } + }, + "type": "object" + }, + "AWS::SecurityHub::Insight.StringFilter": { + "additionalProperties": false, + "properties": { + "Comparison": { + "markdownDescription": "The condition to apply to a string value when filtering Security Hub findings.\n\nTo search for values that have the filter value, use one of the following comparison operators:\n\n- To search for values that include the filter value, use `CONTAINS` . For example, the filter `Title CONTAINS CloudFront` matches findings that have a `Title` that includes the string CloudFront.\n- To search for values that exactly match the filter value, use `EQUALS` . For example, the filter `AwsAccountId EQUALS 123456789012` only matches findings that have an account ID of `123456789012` .\n- To search for values that start with the filter value, use `PREFIX` . For example, the filter `ResourceRegion PREFIX us` matches findings that have a `ResourceRegion` that starts with `us` . A `ResourceRegion` that starts with a different value, such as `af` , `ap` , or `ca` , doesn't match.\n\n`CONTAINS` , `EQUALS` , and `PREFIX` filters on the same field are joined by `OR` . A finding matches if it matches any one of those filters. For example, the filters `Title CONTAINS CloudFront OR Title CONTAINS CloudWatch` match a finding that includes either `CloudFront` , `CloudWatch` , or both strings in the title.\n\nTo search for values that don\u2019t have the filter value, use one of the following comparison operators:\n\n- To search for values that exclude the filter value, use `NOT_CONTAINS` . For example, the filter `Title NOT_CONTAINS CloudFront` matches findings that have a `Title` that excludes the string CloudFront.\n- To search for values other than the filter value, use `NOT_EQUALS` . 
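The `MapFilter` rules are easiest to see with the `ResourceTags` example the description itself uses. Because `CONTAINS` and `NOT_CONTAINS` are restricted to automation rules, an insight filter would use `EQUALS`; two `EQUALS` entries on the same field are joined by `OR`, so this illustrative fragment matches findings tagged `Department=Security` or `Department=Finance`:

```json
{
  "ResourceTags": [
    { "Comparison": "EQUALS", "Key": "Department", "Value": "Security" },
    { "Comparison": "EQUALS", "Key": "Department", "Value": "Finance" }
  ]
}
```

As the `Value` description notes, the comparison is case sensitive, so `security` would not match `Security`.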
For example, the filter `AwsAccountId NOT_EQUALS 123456789012` only matches findings that have an account ID other than `123456789012` .\n- To search for values that don't start with the filter value, use `PREFIX_NOT_EQUALS` . For example, the filter `ResourceRegion PREFIX_NOT_EQUALS us` matches findings with a `ResourceRegion` that starts with a value other than `us` .\n\n`NOT_CONTAINS` , `NOT_EQUALS` , and `PREFIX_NOT_EQUALS` filters on the same field are joined by `AND` . A finding matches only if it matches all of those filters. For example, the filters `Title NOT_CONTAINS CloudFront AND Title NOT_CONTAINS CloudWatch` match a finding that excludes both `CloudFront` and `CloudWatch` in the title.\n\nYou can\u2019t have both a `CONTAINS` filter and a `NOT_CONTAINS` filter on the same field. Similarly, you can't provide both an `EQUALS` filter and a `NOT_EQUALS` or `PREFIX_NOT_EQUALS` filter on the same field. Combining filters in this way returns an error. `CONTAINS` filters can only be used with other `CONTAINS` filters. `NOT_CONTAINS` filters can only be used with other `NOT_CONTAINS` filters.\n\nYou can combine `PREFIX` filters with `NOT_EQUALS` or `PREFIX_NOT_EQUALS` filters for the same field. Security Hub first processes the `PREFIX` filters, and then the `NOT_EQUALS` or `PREFIX_NOT_EQUALS` filters.\n\nFor example, for the following filters, Security Hub first identifies findings that have resource types that start with either `AwsIam` or `AwsEc2` . It then excludes findings that have a resource type of `AwsIamPolicy` and findings that have a resource type of `AwsEc2NetworkInterface` .\n\n- `ResourceType PREFIX AwsIam`\n- `ResourceType PREFIX AwsEc2`\n- `ResourceType NOT_EQUALS AwsIamPolicy`\n- `ResourceType NOT_EQUALS AwsEc2NetworkInterface`\n\n`CONTAINS` and `NOT_CONTAINS` operators can be used only with automation rules. For more information, see [Automation rules](https://docs.aws.amazon.com/securityhub/latest/userguide/automation-rules.html) in the *AWS Security Hub User Guide* .", + "title": "Comparison", + "type": "string" + }, + "Value": { + "markdownDescription": "The string filter value. Filter values are case sensitive. For example, the product name for control-based findings is `Security Hub` . 
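The `PREFIX` plus `NOT_EQUALS`/`PREFIX_NOT_EQUALS` interplay described above translates directly into filter JSON. The fragment below is the description's own worked example, expressed as `ResourceType` string filters:

```json
{
  "ResourceType": [
    { "Comparison": "PREFIX", "Value": "AwsIam" },
    { "Comparison": "PREFIX", "Value": "AwsEc2" },
    { "Comparison": "NOT_EQUALS", "Value": "AwsIamPolicy" },
    { "Comparison": "NOT_EQUALS", "Value": "AwsEc2NetworkInterface" }
  ]
}
```

Security Hub applies the two `PREFIX` filters first (joined by `OR`), then removes the two excluded resource types from that result.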
If you provide `security hub` as the filter value, there's no match.", + "title": "Value", + "type": "string" + } + }, + "required": [ + "Comparison", + "Value" + ], + "type": "object" + }, + "AWS::SecurityHub::ProductSubscription": { + "additionalProperties": false, + "properties": { + "Condition": { + "type": "string" + }, + "DeletionPolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + }, + "DependsOn": { + "anyOf": [ + { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + { + "items": { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + "type": "array" + } + ] + }, + "Metadata": { + "type": "object" + }, + "Properties": { + "additionalProperties": false, + "properties": { + "ProductArn": { + "markdownDescription": "The ARN of the product to enable the integration for.", + "title": "ProductArn", + "type": "string" + } + }, + "required": [ + "ProductArn" + ], + "type": "object" + }, + "Type": { + "enum": [ + "AWS::SecurityHub::ProductSubscription" + ], + "type": "string" + }, + "UpdateReplacePolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + } + }, + "required": [ + "Type", + "Properties" + ], + "type": "object" + }, "AWS::SecurityHub::Standard": { "additionalProperties": false, "properties": { @@ -249964,6 +254780,93 @@ ], "type": "object" }, + "AWS::SecurityLake::AwsLogSource": { + "additionalProperties": false, + "properties": { + "Condition": { + "type": "string" + }, + "DeletionPolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + }, + "DependsOn": { + "anyOf": [ + { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + { + "items": { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + "type": "array" + } + ] + }, + "Metadata": { + "type": "object" + }, + "Properties": { + "additionalProperties": false, + "properties": { + "Accounts": { + "items": { + "type": "string" + }, + "markdownDescription": "Specify the AWS account information where you want to enable Security Lake.", + "title": "Accounts", + "type": "array" + }, + "DataLakeArn": { + "markdownDescription": "The Amazon Resource Name (ARN) used to create the data lake.", + "title": "DataLakeArn", + "type": "string" + }, + "SourceName": { + "markdownDescription": "The name for an AWS source. This must be a Regionally unique value. For the list of sources supported by Amazon Security Lake, see [Collecting data from AWS services](https://docs.aws.amazon.com//security-lake/latest/userguide/internal-sources.html) in the Amazon Security Lake User Guide.", + "title": "SourceName", + "type": "string" + }, + "SourceVersion": { + "markdownDescription": "The version for an AWS source. For more details about source versions supported by Amazon Security Lake, see [OCSF source identification](https://docs.aws.amazon.com//security-lake/latest/userguide/open-cybersecurity-schema-framework.html#ocsf-source-identification) in the Amazon Security Lake User Guide. 
This must be a Regionally unique value.", + "title": "SourceVersion", + "type": "string" + } + }, + "required": [ + "DataLakeArn", + "SourceName", + "SourceVersion" + ], + "type": "object" + }, + "Type": { + "enum": [ + "AWS::SecurityLake::AwsLogSource" + ], + "type": "string" + }, + "UpdateReplacePolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + } + }, + "required": [ + "Type", + "Properties" + ], + "type": "object" + }, "AWS::SecurityLake::DataLake": { "additionalProperties": false, "properties": { @@ -249999,30 +254902,225 @@ "Properties": { "additionalProperties": false, "properties": { - "EncryptionConfiguration": { - "$ref": "#/definitions/AWS::SecurityLake::DataLake.EncryptionConfiguration" + "EncryptionConfiguration": { + "$ref": "#/definitions/AWS::SecurityLake::DataLake.EncryptionConfiguration", + "markdownDescription": "Provides encryption details of the Amazon Security Lake object.", + "title": "EncryptionConfiguration" + }, + "LifecycleConfiguration": { + "$ref": "#/definitions/AWS::SecurityLake::DataLake.LifecycleConfiguration", + "markdownDescription": "You can customize Security Lake to store data in your preferred AWS Regions for your preferred amount of time. Lifecycle management can help you comply with different compliance requirements. For more details, see [Lifecycle management](https://docs.aws.amazon.com//security-lake/latest/userguide/lifecycle-management.html) in the Amazon Security Lake User Guide.", + "title": "LifecycleConfiguration" + }, + "MetaStoreManagerRoleArn": { + "markdownDescription": "The Amazon Resource Name (ARN) used to create and update the AWS Glue table. This table contains partitions generated by the ingestion and normalization of AWS log sources and custom sources.", + "title": "MetaStoreManagerRoleArn", + "type": "string" + }, + "ReplicationConfiguration": { + "$ref": "#/definitions/AWS::SecurityLake::DataLake.ReplicationConfiguration", + "markdownDescription": "Provides replication details of the Amazon Security Lake object.", + "title": "ReplicationConfiguration" + }, + "Tags": { + "items": { + "$ref": "#/definitions/Tag" + }, + "markdownDescription": "An array of objects, one for each tag to associate with the data lake configuration. For each tag, you must specify both a tag key and a tag value. 
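For reference, a minimal `AWS::SecurityLake::AwsLogSource` sketch follows. The `ROUTE53` source name and `2.0` source version are illustrative values of the kind listed in the linked source documentation, and the `Fn::GetAtt` on a `MyDataLake` resource assumes the data lake exposes an `Arn` attribute; all three are assumptions rather than values stated in this hunk.

```json
{
  "Route53Source": {
    "Type": "AWS::SecurityLake::AwsLogSource",
    "Properties": {
      "DataLakeArn": { "Fn::GetAtt": ["MyDataLake", "Arn"] },
      "SourceName": "ROUTE53",
      "SourceVersion": "2.0",
      "Accounts": ["111122223333", "444455556666"]
    }
  }
}
```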
A tag value cannot be null, but it can be an empty string.", + "title": "Tags", + "type": "array" + } + }, + "type": "object" + }, + "Type": { + "enum": [ + "AWS::SecurityLake::DataLake" + ], + "type": "string" + }, + "UpdateReplacePolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + } + }, + "required": [ + "Type" + ], + "type": "object" + }, + "AWS::SecurityLake::DataLake.EncryptionConfiguration": { + "additionalProperties": false, + "properties": { + "KmsKeyId": { + "markdownDescription": "The ID of the KMS encryption key used by Amazon Security Lake to encrypt the Security Lake object.", + "title": "KmsKeyId", + "type": "string" + } + }, + "type": "object" + }, + "AWS::SecurityLake::DataLake.Expiration": { + "additionalProperties": false, + "properties": { + "Days": { + "markdownDescription": "The number of days before data expires in the Amazon Security Lake object.", + "title": "Days", + "type": "number" + } + }, + "type": "object" + }, + "AWS::SecurityLake::DataLake.LifecycleConfiguration": { + "additionalProperties": false, + "properties": { + "Expiration": { + "$ref": "#/definitions/AWS::SecurityLake::DataLake.Expiration", + "markdownDescription": "Provides data expiration details of the Amazon Security Lake object.", + "title": "Expiration" + }, + "Transitions": { + "items": { + "$ref": "#/definitions/AWS::SecurityLake::DataLake.Transitions" + }, + "markdownDescription": "Provides data storage transition details of the Amazon Security Lake object. By configuring these settings, you can specify your preferred Amazon S3 storage class and the time period for S3 objects to stay in that storage class before they transition to a different storage class.", + "title": "Transitions", + "type": "array" + } + }, + "type": "object" + }, + "AWS::SecurityLake::DataLake.ReplicationConfiguration": { + "additionalProperties": false, + "properties": { + "Regions": { + "items": { + "type": "string" + }, + "markdownDescription": "Specifies one or more centralized rollup Regions. The AWS Region specified in the region parameter of the `CreateDataLake` or `UpdateDataLake` operations contributes data to the rollup Region or Regions specified in this parameter.\n\nReplication enables automatic, asynchronous copying of objects across Amazon S3 buckets. S3 buckets that are configured for object replication can be owned by the same AWS account or by different accounts. You can replicate objects to a single destination bucket or to multiple destination buckets. The destination buckets can be in different Regions or within the same Region as the source bucket.", + "title": "Regions", + "type": "array" + }, + "RoleArn": { + "markdownDescription": "Replication settings for the Amazon S3 buckets. This parameter uses the AWS Identity and Access Management (IAM) role you created that is managed by Security Lake, to ensure the replication setting is correct.", + "title": "RoleArn", + "type": "string" + } + }, + "type": "object" + }, + "AWS::SecurityLake::DataLake.Transitions": { + "additionalProperties": false, + "properties": { + "Days": { + "markdownDescription": "The number of days before data transitions to a different S3 Storage Class in the Amazon Security Lake object.", + "title": "Days", + "type": "number" + }, + "StorageClass": { + "markdownDescription": "The list of storage classes that you can choose from based on the data access, resiliency, and cost requirements of your workloads. 
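Putting the encryption and lifecycle pieces together: a hypothetical data lake that encrypts with a customer managed key, moves data to a cheaper storage class after 90 days, and expires it after a year could look like the sketch below. The key alias, role ARN, and `STANDARD_IA` storage class are illustrative assumptions, not values stated in this hunk.

```json
{
  "MyDataLake": {
    "Type": "AWS::SecurityLake::DataLake",
    "Properties": {
      "MetaStoreManagerRoleArn": "arn:aws:iam::111122223333:role/MetaStoreManagerRole",
      "EncryptionConfiguration": { "KmsKeyId": "alias/security-lake" },
      "LifecycleConfiguration": {
        "Transitions": [{ "Days": 90, "StorageClass": "STANDARD_IA" }],
        "Expiration": { "Days": 365 }
      }
    }
  }
}
```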
The default storage class is S3 Standard.", + "title": "StorageClass", + "type": "string" + } + }, + "type": "object" + }, + "AWS::SecurityLake::Subscriber": { + "additionalProperties": false, + "properties": { + "Condition": { + "type": "string" + }, + "DeletionPolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + }, + "DependsOn": { + "anyOf": [ + { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + { + "items": { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + "type": "array" + } + ] + }, + "Metadata": { + "type": "object" + }, + "Properties": { + "additionalProperties": false, + "properties": { + "AccessTypes": { + "items": { + "type": "string" + }, + "markdownDescription": "You can choose to notify subscribers of new objects with an Amazon Simple Queue Service (Amazon SQS) queue or through messaging to an HTTPS endpoint provided by the subscriber.\n\nSubscribers can consume data by directly querying AWS Lake Formation tables in your Amazon S3 bucket through services like Amazon Athena. This subscription type is defined as `LAKEFORMATION` .", + "title": "AccessTypes", + "type": "array" }, - "LifecycleConfiguration": { - "$ref": "#/definitions/AWS::SecurityLake::DataLake.LifecycleConfiguration" + "DataLakeArn": { + "markdownDescription": "The Amazon Resource Name (ARN) used to create the data lake.", + "title": "DataLakeArn", + "type": "string" }, - "MetaStoreManagerRoleArn": { + "Sources": { + "items": { + "$ref": "#/definitions/AWS::SecurityLake::Subscriber.Source" + }, + "markdownDescription": "Amazon Security Lake supports log and event collection for natively supported AWS services . For more information, see the [Amazon Security Lake User Guide](https://docs.aws.amazon.com//security-lake/latest/userguide/source-management.html) .", + "title": "Sources", + "type": "array" + }, + "SubscriberDescription": { + "markdownDescription": "The subscriber description for a subscriber account. The description for a subscriber includes `subscriberName` , `accountID` , `externalID` , and `subscriberId` .", + "title": "SubscriberDescription", "type": "string" }, - "ReplicationConfiguration": { - "$ref": "#/definitions/AWS::SecurityLake::DataLake.ReplicationConfiguration" + "SubscriberIdentity": { + "$ref": "#/definitions/AWS::SecurityLake::Subscriber.SubscriberIdentity", + "markdownDescription": "The AWS identity used to access your data.", + "title": "SubscriberIdentity" + }, + "SubscriberName": { + "markdownDescription": "The name of your Amazon Security Lake subscriber account.", + "title": "SubscriberName", + "type": "string" }, "Tags": { "items": { "$ref": "#/definitions/Tag" }, + "markdownDescription": "An array of objects, one for each tag to associate with the subscriber. For each tag, you must specify both a tag key and a tag value. 
A tag value cannot be null, but it can be an empty string.", + "title": "Tags", + "type": "array" + } + }, + "required": [ + "AccessTypes", + "DataLakeArn", + "Sources", + "SubscriberIdentity", + "SubscriberName" + ], "type": "object" }, "Type": { "enum": [ - "AWS::SecurityLake::DataLake" + "AWS::SecurityLake::Subscriber" ], "type": "string" }, @@ -250036,68 +255134,77 @@ } }, "required": [ - "Type" + "Type", + "Properties" ], "type": "object" }, - "AWS::SecurityLake::DataLake.EncryptionConfiguration": { + "AWS::SecurityLake::Subscriber.AwsLogSource": { "additionalProperties": false, "properties": { - "KmsKeyId": { + "SourceName": { + "markdownDescription": "Source name of the natively supported AWS service that is supported as an Amazon Security Lake source. For the list of sources supported by Amazon Security Lake, see [Collecting data from AWS services](https://docs.aws.amazon.com//security-lake/latest/userguide/internal-sources.html) in the Amazon Security Lake User Guide.", + "title": "SourceName", + "type": "string" + }, + "SourceVersion": { + "markdownDescription": "Source version of the natively supported AWS service that is supported as an Amazon Security Lake source. For more details about source versions supported by Amazon Security Lake, see [OCSF source identification](https://docs.aws.amazon.com//security-lake/latest/userguide/open-cybersecurity-schema-framework.html#ocsf-source-identification) in the Amazon Security Lake User Guide.", + "title": "SourceVersion", "type": "string" } }, "type": "object" }, - "AWS::SecurityLake::DataLake.Expiration": { - "additionalProperties": false, - "properties": { - "Days": { - "type": "number" - } - }, - "type": "object" - }, - "AWS::SecurityLake::DataLake.LifecycleConfiguration": { + "AWS::SecurityLake::Subscriber.CustomLogSource": { "additionalProperties": false, "properties": { - "Expiration": { - "$ref": "#/definitions/AWS::SecurityLake::DataLake.Expiration" + "SourceName": { + "markdownDescription": "The name of the custom log source.", + "title": "SourceName", + "type": "string" }, - "Transitions": { - "items": { - "$ref": "#/definitions/AWS::SecurityLake::DataLake.Transitions" - }, - "type": "array" + "SourceVersion": { + "markdownDescription": "The source version of the custom log source.", + "title": "SourceVersion", + "type": "string" } }, "type": "object" }, - "AWS::SecurityLake::DataLake.ReplicationConfiguration": { + "AWS::SecurityLake::Subscriber.Source": { "additionalProperties": false, "properties": { - "Regions": { - "items": { - "type": "string" - }, - "type": "array" + "AwsLogSource": { + "$ref": "#/definitions/AWS::SecurityLake::Subscriber.AwsLogSource", + "markdownDescription": "The natively supported AWS service which is used as an Amazon Security Lake source to collect logs and events from.", + "title": "AwsLogSource" }, - "RoleArn": { - "type": "string" + "CustomLogSource": { + "$ref": "#/definitions/AWS::SecurityLake::Subscriber.CustomLogSource", + "markdownDescription": "The custom log source which is used as an Amazon Security Lake source to collect logs and events from.", + "title": "CustomLogSource" } }, "type": "object" }, - "AWS::SecurityLake::DataLake.Transitions": { + "AWS::SecurityLake::Subscriber.SubscriberIdentity": { "additionalProperties": false, "properties": { - "Days": { - "type": "number" + "ExternalId": { + "markdownDescription": "The external ID is a unique identifier that the subscriber provides to you.", + "title": "ExternalId", + "type": "string" }, - "StorageClass": { + "Principal": { + 
"markdownDescription": "Principals can include accounts, users, roles, federated users, or AWS services.", + "title": "Principal", "type": "string" } }, + "required": [ + "ExternalId", + "Principal" + ], "type": "object" }, "AWS::ServiceCatalog::AcceptedPortfolioShare": { @@ -254519,36 +259626,241 @@ "Properties": { "additionalProperties": false, "properties": { - "Name": { - "markdownDescription": "A name for the group. It can include any Unicode characters.\n\nThe names for all groups in your account, across all Regions, must be unique.", - "title": "Name", + "Name": { + "markdownDescription": "A name for the group. It can include any Unicode characters.\n\nThe names for all groups in your account, across all Regions, must be unique.", + "title": "Name", + "type": "string" + }, + "ResourceArns": { + "items": { + "type": "string" + }, + "markdownDescription": "The ARNs of the canaries that you want to associate with this group.", + "title": "ResourceArns", + "type": "array" + }, + "Tags": { + "items": { + "$ref": "#/definitions/Tag" + }, + "markdownDescription": "The list of key-value pairs that are associated with the group.", + "title": "Tags", + "type": "array" + } + }, + "required": [ + "Name" + ], + "type": "object" + }, + "Type": { + "enum": [ + "AWS::Synthetics::Group" + ], + "type": "string" + }, + "UpdateReplacePolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + } + }, + "required": [ + "Type", + "Properties" + ], + "type": "object" + }, + "AWS::SystemsManagerSAP::Application": { + "additionalProperties": false, + "properties": { + "Condition": { + "type": "string" + }, + "DeletionPolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + }, + "DependsOn": { + "anyOf": [ + { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + { + "items": { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + "type": "array" + } + ] + }, + "Metadata": { + "type": "object" + }, + "Properties": { + "additionalProperties": false, + "properties": { + "ApplicationId": { + "markdownDescription": "The ID of the application.", + "title": "ApplicationId", + "type": "string" + }, + "ApplicationType": { + "markdownDescription": "The type of the application.", + "title": "ApplicationType", + "type": "string" + }, + "Credentials": { + "items": { + "$ref": "#/definitions/AWS::SystemsManagerSAP::Application.Credential" + }, + "markdownDescription": "The credentials of the SAP application.", + "title": "Credentials", + "type": "array" + }, + "Instances": { + "items": { + "type": "string" + }, + "markdownDescription": "The Amazon EC2 instances on which your SAP application is running.", + "title": "Instances", + "type": "array" + }, + "SapInstanceNumber": { + "markdownDescription": "The SAP instance number of the application.", + "title": "SapInstanceNumber", + "type": "string" + }, + "Sid": { + "markdownDescription": "The System ID of the application.", + "title": "Sid", + "type": "string" + }, + "Tags": { + "items": { + "$ref": "#/definitions/Tag" + }, + "markdownDescription": "The tags on the application.", + "title": "Tags", + "type": "array" + } + }, + "required": [ + "ApplicationId", + "ApplicationType" + ], + "type": "object" + }, + "Type": { + "enum": [ + "AWS::SystemsManagerSAP::Application" + ], + "type": "string" + }, + "UpdateReplacePolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + } + }, + "required": [ + "Type", + "Properties" + ], + "type": "object" + }, + 
"AWS::SystemsManagerSAP::Application.Credential": { + "additionalProperties": false, + "properties": { + "CredentialType": { + "markdownDescription": "The type of the application credentials.", + "title": "CredentialType", + "type": "string" + }, + "DatabaseName": { + "markdownDescription": "The name of the SAP HANA database.", + "title": "DatabaseName", + "type": "string" + }, + "SecretId": { + "markdownDescription": "The secret ID created in AWS Secrets Manager to store the credentials of the SAP application.", + "title": "SecretId", + "type": "string" + } + }, + "type": "object" + }, + "AWS::Timestream::Database": { + "additionalProperties": false, + "properties": { + "Condition": { + "type": "string" + }, + "DeletionPolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + }, + "DependsOn": { + "anyOf": [ + { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + { + "items": { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + "type": "array" + } + ] + }, + "Metadata": { + "type": "object" + }, + "Properties": { + "additionalProperties": false, + "properties": { + "DatabaseName": { + "markdownDescription": "The name of the Timestream database.\n\n*Length Constraints* : Minimum length of 3 bytes. Maximum length of 256 bytes.", + "title": "DatabaseName", "type": "string" }, - "ResourceArns": { - "items": { - "type": "string" - }, - "markdownDescription": "The ARNs of the canaries that you want to associate with this group.", - "title": "ResourceArns", - "type": "array" + "KmsKeyId": { + "markdownDescription": "The identifier of the AWS KMS key used to encrypt the data stored in the database.", + "title": "KmsKeyId", + "type": "string" }, "Tags": { "items": { "$ref": "#/definitions/Tag" }, - "markdownDescription": "The list of key-value pairs that are associated with the group.", + "markdownDescription": "The tags to add to the database.", "title": "Tags", "type": "array" } }, - "required": [ - "Name" - ], "type": "object" }, "Type": { "enum": [ - "AWS::Synthetics::Group" + "AWS::Timestream::Database" ], "type": "string" }, @@ -254562,12 +259874,11 @@ } }, "required": [ - "Type", - "Properties" + "Type" ], "type": "object" }, - "AWS::SystemsManagerSAP::Application": { + "AWS::Timestream::InfluxDBInstance": { "additionalProperties": false, "properties": { "Condition": { @@ -254602,60 +259913,96 @@ "Properties": { "additionalProperties": false, "properties": { - "ApplicationId": { - "markdownDescription": "The ID of the application.", - "title": "ApplicationId", + "AllocatedStorage": { + "markdownDescription": "The amount of storage to allocate for your DB storage type in GiB (gibibytes).", + "title": "AllocatedStorage", + "type": "number" + }, + "Bucket": { + "markdownDescription": "The name of the initial InfluxDB bucket. All InfluxDB data is stored in a bucket. A bucket combines the concept of a database and a retention period (the duration of time that each data point persists). 
A bucket belongs to an organization.", + "title": "Bucket", "type": "string" }, - "ApplicationType": { - "markdownDescription": "The type of the application.", - "title": "ApplicationType", + "DbInstanceType": { + "markdownDescription": "The Timestream for InfluxDB DB instance type to run on.", + "title": "DbInstanceType", "type": "string" }, - "Credentials": { - "items": { - "$ref": "#/definitions/AWS::SystemsManagerSAP::Application.Credential" - }, - "markdownDescription": "The credentials of the SAP application.", - "title": "Credentials", - "type": "array" + "DbParameterGroupIdentifier": { + "markdownDescription": "The name or ID of the DB parameter group to assign to your DB instance. DB parameter groups specify how the database is configured. For example, DB parameter groups can specify the limit for query concurrency.", + "title": "DbParameterGroupIdentifier", + "type": "string" }, - "Instances": { - "items": { - "type": "string" - }, - "markdownDescription": "The Amazon EC2 instances on which your SAP application is running.", - "title": "Instances", - "type": "array" + "DbStorageType": { + "markdownDescription": "The Timestream for InfluxDB DB storage type to read and write InfluxDB data.\n\nYou can choose from three types of provisioned Influx IOPS included storage according to your workload's requirements:\n\n- Influx IO Included 3000 IOPS\n- Influx IO Included 12000 IOPS\n- Influx IO Included 16000 IOPS", + "title": "DbStorageType", + "type": "string" }, - "SapInstanceNumber": { - "markdownDescription": "The SAP instance number of the application.", - "title": "SapInstanceNumber", + "DeploymentType": { + "markdownDescription": "Specifies whether the Timestream for InfluxDB DB instance is deployed as Single-AZ or with a Multi-AZ standby for high availability.", + "title": "DeploymentType", "type": "string" }, - "Sid": { - "markdownDescription": "The System ID of the application.", - "title": "Sid", + "LogDeliveryConfiguration": { + "$ref": "#/definitions/AWS::Timestream::InfluxDBInstance.LogDeliveryConfiguration", + "markdownDescription": "Configuration for sending InfluxDB engine logs to a specified S3 bucket.", + "title": "LogDeliveryConfiguration" + }, + "Name": { + "markdownDescription": "The name that uniquely identifies the DB instance when interacting with the Amazon Timestream for InfluxDB API and CLI commands. This name will also be a prefix included in the endpoint. DB instance names must be unique per customer and per Region.", + "title": "Name", + "type": "string" + }, + "Organization": { + "markdownDescription": "The name of the initial organization for the initial admin user in InfluxDB. An InfluxDB organization is a workspace for a group of users.", + "title": "Organization", + "type": "string" + }, + "Password": { + "markdownDescription": "The password of the initial admin user created in InfluxDB. This password will allow you to access the InfluxDB UI to perform various administrative tasks and also use the InfluxDB CLI to create an operator token. 
These attributes will be stored in a Secret created in Amazon Secrets Manager in your account.", + "title": "Password", "type": "string" }, + "PubliclyAccessible": { + "markdownDescription": "Configures the DB instance with a public IP to facilitate access.", + "title": "PubliclyAccessible", + "type": "boolean" + }, "Tags": { "items": { "$ref": "#/definitions/Tag" }, - "markdownDescription": "The tags on the application.", + "markdownDescription": "A list of key-value pairs to associate with the DB instance.", "title": "Tags", "type": "array" + }, + "Username": { + "markdownDescription": "The username of the initial admin user created in InfluxDB. Must start with a letter and can't end with a hyphen or contain two consecutive hyphens. For example, my-user1. This username will allow you to access the InfluxDB UI to perform various administrative tasks and also use the InfluxDB CLI to create an operator token. These attributes will be stored in a Secret created in Amazon Secrets Manager in your account.", + "title": "Username", + "type": "string" + }, + "VpcSecurityGroupIds": { + "items": { + "type": "string" + }, + "markdownDescription": "A list of VPC security group IDs to associate with the DB instance.", + "title": "VpcSecurityGroupIds", + "type": "array" + }, + "VpcSubnetIds": { + "items": { + "type": "string" + }, + "markdownDescription": "A list of VPC subnet IDs to associate with the DB instance. Provide at least two VPC subnet IDs in different Availability Zones when deploying with a Multi-AZ standby.", + "title": "VpcSubnetIds", + "type": "array" + } } }, - "required": [ - "ApplicationId", - "ApplicationType" - ], "type": "object" }, "Type": { "enum": [ - "AWS::SystemsManagerSAP::Application" + "AWS::Timestream::InfluxDBInstance" ], "type": "string" }, @@ -254669,105 +260016,41 @@ } }, "required": [ - "Type", - "Properties" + "Type" ], "type": "object" }, - "AWS::SystemsManagerSAP::Application.Credential": { + "AWS::Timestream::InfluxDBInstance.LogDeliveryConfiguration": { "additionalProperties": false, "properties": { - "CredentialType": { - "markdownDescription": "The type of the application credentials.", - "title": "CredentialType", - "type": "string" - }, - "DatabaseName": { - "markdownDescription": "The name of the SAP HANA database.", - "title": "DatabaseName", - "type": "string" - }, - "SecretId": { - "markdownDescription": "The secret ID created in AWS Secrets Manager to store the credentials of the SAP application.", - "title": "SecretId", - "type": "string" + "S3Configuration": { + "$ref": "#/definitions/AWS::Timestream::InfluxDBInstance.S3Configuration", + "markdownDescription": "Configuration for S3 bucket log delivery.", + "title": "S3Configuration" } }, + "required": [ + "S3Configuration" + ], "type": "object" }, - "AWS::Timestream::Database": { + "AWS::Timestream::InfluxDBInstance.S3Configuration": { "additionalProperties": false, "properties": { - "Condition": { - "type": "string" - }, - "DeletionPolicy": { - "enum": [ - "Delete", - "Retain", - "Snapshot" - ], - "type": "string" - }, - "DependsOn": { - "anyOf": [ - { - "pattern": "^[a-zA-Z0-9]+$", - "type": "string" - }, - { - "items": { - "pattern": "^[a-zA-Z0-9]+$", - "type": "string" - }, - "type": "array" - } - ] - }, - "Metadata": { - "type": "object" - }, - "Properties": { - "additionalProperties": false, - "properties": { - "DatabaseName": { - "markdownDescription": "The name of the Timestream database.\n\n*Length Constraints* : Minimum length of 3 bytes. 
Maximum length of 256 bytes.", - "title": "DatabaseName", - "type": "string" - }, - "KmsKeyId": { - "markdownDescription": "The identifier of the AWS KMS key used to encrypt the data stored in the database.", - "title": "KmsKeyId", - "type": "string" - }, - "Tags": { - "items": { - "$ref": "#/definitions/Tag" - }, - "markdownDescription": "The tags to add to the database.", - "title": "Tags", - "type": "array" - } - }, - "type": "object" - }, - "Type": { - "enum": [ - "AWS::Timestream::Database" - ], + "BucketName": { + "markdownDescription": "The bucket name of the customer S3 bucket.", + "title": "BucketName", "type": "string" }, - "UpdateReplacePolicy": { - "enum": [ - "Delete", - "Retain", - "Snapshot" - ], - "type": "string" + "Enabled": { + "markdownDescription": "Indicates whether log delivery to the S3 bucket is enabled.", + "title": "Enabled", + "type": "boolean" } }, "required": [ - "Type" + "BucketName", + "Enabled" ], "type": "object" }, @@ -255533,7 +260816,7 @@ "type": "array" }, "Usage": { - "markdownDescription": "Specifies whether this certificate is used for signing or encryption.", + "markdownDescription": "Specifies how this certificate is used. It can be used in the following ways:\n\n- `SIGNING` : For signing AS2 messages\n- `ENCRYPTION` : For encrypting AS2 messages\n- `TLS` : For securing AS2 communications sent over HTTPS", "title": "Usage", "type": "string" } @@ -255861,7 +261144,7 @@ "type": "string" }, "Domain": { - "markdownDescription": "Specifies the domain of the storage system that is used for file transfers.", + "markdownDescription": "Specifies the domain of the storage system that is used for file transfers. There are two domains available: Amazon Simple Storage Service (Amazon S3) and Amazon Elastic File System (Amazon EFS). The default value is S3.", "title": "Domain", "type": "string" }, @@ -255919,7 +261202,7 @@ "title": "S3StorageOptions" }, "SecurityPolicyName": { - "markdownDescription": "Specifies the name of the security policy that is attached to the server.", + "markdownDescription": "Specifies the name of the security policy for the server.", "title": "SecurityPolicyName", "type": "string" }, @@ -256682,7 +261965,7 @@ "properties": { "Configuration": { "$ref": "#/definitions/AWS::VerifiedPermissions::IdentitySource.IdentitySourceConfiguration", - "markdownDescription": "Contains configuration information used when creating a new identity source.\n\n> At this time, the only valid member of this structure is a Amazon Cognito user pool configuration.\n> \n> You must specify a `userPoolArn` , and optionally, a `ClientId` . \n\nThis data type is used as a request parameter for the [CreateIdentitySource](https://docs.aws.amazon.com/verifiedpermissions/latest/apireference/API_CreateIdentitySource.html) operation.", + "markdownDescription": "Contains configuration information about an identity source.", "title": "Configuration" }, "PolicyStoreId": { @@ -256723,6 +262006,20 @@ ], "type": "object" }, + "AWS::VerifiedPermissions::IdentitySource.CognitoGroupConfiguration": { + "additionalProperties": false, + "properties": { + "GroupEntityType": { + "markdownDescription": "The name of the schema entity type that's mapped to the user pool group. 
Defaults to `AWS::CognitoGroup` .", + "title": "GroupEntityType", + "type": "string" + } + }, + "required": [ + "GroupEntityType" + ], + "type": "object" + }, "AWS::VerifiedPermissions::IdentitySource.CognitoUserPoolConfiguration": { "additionalProperties": false, "properties": { @@ -256734,6 +262031,11 @@ "title": "ClientIds", "type": "array" }, + "GroupConfiguration": { + "$ref": "#/definitions/AWS::VerifiedPermissions::IdentitySource.CognitoGroupConfiguration", + "markdownDescription": "The type of entity that a policy store maps to groups from an Amazon Cognito user pool identity source.", + "title": "GroupConfiguration" + }, "UserPoolArn": { "markdownDescription": "The [Amazon Resource Name (ARN)](https://docs.aws.amazon.com//general/latest/gr/aws-arns-and-namespaces.html) of the Amazon Cognito user pool that contains the identities to be authorized.", "title": "UserPoolArn", @@ -256759,35 +262061,6 @@ ], "type": "object" }, - "AWS::VerifiedPermissions::IdentitySource.IdentitySourceDetails": { - "additionalProperties": false, - "properties": { - "ClientIds": { - "items": { - "type": "string" - }, - "markdownDescription": "The application client IDs associated with the specified Amazon Cognito user pool that are enabled for this identity source.", - "title": "ClientIds", - "type": "array" - }, - "DiscoveryUrl": { - "markdownDescription": "The well-known URL that points to this user pool's OIDC discovery endpoint. This is a URL string in the following format. This URL replaces the placeholders for both the AWS Region and the user pool identifier with those appropriate for this user pool.\n\n`https://cognito-idp. ** .amazonaws.com/ ** /.well-known/openid-configuration`", - "title": "DiscoveryUrl", - "type": "string" - }, - "OpenIdIssuer": { - "markdownDescription": "A string that identifies the type of OIDC service represented by this identity source.\n\nAt this time, the only valid value is `cognito` .", - "title": "OpenIdIssuer", - "type": "string" - }, - "UserPoolArn": { - "markdownDescription": "The [Amazon Resource Name (ARN)](https://docs.aws.amazon.com//general/latest/gr/aws-arns-and-namespaces.html) of the Amazon Cognito user pool whose identities are accessible to this Verified Permissions policy store.", - "title": "UserPoolArn", - "type": "string" - } - }, - "type": "object" - }, "AWS::VerifiedPermissions::Policy": { "additionalProperties": false, "properties": { @@ -264524,7 +269797,7 @@ "items": { "type": "string" }, - "markdownDescription": "The fields from the source that are made available to your agents in Amazon Q. Optional if ObjectConfiguration is included in the provided DataIntegration.\n\n- For [Salesforce](https://docs.aws.amazon.com/https://developer.salesforce.com/docs/atlas.en-us.knowledge_dev.meta/knowledge_dev/sforce_api_objects_knowledge__kav.htm) , you must include at least `Id` , `ArticleNumber` , `VersionNumber` , `Title` , `PublishStatus` , and `IsDeleted` .\n- For [ServiceNow](https://docs.aws.amazon.com/https://developer.servicenow.com/dev.do#!/reference/api/rome/rest/knowledge-management-api) , you must include at least `number` , `short_description` , `sys_mod_count` , `workflow_state` , and `active` .\n- For [Zendesk](https://docs.aws.amazon.com/https://developer.zendesk.com/api-reference/help_center/help-center-api/articles/) , you must include at least `id` , `title` , `updated_at` , and `draft` .\n\nMake sure to include additional fields. 
These fields are indexed and used to source recommendations.", + "markdownDescription": "The fields from the source that are made available to your agents in Amazon Q in Connect. Optional if ObjectConfiguration is included in the provided DataIntegration.\n\n- For [Salesforce](https://docs.aws.amazon.com/https://developer.salesforce.com/docs/atlas.en-us.knowledge_dev.meta/knowledge_dev/sforce_api_objects_knowledge__kav.htm) , you must include at least `Id` , `ArticleNumber` , `VersionNumber` , `Title` , `PublishStatus` , and `IsDeleted` .\n- For [ServiceNow](https://docs.aws.amazon.com/https://developer.servicenow.com/dev.do#!/reference/api/rome/rest/knowledge-management-api) , you must include at least `number` , `short_description` , `sys_mod_count` , `workflow_state` , and `active` .\n- For [Zendesk](https://docs.aws.amazon.com/https://developer.zendesk.com/api-reference/help_center/help-center-api/articles/) , you must include at least `id` , `title` , `updated_at` , and `draft` .\n\nMake sure to include additional fields. These fields are indexed and used to source recommendations.", "title": "ObjectFields", "type": "array" } @@ -264850,12 +270123,12 @@ "type": "string" }, "DesktopArn": { - "markdownDescription": "The Amazon Resource Name (ARN) of the desktop to stream from Amazon WorkSpaces , WorkSpaces Web, or AppStream 2.0 .", + "markdownDescription": "The Amazon Resource Name (ARN) of the desktop to stream from Amazon WorkSpaces, WorkSpaces Web, or AppStream 2.0.", "title": "DesktopArn", "type": "string" }, "DesktopEndpoint": { - "markdownDescription": "The URL for the identity provider login (only for environments that use AppStream 2.0 ).", + "markdownDescription": "The URL for the identity provider login (only for environments that use AppStream 2.0).", "title": "DesktopEndpoint", "type": "string" }, @@ -274429,6 +279702,18 @@ { "$ref": "#/definitions/AWS::Batch::SchedulingPolicy" }, + { + "$ref": "#/definitions/AWS::Bedrock::Agent" + }, + { + "$ref": "#/definitions/AWS::Bedrock::AgentAlias" + }, + { + "$ref": "#/definitions/AWS::Bedrock::DataSource" + }, + { + "$ref": "#/definitions/AWS::Bedrock::KnowledgeBase" + }, { "$ref": "#/definitions/AWS::BillingConductor::BillingGroup" }, @@ -274492,6 +279777,12 @@ { "$ref": "#/definitions/AWS::CleanRooms::Membership" }, + { + "$ref": "#/definitions/AWS::CleanRooms::PrivacyBudgetTemplate" + }, + { + "$ref": "#/definitions/AWS::CleanRoomsML::TrainingDataset" + }, { "$ref": "#/definitions/AWS::Cloud9::EnvironmentEC2" }, @@ -274639,6 +279930,9 @@ { "$ref": "#/definitions/AWS::CodeCommit::Repository" }, + { + "$ref": "#/definitions/AWS::CodeConnections::Connection" + }, { "$ref": "#/definitions/AWS::CodeDeploy::Application" }, @@ -274978,6 +280272,30 @@ { "$ref": "#/definitions/AWS::DataZone::SubscriptionTarget" }, + { + "$ref": "#/definitions/AWS::Deadline::Farm" + }, + { + "$ref": "#/definitions/AWS::Deadline::Fleet" + }, + { + "$ref": "#/definitions/AWS::Deadline::LicenseEndpoint" + }, + { + "$ref": "#/definitions/AWS::Deadline::MeteredProduct" + }, + { + "$ref": "#/definitions/AWS::Deadline::Queue" + }, + { + "$ref": "#/definitions/AWS::Deadline::QueueEnvironment" + }, + { + "$ref": "#/definitions/AWS::Deadline::QueueFleetAssociation" + }, + { + "$ref": "#/definitions/AWS::Deadline::StorageProfile" + }, { "$ref": "#/definitions/AWS::Detective::Graph" }, @@ -275320,6 +280638,9 @@ { "$ref": "#/definitions/AWS::ECR::Repository" }, + { + "$ref": "#/definitions/AWS::ECR::RepositoryCreationTemplate" + }, { "$ref": 
"#/definitions/AWS::ECS::CapacityProvider" }, @@ -275473,9 +280794,15 @@ { "$ref": "#/definitions/AWS::EntityResolution::IdMappingWorkflow" }, + { + "$ref": "#/definitions/AWS::EntityResolution::IdNamespace" + }, { "$ref": "#/definitions/AWS::EntityResolution::MatchingWorkflow" }, + { + "$ref": "#/definitions/AWS::EntityResolution::PolicyStatement" + }, { "$ref": "#/definitions/AWS::EntityResolution::SchemaMapping" }, @@ -275617,6 +280944,9 @@ { "$ref": "#/definitions/AWS::GlobalAccelerator::Accelerator" }, + { + "$ref": "#/definitions/AWS::GlobalAccelerator::CrossAccountAttachment" + }, { "$ref": "#/definitions/AWS::GlobalAccelerator::EndpointGroup" }, @@ -275824,15 +281154,24 @@ { "$ref": "#/definitions/AWS::IVS::Channel" }, + { + "$ref": "#/definitions/AWS::IVS::EncoderConfiguration" + }, { "$ref": "#/definitions/AWS::IVS::PlaybackKeyPair" }, + { + "$ref": "#/definitions/AWS::IVS::PlaybackRestrictionPolicy" + }, { "$ref": "#/definitions/AWS::IVS::RecordingConfiguration" }, { "$ref": "#/definitions/AWS::IVS::Stage" }, + { + "$ref": "#/definitions/AWS::IVS::StorageConfiguration" + }, { "$ref": "#/definitions/AWS::IVS::StreamKey" }, @@ -277336,15 +282675,30 @@ { "$ref": "#/definitions/AWS::SecurityHub::AutomationRule" }, + { + "$ref": "#/definitions/AWS::SecurityHub::DelegatedAdmin" + }, { "$ref": "#/definitions/AWS::SecurityHub::Hub" }, + { + "$ref": "#/definitions/AWS::SecurityHub::Insight" + }, + { + "$ref": "#/definitions/AWS::SecurityHub::ProductSubscription" + }, { "$ref": "#/definitions/AWS::SecurityHub::Standard" }, + { + "$ref": "#/definitions/AWS::SecurityLake::AwsLogSource" + }, { "$ref": "#/definitions/AWS::SecurityLake::DataLake" }, + { + "$ref": "#/definitions/AWS::SecurityLake::Subscriber" + }, { "$ref": "#/definitions/AWS::ServiceCatalog::AcceptedPortfolioShare" }, @@ -277474,6 +282828,9 @@ { "$ref": "#/definitions/AWS::Timestream::Database" }, + { + "$ref": "#/definitions/AWS::Timestream::InfluxDBInstance" + }, { "$ref": "#/definitions/AWS::Timestream::ScheduledQuery" }, diff --git a/schema_source/cloudformation-docs.json b/schema_source/cloudformation-docs.json index 5c019c39f..6fca09b50 100644 --- a/schema_source/cloudformation-docs.json +++ b/schema_source/cloudformation-docs.json @@ -466,12 +466,10 @@ "AppId": "The unique ID for an Amplify app.", "AutoSubDomainCreationPatterns": "Sets the branch patterns for automatic subdomain creation.", "AutoSubDomainIAMRole": "The required AWS Identity and Access Management (IAMlong) service role for the Amazon Resource Name (ARN) for automatically creating subdomains.", - "Certificate": "Describes the SSL/TLS certificate for the domain association. This can be your own custom certificate or the default certificate that Amplify provisions for you.\n\nIf you are updating your domain to use a different certificate, `Certificate` points to the new certificate that is being created instead of the current active certificate. Otherwise, `Certificate` points to the current active certificate.", "CertificateSettings": "The type of SSL/TLS certificate to use for your custom domain. If you don't specify a certificate type, Amplify uses the default certificate that it provisions and manages for you.", "DomainName": "The domain name for the domain association.", "EnableAutoSubDomain": "Enables the automated creation of subdomains for branches.", - "SubDomainSettings": "The setting for the subdomain.", - "UpdateStatus": "The status of the domain update operation that is currently in progress. 
The following list describes the valid update states.\n\n- **REQUESTING_CERTIFICATE** - The certificate is in the process of being updated.\n- **PENDING_VERIFICATION** - Indicates that an Amplify managed certificate is in the process of being verified. This occurs during the creation of a custom domain or when a custom domain is updated to use a managed certificate.\n- **IMPORTING_CUSTOM_CERTIFICATE** - Indicates that an Amplify custom certificate is in the process of being imported. This occurs during the creation of a custom domain or when a custom domain is updated to use a custom certificate.\n- **PENDING_DEPLOYMENT** - Indicates that the subdomain or certificate changes are being propagated.\n- **AWAITING_APP_CNAME** - Amplify is waiting for CNAME records corresponding to subdomains to be propagated. If your custom domain is on Route\u00a053, Amplify handles this for you automatically. For more information about custom domains, see [Setting up custom domains](https://docs.aws.amazon.com/amplify/latest/userguide/custom-domains.html) in the *Amplify Hosting User Guide* .\n- **UPDATE_COMPLETE** - The certificate has been associated with a domain.\n- **UPDATE_FAILED** - The certificate has failed to be provisioned or associated, and there is no existing active certificate to roll back to." + "SubDomainSettings": "The setting for the subdomain." }, "AWS::Amplify::Domain Certificate": { "CertificateArn": "The Amazon resource name (ARN) for a custom certificate that you have already added to AWS Certificate Manager in your AWS account .\n\nThis field is required only when the certificate type is `CUSTOM` .", @@ -1280,7 +1278,7 @@ "AWS::AppConfig::ConfigurationProfile": { "ApplicationId": "The application ID.", "Description": "A description of the configuration profile.", - "KmsKeyIdentifier": "", + "KmsKeyIdentifier": "The AWS Key Management Service key identifier (key ID, key alias, or key ARN) provided when the resource was created or updated.", "LocationUri": "A URI to locate the configuration. You can specify the following:\n\n- For the AWS AppConfig hosted configuration store and for feature flags, specify `hosted` .\n- For an AWS Systems Manager Parameter Store parameter, specify either the parameter name in the format `ssm-parameter://` or the ARN.\n- For an AWS CodePipeline pipeline, specify the URI in the following format: `codepipeline` ://.\n- For an AWS Secrets Manager secret, specify the URI in the following format: `secretsmanager` ://.\n- For an Amazon S3 object, specify the URI in the following format: `s3:///` . Here is an example: `s3://my-bucket/my-app/us-east-1/my-config.json`\n- For an SSM document, specify either the document name in the format `ssm-document://` or the Amazon Resource Name (ARN).", "Name": "A name for the configuration profile.", "RetrievalRoleArn": "The ARN of an IAM role with permission to access the configuration at the specified `LocationUri` .\n\n> A retrieval role ARN is not required for configurations stored in the AWS AppConfig hosted configuration store. It is required for all other sources that store your configuration.", @@ -1302,15 +1300,15 @@ "ConfigurationVersion": "The configuration version to deploy. If deploying an AWS AppConfig hosted configuration version, you can specify either the version number or version label. 
For all other configurations, you must specify the version number.", "DeploymentStrategyId": "The deployment strategy ID.", "Description": "A description of the deployment.", - "DynamicExtensionParameters": "The parameters accepted by the extension. You specify parameter values when you associate the extension to an AWS AppConfig resource by using the `CreateExtensionAssociation` API action. For AWS Lambda extension actions, these parameters are included in the Lambda request object.", + "DynamicExtensionParameters": "A map of dynamic extension parameter names to values to pass to associated extensions with `PRE_START_DEPLOYMENT` actions.", "EnvironmentId": "The environment ID.", "KmsKeyIdentifier": "The AWS Key Management Service key identifier (key ID, key alias, or key ARN) provided when the resource was created or updated.", "Tags": "Metadata to assign to the deployment. Tags help organize and categorize your AWS AppConfig resources. Each tag consists of a key and an optional value, both of which you define." }, "AWS::AppConfig::Deployment DynamicExtensionParameters": { - "ExtensionReference": "", - "ParameterName": "", - "ParameterValue": "" + "ExtensionReference": "The ARN or ID of the extension for which you are inserting a dynamic parameter.", + "ParameterName": "The parameter name.", + "ParameterValue": "The parameter value." }, "AWS::AppConfig::Deployment Tags": { "Key": "The key-value string map. The valid character set is `[a-zA-Z+-=._:/]` . The tag key can be up to 128 characters and must not start with `aws:` .", @@ -1342,8 +1340,8 @@ "AlarmRoleArn": "ARN of an AWS Identity and Access Management (IAM) role for AWS AppConfig to monitor `AlarmArn` ." }, "AWS::AppConfig::Environment Tag": { - "Key": "", - "Value": "" + "Key": "A key and optional value to help you categorize resources.", + "Value": "An optional value for a tag key." }, "AWS::AppConfig::Extension": { "Actions": "The actions defined in the extension.", @@ -1354,7 +1352,7 @@ "Tags": "Adds one or more tags for the specified extension. Tags are metadata that help you categorize resources in different ways, for example, by purpose, owner, or environment. Each tag consists of a key and an optional value, both of which you define." }, "AWS::AppConfig::Extension Action": { - "Description": "Information about the action.", + "Description": "Information about actions defined in the extension.", "Name": "The action name.", "RoleArn": "An Amazon Resource Name (ARN) for an AWS Identity and Access Management assume role.", "Uri": "The extension URI associated to the action point in the extension definition. The URI can be an Amazon Resource Name (ARN) for one of the following: an AWS Lambda function, an Amazon Simple Queue Service queue, an Amazon Simple Notification Service topic, or the Amazon EventBridge default event bus." @@ -1365,8 +1363,8 @@ "Required": "A parameter value must be specified in the extension association." }, "AWS::AppConfig::Extension Tag": { - "Key": "", - "Value": "" + "Key": "A key and optional value to help you categorize resources.", + "Value": "An optional value for a tag key." }, "AWS::AppConfig::ExtensionAssociation": { "ExtensionIdentifier": "The name, the ID, or the Amazon Resource Name (ARN) of the extension.", @@ -1376,7 +1374,7 @@ "Tags": "Adds one or more tags for the specified extension association. Tags are metadata that help you categorize resources in different ways, for example, by purpose, owner, or environment. 
Each tag consists of a key and an optional value, both of which you define." }, "AWS::AppConfig::ExtensionAssociation Tag": { - "Key": "", + "Key": "A key and optional value to help you categorize resources.", "Value": "" }, "AWS::AppConfig::HostedConfigurationVersion": { @@ -1933,6 +1931,7 @@ "Description": "The description of the application.", "Name": "The name of the application.", "Namespace": "The namespace of the application.", + "Permissions": "", "Tags": "The tags used to organize, track, or control access for this resource. For example, { \"tags\": {\"key1\":\"value1\", \"key2\":\"value2\"} }." }, "AWS::AppIntegrations::Application ApplicationSourceConfig": { @@ -2752,7 +2751,7 @@ "AWS::AppRunner::VpcConnector": { "SecurityGroups": "A list of IDs of security groups that App Runner should use for access to AWS resources under the specified subnets. If not specified, App Runner uses the default security group of the Amazon VPC. The default security group allows all outbound traffic.", "Subnets": "A list of IDs of subnets that App Runner should use when it associates your service with a custom Amazon VPC. Specify IDs of subnets of a single Amazon VPC. App Runner determines the Amazon VPC from the subnets you specify.\n\n> App Runner currently only provides support for IPv4.", - "Tags": "A list of metadata items that you can associate with your VPC connector resource. A tag is a key-value pair.", + "Tags": "A list of metadata items that you can associate with your VPC connector resource. A tag is a key-value pair.\n\n> A `VpcConnector` is immutable, so you cannot update its tags. To change the tags, replace the resource. To replace a `VpcConnector` , you must provide a new combination of security groups.", "VpcConnectorName": "A name for the VPC connector.\n\nIf you don't specify a name, AWS CloudFormation generates a name for your VPC connector." }, "AWS::AppRunner::VpcConnector Tag": { @@ -3599,30 +3598,30 @@ "AvailabilityZones": "A list of Availability Zones where instances in the Auto Scaling group can be created. Used for launching into the default VPC subnet in each Availability Zone when not using the `VPCZoneIdentifier` property, or for attaching a network interface when an existing network interface ID is specified in a launch template.", "CapacityRebalance": "Indicates whether Capacity Rebalancing is enabled. Otherwise, Capacity Rebalancing is disabled. When you turn on Capacity Rebalancing, Amazon EC2 Auto Scaling attempts to launch a Spot Instance whenever Amazon EC2 notifies that a Spot Instance is at an elevated risk of interruption. After launching a new instance, it then terminates an old instance. For more information, see [Use Capacity Rebalancing to handle Amazon EC2 Spot Interruptions](https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-capacity-rebalancing.html) in the *Amazon EC2 Auto Scaling User Guide* .", "Context": "Reserved.", - "Cooldown": "*Only needed if you use simple scaling policies.*\n\nThe amount of time, in seconds, between one scaling activity ending and another one starting due to simple scaling policies. 
For more information, see [Scaling cooldowns for Amazon EC2 Auto Scaling](https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-scaling-cooldowns.html) in the *Amazon EC2 Auto Scaling User Guide* .\n\nDefault: `300` seconds", "DefaultInstanceWarmup": "The amount of time, in seconds, until a new instance is considered to have finished initializing and resource consumption to become stable after it enters the `InService` state.\n\nDuring an instance refresh, Amazon EC2 Auto Scaling waits for the warm-up period after it replaces an instance before it moves on to replacing the next instance. Amazon EC2 Auto Scaling also waits for the warm-up period before aggregating the metrics for new instances with existing instances in the Amazon CloudWatch metrics that are used for scaling, resulting in more reliable usage data. For more information, see [Set the default instance warmup for an Auto Scaling group](https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-default-instance-warmup.html) in the *Amazon EC2 Auto Scaling User Guide* .\n\n> To manage various warm-up settings at the group level, we recommend that you set the default instance warmup, *even if it is set to 0 seconds* . To remove a value that you previously set, include the property but specify `-1` for the value. However, we strongly recommend keeping the default instance warmup enabled by specifying a value of `0` or other nominal value. \n\nDefault: None", "DesiredCapacity": "The desired capacity is the initial capacity of the Auto Scaling group at the time of its creation and the capacity it attempts to maintain. It can scale beyond this capacity if you configure automatic scaling.\n\nThe number must be greater than or equal to the minimum size of the group and less than or equal to the maximum size of the group. If you do not specify a desired capacity when creating the stack, the default is the minimum size of the group.\n\nCloudFormation marks the Auto Scaling group as successful (by setting its status to CREATE_COMPLETE) when the desired capacity is reached. However, if a maximum Spot price is set in the launch template or launch configuration that you specified, then desired capacity is not used as a criteria for success. Whether your request is fulfilled depends on Spot Instance capacity and your maximum price.", - "DesiredCapacityType": "The unit of measurement for the value specified for desired capacity. Amazon EC2 Auto Scaling supports `DesiredCapacityType` for attribute-based instance type selection only. For more information, see [Creating an Auto Scaling group using attribute-based instance type selection](https://docs.aws.amazon.com/autoscaling/ec2/userguide/create-asg-instance-type-requirements.html) in the *Amazon EC2 Auto Scaling User Guide* .\n\nBy default, Amazon EC2 Auto Scaling specifies `units` , which translates into number of instances.\n\nValid values: `units` | `vcpu` | `memory-mib`", + "DesiredCapacityType": "The unit of measurement for the value specified for desired capacity. Amazon EC2 Auto Scaling supports `DesiredCapacityType` for attribute-based instance type selection only. 
For more information, see [Create a mixed instances group using attribute-based instance type selection](https://docs.aws.amazon.com/autoscaling/ec2/userguide/create-mixed-instances-group-attribute-based-instance-type-selection.html) in the *Amazon EC2 Auto Scaling User Guide* .\n\nBy default, Amazon EC2 Auto Scaling specifies `units` , which translates into number of instances.\n\nValid values: `units` | `vcpu` | `memory-mib`", "HealthCheckGracePeriod": "The amount of time, in seconds, that Amazon EC2 Auto Scaling waits before checking the health status of an EC2 instance that has come into service and marking it unhealthy due to a failed health check. This is useful if your instances do not immediately pass their health checks after they enter the `InService` state. For more information, see [Set the health check grace period for an Auto Scaling group](https://docs.aws.amazon.com/autoscaling/ec2/userguide/health-check-grace-period.html) in the *Amazon EC2 Auto Scaling User Guide* .\n\nDefault: `0` seconds", - "HealthCheckType": "A comma-separated value string of one or more health check types.\n\nThe valid values are `EC2` , `ELB` , and `VPC_LATTICE` . `EC2` is the default health check and cannot be disabled. For more information, see [Health checks for Auto Scaling instances](https://docs.aws.amazon.com/autoscaling/ec2/userguide/healthcheck.html) in the *Amazon EC2 Auto Scaling User Guide* .\n\nOnly specify `EC2` if you must clear a value that was previously set.", + "HealthCheckType": "A comma-separated value string of one or more health check types.\n\nThe valid values are `EC2` , `ELB` , and `VPC_LATTICE` . `EC2` is the default health check and cannot be disabled. For more information, see [Health checks for instances in an Auto Scaling group](https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-health-checks.html) in the *Amazon EC2 Auto Scaling User Guide* .\n\nOnly specify `EC2` if you must clear a value that was previously set.", "InstanceId": "The ID of the instance used to base the launch configuration on. For more information, see [Create an Auto Scaling group using an EC2 instance](https://docs.aws.amazon.com/autoscaling/ec2/userguide/create-asg-from-instance.html) in the *Amazon EC2 Auto Scaling User Guide* .\n\nIf you specify `LaunchTemplate` , `MixedInstancesPolicy` , or `LaunchConfigurationName` , don't specify `InstanceId` .", "InstanceMaintenancePolicy": "An instance maintenance policy. For more information, see [Set instance maintenance policy](https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-instance-maintenance-policy.html) in the *Amazon EC2 Auto Scaling User Guide* .", "LaunchConfigurationName": "The name of the launch configuration to use to launch instances.\n\nRequired only if you don't specify `LaunchTemplate` , `MixedInstancesPolicy` , or `InstanceId` .", "LaunchTemplate": "Information used to specify the launch template and version to use to launch instances. You can alternatively associate a launch template to the Auto Scaling group by specifying a `MixedInstancesPolicy` . 
For more information about creating launch templates, see [Create a launch template for an Auto Scaling group](https://docs.aws.amazon.com/autoscaling/ec2/userguide/create-launch-template.html) in the *Amazon EC2 Auto Scaling User Guide* .\n\nIf you omit this property, you must specify `MixedInstancesPolicy` , `LaunchConfigurationName` , or `InstanceId` .", "LifecycleHookSpecificationList": "One or more lifecycle hooks to add to the Auto Scaling group before instances are launched.", "LoadBalancerNames": "A list of Classic Load Balancers associated with this Auto Scaling group. For Application Load Balancers, Network Load Balancers, and Gateway Load Balancers, specify the `TargetGroupARNs` property instead.", - "MaxInstanceLifetime": "The maximum amount of time, in seconds, that an instance can be in service. The default is null. If specified, the value must be either 0 or a number equal to or greater than 86,400 seconds (1 day). For more information, see [Replacing Auto Scaling instances based on maximum instance lifetime](https://docs.aws.amazon.com/autoscaling/ec2/userguide/asg-max-instance-lifetime.html) in the *Amazon EC2 Auto Scaling User Guide* .", + "MaxInstanceLifetime": "The maximum amount of time, in seconds, that an instance can be in service. The default is null. If specified, the value must be either 0 or a number equal to or greater than 86,400 seconds (1 day). For more information, see [Replace Auto Scaling instances based on maximum instance lifetime](https://docs.aws.amazon.com/autoscaling/ec2/userguide/asg-max-instance-lifetime.html) in the *Amazon EC2 Auto Scaling User Guide* .", "MaxSize": "The maximum size of the group.\n\n> With a mixed instances policy that uses instance weighting, Amazon EC2 Auto Scaling may need to go above `MaxSize` to meet your capacity requirements. In this event, Amazon EC2 Auto Scaling will never go above `MaxSize` by more than your largest instance weight (weights that define how many units each instance contributes to the desired capacity of the group).", "MetricsCollection": "Enables the monitoring of group metrics of an Auto Scaling group. By default, these metrics are disabled.", "MinSize": "The minimum size of the group.", "MixedInstancesPolicy": "An embedded object that specifies a mixed instances policy.\n\nThe policy includes properties that not only define the distribution of On-Demand Instances and Spot Instances, the maximum price to pay for Spot Instances (optional), and how the Auto Scaling group allocates instance types to fulfill On-Demand and Spot capacities, but also the properties that specify the instance configuration information\u2014the launch template and instance types. The policy can also include a weight for each instance type and different launch templates for individual instance types.\n\nFor more information, see [Auto Scaling groups with multiple instance types and purchase options](https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-mixed-instances-groups.html) in the *Amazon EC2 Auto Scaling User Guide* .", - "NewInstancesProtectedFromScaleIn": "Indicates whether newly launched instances are protected from termination by Amazon EC2 Auto Scaling when scaling in. 
For more information about preventing instances from terminating on scale in, see [Using instance scale-in protection](https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-instance-protection.html) in the *Amazon EC2 Auto Scaling User Guide* .", + "NewInstancesProtectedFromScaleIn": "Indicates whether newly launched instances are protected from termination by Amazon EC2 Auto Scaling when scaling in. For more information about preventing instances from terminating on scale in, see [Use instance scale-in protection](https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-instance-protection.html) in the *Amazon EC2 Auto Scaling User Guide* .", "NotificationConfigurations": "Configures an Auto Scaling group to send notifications when specified events take place.", "PlacementGroup": "The name of the placement group into which to launch your instances. For more information, see [Placement groups](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/placement-groups.html) in the *Amazon EC2 User Guide for Linux Instances* .\n\n> A *cluster* placement group is a logical grouping of instances within a single Availability Zone. You cannot specify multiple Availability Zones and a cluster placement group.", "ServiceLinkedRoleARN": "The Amazon Resource Name (ARN) of the service-linked role that the Auto Scaling group uses to call other AWS services on your behalf. By default, Amazon EC2 Auto Scaling uses a service-linked role named `AWSServiceRoleForAutoScaling` , which it creates if it does not exist. For more information, see [Service-linked roles](https://docs.aws.amazon.com/autoscaling/ec2/userguide/autoscaling-service-linked-role.html) in the *Amazon EC2 Auto Scaling User Guide* .", "Tags": "One or more tags. You can tag your Auto Scaling group and propagate the tags to the Amazon EC2 instances it launches. Tags are not propagated to Amazon EBS volumes. To add tags to Amazon EBS volumes, specify the tags in a launch template but use caution. If the launch template specifies an instance tag with a key that is also specified for the Auto Scaling group, Amazon EC2 Auto Scaling overrides the value of that instance tag with the value specified by the Auto Scaling group. For more information, see [Tag Auto Scaling groups and instances](https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-tagging.html) in the *Amazon EC2 Auto Scaling User Guide* .", "TargetGroupARNs": "The Amazon Resource Names (ARNs) of the Elastic Load Balancing target groups to associate with the Auto Scaling group. Instances are registered as targets with the target groups. The target groups receive incoming traffic and route requests to one or more registered targets. For more information, see [Use Elastic Load Balancing to distribute traffic across the instances in your Auto Scaling group](https://docs.aws.amazon.com/autoscaling/ec2/userguide/autoscaling-load-balancer.html) in the *Amazon EC2 Auto Scaling User Guide* .", - "TerminationPolicies": "A policy or a list of policies that are used to select the instance to terminate. These policies are executed in the order that you list them. 
For more information, see [Work with Amazon EC2 Auto Scaling termination policies](https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-termination-policies.html) in the *Amazon EC2 Auto Scaling User Guide* .\n\nValid values: `Default` | `AllocationStrategy` | `ClosestToNextInstanceHour` | `NewestInstance` | `OldestInstance` | `OldestLaunchConfiguration` | `OldestLaunchTemplate` | `arn:aws:lambda:region:account-id:function:my-function:my-alias`", + "TerminationPolicies": "A policy or a list of policies that are used to select the instance to terminate. These policies are executed in the order that you list them. For more information, see [Configure termination policies for Amazon EC2 Auto Scaling](https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-termination-policies.html) in the *Amazon EC2 Auto Scaling User Guide* .\n\nValid values: `Default` | `AllocationStrategy` | `ClosestToNextInstanceHour` | `NewestInstance` | `OldestInstance` | `OldestLaunchConfiguration` | `OldestLaunchTemplate` | `arn:aws:lambda:region:account-id:function:my-function:my-alias`", "VPCZoneIdentifier": "A list of subnet IDs for a virtual private cloud (VPC) where instances in the Auto Scaling group can be created.\n\nIf this resource specifies public subnets and is also in a VPC that is defined in the same stack template, you must use the [DependsOn attribute](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-attribute-dependson.html) to declare a dependency on the [VPC-gateway attachment](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ec2-vpc-gateway-attachment.html) .\n\n> When you update `VPCZoneIdentifier` , this retains the same Auto Scaling group and replaces old instances with new ones, according to the specified subnets. You can optionally specify how CloudFormation handles these updates by using an [UpdatePolicy attribute](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-attribute-updatepolicy.html) . \n\nRequired to launch instances into a nondefault VPC. If you specify `VPCZoneIdentifier` with `AvailabilityZones` , the subnets that you specify for this property must reside in those Availability Zones." }, "AWS::AutoScaling::AutoScalingGroup AcceleratorCountRequest": { @@ -3681,7 +3680,7 @@ }, "AWS::AutoScaling::AutoScalingGroup LaunchTemplateOverrides": { "InstanceRequirements": "The instance requirements. Amazon EC2 Auto Scaling uses your specified requirements to identify instance types. Then, it uses your On-Demand and Spot allocation strategies to launch instances from these instance types.\n\nYou can specify up to four separate sets of instance requirements per Auto Scaling group. This is useful for provisioning instances from different Amazon Machine Images (AMIs) in the same Auto Scaling group. To do this, create the AMIs and create a new launch template for each AMI. Then, create a compatible set of instance requirements for each launch template.\n\n> If you specify `InstanceRequirements` , you can't specify `InstanceType` .", - "InstanceType": "The instance type, such as `m3.xlarge` . You must specify an instance type that is supported in your requested Region and Availability Zones. For more information, see [Instance types](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html) in the *Amazon Elastic Compute Cloud User Guide* .\n\nYou can specify up to 40 instance types per Auto Scaling group.", + "InstanceType": "The instance type, such as `m3.xlarge` . 
You must specify an instance type that is supported in your requested Region and Availability Zones. For more information, see [Instance types](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html) in the *Amazon EC2 User Guide for Linux Instances* .\n\nYou can specify up to 40 instance types per Auto Scaling group.", "LaunchTemplateSpecification": "Provides a launch template for the specified instance type or set of instance requirements. For example, some instance types might require a launch template with a different AMI. If not provided, Amazon EC2 Auto Scaling uses the launch template that's specified in the `LaunchTemplate` definition. For more information, see [Specifying a different launch template for an instance type](https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-mixed-instances-groups-launch-template-overrides.html) in the *Amazon EC2 Auto Scaling User Guide* .\n\nYou can specify up to 20 launch templates per Auto Scaling group. The launch templates specified in the overrides and in the `LaunchTemplate` definition count towards this limit.", "WeightedCapacity": "If you provide a list of instance types to use, you can specify the number of capacity units provided by each instance type in terms of virtual CPUs, memory, storage, throughput, or other relative performance characteristic. When a Spot or On-Demand Instance is launched, the capacity units count toward the desired capacity. Amazon EC2 Auto Scaling launches instances until the desired capacity is totally fulfilled, even if this results in an overage. For example, if there are two units remaining to fulfill capacity, and Amazon EC2 Auto Scaling can only launch an instance with a `WeightedCapacity` of five units, the instance is launched, and the desired capacity is exceeded by three units. For more information, see [Configure instance weighting for Amazon EC2 Auto Scaling](https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-mixed-instances-groups-instance-weighting.html) in the *Amazon EC2 Auto Scaling User Guide* . Value must be in the range of 1-999.\n\nIf you specify a value for `WeightedCapacity` for one instance type, you must specify a value for `WeightedCapacity` for all of them.\n\n> Every Auto Scaling group has three size parameters ( `DesiredCapacity` , `MaxSize` , and `MinSize` ). Usually, you set these sizes based on a specific number of instances. However, if you configure a mixed instances policy that defines weights for the instance types, you must specify these sizes with the same units that you use for weighting instances." }, @@ -3697,7 +3696,7 @@ "LifecycleTransition": "The lifecycle transition. For Auto Scaling groups, there are two major lifecycle transitions.\n\n- To create a lifecycle hook for scale-out events, specify `autoscaling:EC2_INSTANCE_LAUNCHING` .\n- To create a lifecycle hook for scale-in events, specify `autoscaling:EC2_INSTANCE_TERMINATING` .", "NotificationMetadata": "Additional information that you want to include any time Amazon EC2 Auto Scaling sends a message to the notification target.", "NotificationTargetARN": "The Amazon Resource Name (ARN) of the notification target that Amazon EC2 Auto Scaling sends notifications to when an instance is in a wait state for the lifecycle hook. You can specify an Amazon SNS topic or an Amazon SQS queue.", - "RoleARN": "The ARN of the IAM role that allows the Auto Scaling group to publish to the specified notification target. 
For information about creating this role, see [Configure a notification target for a lifecycle hook](https://docs.aws.amazon.com/autoscaling/ec2/userguide/prepare-for-lifecycle-notifications.html#lifecycle-hook-notification-target) in the *Amazon EC2 Auto Scaling User Guide* .\n\nValid only if the notification target is an Amazon SNS topic or an Amazon SQS queue." + "RoleARN": "The ARN of the IAM role that allows the Auto Scaling group to publish to the specified notification target. For information about creating this role, see [Prepare to add a lifecycle hook to your Auto Scaling group](https://docs.aws.amazon.com/autoscaling/ec2/userguide/prepare-for-lifecycle-notifications.html) in the *Amazon EC2 Auto Scaling User Guide* .\n\nValid only if the notification target is an Amazon SNS topic or an Amazon SQS queue." }, "AWS::AutoScaling::AutoScalingGroup MemoryGiBPerVCpuRequest": { "Max": "The memory maximum in GiB.", @@ -3709,7 +3708,7 @@ }, "AWS::AutoScaling::AutoScalingGroup MetricsCollection": { "Granularity": "The frequency at which Amazon EC2 Auto Scaling sends aggregated data to CloudWatch. The only valid value is `1Minute` .", - "Metrics": "Identifies the metrics to enable.\n\nYou can specify one or more of the following metrics:\n\n- `GroupMinSize`\n- `GroupMaxSize`\n- `GroupDesiredCapacity`\n- `GroupInServiceInstances`\n- `GroupPendingInstances`\n- `GroupStandbyInstances`\n- `GroupTerminatingInstances`\n- `GroupTotalInstances`\n- `GroupInServiceCapacity`\n- `GroupPendingCapacity`\n- `GroupStandbyCapacity`\n- `GroupTerminatingCapacity`\n- `GroupTotalCapacity`\n- `WarmPoolDesiredCapacity`\n- `WarmPoolWarmedCapacity`\n- `WarmPoolPendingCapacity`\n- `WarmPoolTerminatingCapacity`\n- `WarmPoolTotalCapacity`\n- `GroupAndWarmPoolDesiredCapacity`\n- `GroupAndWarmPoolTotalCapacity`\n\nIf you specify `Granularity` and don't specify any metrics, all metrics are enabled.\n\nFor more information, see [Auto Scaling group metrics](https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-cloudwatch-monitoring.html#as-group-metrics) in the *Amazon EC2 Auto Scaling User Guide* ." + "Metrics": "Identifies the metrics to enable.\n\nYou can specify one or more of the following metrics:\n\n- `GroupMinSize`\n- `GroupMaxSize`\n- `GroupDesiredCapacity`\n- `GroupInServiceInstances`\n- `GroupPendingInstances`\n- `GroupStandbyInstances`\n- `GroupTerminatingInstances`\n- `GroupTotalInstances`\n- `GroupInServiceCapacity`\n- `GroupPendingCapacity`\n- `GroupStandbyCapacity`\n- `GroupTerminatingCapacity`\n- `GroupTotalCapacity`\n- `WarmPoolDesiredCapacity`\n- `WarmPoolWarmedCapacity`\n- `WarmPoolPendingCapacity`\n- `WarmPoolTerminatingCapacity`\n- `WarmPoolTotalCapacity`\n- `GroupAndWarmPoolDesiredCapacity`\n- `GroupAndWarmPoolTotalCapacity`\n\nIf you specify `Granularity` and don't specify any metrics, all metrics are enabled.\n\nFor more information, see [Amazon CloudWatch metrics for Amazon EC2 Auto Scaling](https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-metrics.html) in the *Amazon EC2 Auto Scaling User Guide* ." }, "AWS::AutoScaling::AutoScalingGroup MixedInstancesPolicy": { "InstancesDistribution": "The instances distribution.", @@ -3741,21 +3740,21 @@ "Min": "The minimum number of vCPUs." }, "AWS::AutoScaling::LaunchConfiguration": { - "AssociatePublicIpAddress": "Specifies whether to assign a public IPv4 address to the group's instances. 
If the instance is launched into a default subnet, the default is to assign a public IPv4 address, unless you disabled the option to assign a public IPv4 address on the subnet. If the instance is launched into a nondefault subnet, the default is not to assign a public IPv4 address, unless you enabled the option to assign a public IPv4 address on the subnet.\n\nIf you specify `true` , each instance in the Auto Scaling group receives a unique public IPv4 address. For more information, see [Launching Auto Scaling instances in a VPC](https://docs.aws.amazon.com/autoscaling/ec2/userguide/asg-in-vpc.html) in the *Amazon EC2 Auto Scaling User Guide* .\n\nIf you specify this property, you must specify at least one subnet for `VPCZoneIdentifier` when you create your group.", + "AssociatePublicIpAddress": "Specifies whether to assign a public IPv4 address to the group's instances. If the instance is launched into a default subnet, the default is to assign a public IPv4 address, unless you disabled the option to assign a public IPv4 address on the subnet. If the instance is launched into a nondefault subnet, the default is not to assign a public IPv4 address, unless you enabled the option to assign a public IPv4 address on the subnet.\n\nIf you specify `true` , each instance in the Auto Scaling group receives a unique public IPv4 address. For more information, see [Provide network connectivity for your Auto Scaling instances using Amazon VPC](https://docs.aws.amazon.com/autoscaling/ec2/userguide/asg-in-vpc.html) in the *Amazon EC2 Auto Scaling User Guide* .\n\nIf you specify this property, you must specify at least one subnet for `VPCZoneIdentifier` when you create your group.", "BlockDeviceMappings": "The block device mapping entries that define the block devices to attach to the instances at launch. By default, the block devices specified in the block device mapping for the AMI are used. For more information, see [Block device mappings](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/block-device-mapping-concepts.html) in the *Amazon EC2 User Guide for Linux Instances* .", "ClassicLinkVPCId": "Available for backward compatibility.", "ClassicLinkVPCSecurityGroups": "Available for backward compatibility.", - "EbsOptimized": "Specifies whether the launch configuration is optimized for EBS I/O ( `true` ) or not ( `false` ). The optimization provides dedicated throughput to Amazon EBS and an optimized configuration stack to provide optimal I/O performance. This optimization is not available with all instance types. Additional fees are incurred when you enable EBS optimization for an instance type that is not EBS-optimized by default. For more information, see [Amazon EBS-optimized instances](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSOptimized.html) in the *Amazon EC2 User Guide for Linux Instances* .\n\nThe default value is `false` .", + "EbsOptimized": "Specifies whether the launch configuration is optimized for EBS I/O ( `true` ) or not ( `false` ). The optimization provides dedicated throughput to Amazon EBS and an optimized configuration stack to provide optimal I/O performance. This optimization is not available with all instance types. Additional fees are incurred when you enable EBS optimization for an instance type that is not EBS-optimized by default. 
For more information, see [Amazon EBS-optimized instances](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-optimized.html) in the *Amazon EC2 User Guide for Linux Instances* .\n\nThe default value is `false` .", "IamInstanceProfile": "The name or the Amazon Resource Name (ARN) of the instance profile associated with the IAM role for the instance. The instance profile contains the IAM role. For more information, see [IAM role for applications that run on Amazon EC2 instances](https://docs.aws.amazon.com/autoscaling/ec2/userguide/us-iam-role.html) in the *Amazon EC2 Auto Scaling User Guide* .", - "ImageId": "The ID of the Amazon Machine Image (AMI) that was assigned during registration. For more information, see [Finding a Linux AMI](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/finding-an-ami.html) in the *Amazon EC2 User Guide for Linux Instances* .\n\nIf you specify `InstanceId` , an `ImageId` is not required.", + "ImageId": "The ID of the Amazon Machine Image (AMI) that was assigned during registration. For more information, see [Find a Linux AMI](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/finding-an-ami.html) in the *Amazon EC2 User Guide for Linux Instances* .\n\nIf you specify `InstanceId` , an `ImageId` is not required.", "InstanceId": "The ID of the Amazon EC2 instance to use to create the launch configuration. When you use an instance to create a launch configuration, all properties are derived from the instance with the exception of `BlockDeviceMapping` and `AssociatePublicIpAddress` . You can override any properties from the instance by specifying them in the launch configuration.", - "InstanceMonitoring": "Controls whether instances in this group are launched with detailed ( `true` ) or basic ( `false` ) monitoring.\n\nThe default value is `true` (enabled).\n\n> When detailed monitoring is enabled, Amazon CloudWatch generates metrics every minute and your account is charged a fee. When you disable detailed monitoring, CloudWatch generates metrics every 5 minutes. For more information, see [Configure Monitoring for Auto Scaling Instances](https://docs.aws.amazon.com/autoscaling/latest/userguide/enable-as-instance-metrics.html) in the *Amazon EC2 Auto Scaling User Guide* .", + "InstanceMonitoring": "Controls whether instances in this group are launched with detailed ( `true` ) or basic ( `false` ) monitoring.\n\nThe default value is `true` (enabled).\n\n> When detailed monitoring is enabled, Amazon CloudWatch generates metrics every minute and your account is charged a fee. When you disable detailed monitoring, CloudWatch generates metrics every 5 minutes. For more information, see [Configure monitoring for Auto Scaling instances](https://docs.aws.amazon.com/autoscaling/latest/userguide/enable-as-instance-metrics.html) in the *Amazon EC2 Auto Scaling User Guide* .", "InstanceType": "Specifies the instance type of the EC2 instance. For information about available instance types, see [Available instance types](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html#AvailableInstanceTypes) in the *Amazon EC2 User Guide for Linux Instances* .\n\nIf you specify `InstanceId` , an `InstanceType` is not required.", "KernelId": "The ID of the kernel associated with the AMI.\n\n> We recommend that you use PV-GRUB instead of kernels and RAM disks. 
For more information, see [User provided kernels](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/UserProvidedKernels.html) in the *Amazon EC2 User Guide for Linux Instances* .", - "KeyName": "The name of the key pair. For more information, see [Amazon EC2 key pairs and Linux instances](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html) in the *Amazon EC2 User Guide for Linux Instances* .", + "KeyName": "The name of the key pair. For more information, see [Amazon EC2 key pairs and Amazon EC2 instances](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html) in the *Amazon EC2 User Guide for Linux Instances* .", "LaunchConfigurationName": "The name of the launch configuration. This name must be unique per Region per account.", - "MetadataOptions": "The metadata options for the instances. For more information, see [Configuring the Instance Metadata Options](https://docs.aws.amazon.com/autoscaling/ec2/userguide/create-launch-config.html#launch-configurations-imds) in the *Amazon EC2 Auto Scaling User Guide* .", - "PlacementTenancy": "The tenancy of the instance, either `default` or `dedicated` . An instance with `dedicated` tenancy runs on isolated, single-tenant hardware and can only be launched into a VPC. To launch dedicated instances into a shared tenancy VPC (a VPC with the instance placement tenancy attribute set to `default` ), you must set the value of this property to `dedicated` . For more information, see [Configuring instance tenancy with Amazon EC2 Auto Scaling](https://docs.aws.amazon.com/autoscaling/ec2/userguide/auto-scaling-dedicated-instances.html) in the *Amazon EC2 Auto Scaling User Guide* .\n\nIf you specify `PlacementTenancy` , you must specify at least one subnet for `VPCZoneIdentifier` when you create your group.\n\nValid values: `default` | `dedicated`", + "MetadataOptions": "The metadata options for the instances. For more information, see [Configure the instance metadata options](https://docs.aws.amazon.com/autoscaling/ec2/userguide/create-launch-config.html#launch-configurations-imds) in the *Amazon EC2 Auto Scaling User Guide* .", + "PlacementTenancy": "The tenancy of the instance, either `default` or `dedicated` . An instance with `dedicated` tenancy runs on isolated, single-tenant hardware and can only be launched into a VPC. To launch dedicated instances into a shared tenancy VPC (a VPC with the instance placement tenancy attribute set to `default` ), you must set the value of this property to `dedicated` .\n\nIf you specify `PlacementTenancy` , you must specify at least one subnet for `VPCZoneIdentifier` when you create your group.\n\nValid values: `default` | `dedicated`", "RamDiskId": "The ID of the RAM disk to select.\n\n> We recommend that you use PV-GRUB instead of kernels and RAM disks. For more information, see [User provided kernels](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/UserProvidedKernels.html) in the *Amazon EC2 User Guide for Linux Instances* .", "SecurityGroups": "A list that contains the security groups to assign to the instances in the Auto Scaling group. 
The list can contain both the IDs of existing security groups and references to [SecurityGroup](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-security-group.html) resources created in the template.\n\nFor more information, see [Control traffic to resources using security groups](https://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_SecurityGroups.html) in the *Amazon Virtual Private Cloud User Guide* .", "SpotPrice": "The maximum hourly price to be paid for any Spot Instance launched to fulfill the request. Spot Instances are launched when the price you specify exceeds the current Spot price. For more information, see [Request Spot Instances for fault-tolerant and flexible applications](https://docs.aws.amazon.com/autoscaling/ec2/userguide/launch-template-spot-instances.html) in the *Amazon EC2 Auto Scaling User Guide* .\n\nValid Range: Minimum value of 0.001\n\n> When you change your maximum price by creating a new launch configuration, running instances will continue to run as long as the maximum price for those running instances is higher than the current Spot price.", @@ -3763,12 +3762,12 @@ }, "AWS::AutoScaling::LaunchConfiguration BlockDevice": { "DeleteOnTermination": "Indicates whether the volume is deleted on instance termination. For Amazon EC2 Auto Scaling, the default value is `true` .", - "Encrypted": "Specifies whether the volume should be encrypted. Encrypted EBS volumes can only be attached to instances that support Amazon EBS encryption. For more information, see [Supported instance types](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html#EBSEncryption_supported_instances) . If your AMI uses encrypted volumes, you can also only launch it on supported instance types.\n\n> If you are creating a volume from a snapshot, you cannot create an unencrypted volume from an encrypted snapshot. Also, you cannot specify a KMS key ID when using a launch configuration.\n> \n> If you enable encryption by default, the EBS volumes that you create are always encrypted, either using the AWS managed KMS key or a customer-managed KMS key, regardless of whether the snapshot was encrypted.\n> \n> For more information, see [Use AWS KMS keys to encrypt Amazon EBS volumes](https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-data-protection.html#encryption) in the *Amazon EC2 Auto Scaling User Guide* .", + "Encrypted": "Specifies whether the volume should be encrypted. Encrypted EBS volumes can only be attached to instances that support Amazon EBS encryption. For more information, see [Requirements for Amazon EBS encryption](https://docs.aws.amazon.com/ebs/latest/userguide/ebs-encryption-requirements.html) in the *Amazon EBS User Guide* . If your AMI uses encrypted volumes, you can also only launch it on supported instance types.\n\n> If you are creating a volume from a snapshot, you cannot create an unencrypted volume from an encrypted snapshot. 
Also, you cannot specify a KMS key ID when using a launch configuration.\n> \n> If you enable encryption by default, the EBS volumes that you create are always encrypted, either using the AWS managed KMS key or a customer-managed KMS key, regardless of whether the snapshot was encrypted.\n> \n> For more information, see [Use AWS KMS keys to encrypt Amazon EBS volumes](https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-data-protection.html#encryption) in the *Amazon EC2 Auto Scaling User Guide* .", "Iops": "The number of input/output (I/O) operations per second (IOPS) to provision for the volume. For `gp3` and `io1` volumes, this represents the number of IOPS that are provisioned for the volume. For `gp2` volumes, this represents the baseline performance of the volume and the rate at which the volume accumulates I/O credits for bursting.\n\nThe following are the supported values for each volume type:\n\n- `gp3` : 3,000-16,000 IOPS\n- `io1` : 100-64,000 IOPS\n\nFor `io1` volumes, we guarantee 64,000 IOPS only for [Instances built on the Nitro System](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html#ec2-nitro-instances) . Other instance families guarantee performance up to 32,000 IOPS.\n\n`Iops` is supported when the volume type is `gp3` or `io1` and required only when the volume type is `io1` . (Not used with `standard` , `gp2` , `st1` , or `sc1` volumes.)", "SnapshotId": "The snapshot ID of the volume to use.\n\nYou must specify either a `VolumeSize` or a `SnapshotId` .", "Throughput": "The throughput (MiBps) to provision for a `gp3` volume.", "VolumeSize": "The volume size, in GiBs. The following are the supported volumes sizes for each volume type:\n\n- `gp2` and `gp3` : 1-16,384\n- `io1` : 4-16,384\n- `st1` and `sc1` : 125-16,384\n- `standard` : 1-1,024\n\nYou must specify either a `SnapshotId` or a `VolumeSize` . If you specify both `SnapshotId` and `VolumeSize` , the volume size must be equal or greater than the size of the snapshot.", - "VolumeType": "The volume type. For more information, see [Amazon EBS volume types](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html) in the *Amazon EC2 User Guide for Linux Instances* .\n\nValid values: `standard` | `io1` | `gp2` | `st1` | `sc1` | `gp3`" + "VolumeType": "The volume type. For more information, see [Amazon EBS volume types](https://docs.aws.amazon.com/ebs/latest/userguide/ebs-volume-types.html) in the *Amazon EBS User Guide* .\n\nValid values: `standard` | `io1` | `gp2` | `st1` | `sc1` | `gp3`" }, "AWS::AutoScaling::LaunchConfiguration BlockDeviceMapping": { "DeviceName": "The device name assigned to the volume (for example, `/dev/sdh` or `xvdh` ). For more information, see [Device naming on Linux instances](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/device_naming.html) in the *Amazon EC2 User Guide for Linux Instances* .\n\n> To define a block device mapping, set the device name and exactly one of the following properties: `Ebs` , `NoDevice` , or `VirtualName` .", @@ -3789,12 +3788,12 @@ "LifecycleTransition": "The lifecycle transition. 
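[Editor's note] A sketch of the block device fields above, assuming a hypothetical encrypted `gp3` volume; the device name, sizes, and rates are illustrative only:

```json
{
  "Type": "AWS::AutoScaling::LaunchConfiguration",
  "Properties": {
    "ImageId": "ami-0abcdef1234567890",
    "InstanceType": "m5.large",
    "BlockDeviceMappings": [
      {
        "DeviceName": "/dev/sdh",
        "Ebs": {
          "VolumeSize": 100,
          "VolumeType": "gp3",
          "Iops": 3000,
          "Throughput": 125,
          "Encrypted": true,
          "DeleteOnTermination": true
        }
      }
    ]
  }
}
```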
For Auto Scaling groups, there are two major lifecycle transitions.\n\n- To create a lifecycle hook for scale-out events, specify `autoscaling:EC2_INSTANCE_LAUNCHING` .\n- To create a lifecycle hook for scale-in events, specify `autoscaling:EC2_INSTANCE_TERMINATING` .", "NotificationMetadata": "Additional information that you want to include any time Amazon EC2 Auto Scaling sends a message to the notification target.", "NotificationTargetARN": "The Amazon Resource Name (ARN) of the notification target that Amazon EC2 Auto Scaling sends notifications to when an instance is in a wait state for the lifecycle hook. You can specify an Amazon SNS topic or an Amazon SQS queue.", - "RoleARN": "The ARN of the IAM role that allows the Auto Scaling group to publish to the specified notification target. For information about creating this role, see [Configure a notification target for a lifecycle hook](https://docs.aws.amazon.com/autoscaling/ec2/userguide/prepare-for-lifecycle-notifications.html#lifecycle-hook-notification-target) in the *Amazon EC2 Auto Scaling User Guide* .\n\nValid only if the notification target is an Amazon SNS topic or an Amazon SQS queue." + "RoleARN": "The ARN of the IAM role that allows the Auto Scaling group to publish to the specified notification target. For information about creating this role, see [Prepare to add a lifecycle hook to your Auto Scaling group](https://docs.aws.amazon.com/autoscaling/ec2/userguide/prepare-for-lifecycle-notifications.html) in the *Amazon EC2 Auto Scaling User Guide* .\n\nValid only if the notification target is an Amazon SNS topic or an Amazon SQS queue." }, "AWS::AutoScaling::ScalingPolicy": { "AdjustmentType": "Specifies how the scaling adjustment is interpreted (for example, an absolute number or a percentage). The valid values are `ChangeInCapacity` , `ExactCapacity` , and `PercentChangeInCapacity` .\n\nRequired if the policy type is `StepScaling` or `SimpleScaling` . For more information, see [Scaling adjustment types](https://docs.aws.amazon.com/autoscaling/ec2/userguide/as-scaling-simple-step.html#as-scaling-adjustment) in the *Amazon EC2 Auto Scaling User Guide* .", "AutoScalingGroupName": "The name of the Auto Scaling group.", - "Cooldown": "A cooldown period, in seconds, that applies to a specific simple scaling policy. When a cooldown period is specified here, it overrides the default cooldown.\n\nValid only if the policy type is `SimpleScaling` . For more information, see [Scaling cooldowns for Amazon EC2 Auto Scaling](https://docs.aws.amazon.com/autoscaling/ec2/userguide/Cooldown.html) in the *Amazon EC2 Auto Scaling User Guide* .\n\nDefault: None", + "Cooldown": "A cooldown period, in seconds, that applies to a specific simple scaling policy. When a cooldown period is specified here, it overrides the default cooldown.\n\nValid only if the policy type is `SimpleScaling` . For more information, see [Scaling cooldowns for Amazon EC2 Auto Scaling](https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-scaling-cooldowns.html) in the *Amazon EC2 Auto Scaling User Guide* .\n\nDefault: None", "EstimatedInstanceWarmup": "*Not needed if the default instance warmup is defined for the group.*\n\nThe estimated time, in seconds, until a newly launched instance can contribute to the CloudWatch metrics. This warm-up period applies to instances launched due to a specific target tracking or step scaling policy. 
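[Editor's note] For orientation, the lifecycle hook fields described above combine on an Auto Scaling group roughly as in this fragment; the hook name, queue ARN, role ARN, and subnet ID are placeholders:

```json
{
  "Type": "AWS::AutoScaling::AutoScalingGroup",
  "Properties": {
    "MinSize": "1",
    "MaxSize": "4",
    "VPCZoneIdentifier": ["subnet-0123456789abcdef0"],
    "LifecycleHookSpecificationList": [
      {
        "LifecycleHookName": "drain-connections",
        "LifecycleTransition": "autoscaling:EC2_INSTANCE_TERMINATING",
        "HeartbeatTimeout": 300,
        "NotificationTargetARN": "arn:aws:sqs:us-west-2:111122223333:lifecycle-queue",
        "RoleARN": "arn:aws:iam::111122223333:role/lifecycle-publish-role"
      }
    ]
  }
}
```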
When a warm-up period is specified here, it overrides the default instance warmup.\n\nValid only if the policy type is `TargetTrackingScaling` or `StepScaling` .\n\n> The default is to use the value for the default instance warmup defined for the group. If default instance warmup is null, then `EstimatedInstanceWarmup` falls back to the value of default cooldown.", "MetricAggregationType": "The aggregation type for the CloudWatch metrics. The valid values are `Minimum` , `Maximum` , and `Average` . If the aggregation type is null, the value is treated as `Average` .\n\nValid only if the policy type is `StepScaling` .", "MinAdjustmentMagnitude": "The minimum value to scale by when the adjustment type is `PercentChangeInCapacity` . For example, suppose that you create a step scaling policy to scale out an Auto Scaling group by 25 percent and you specify a `MinAdjustmentMagnitude` of 2. If the group has 4 instances and the scaling policy is performed, 25 percent of 4 is 1. However, because you specified a `MinAdjustmentMagnitude` of 2, Amazon EC2 Auto Scaling scales out the group by 2 instances.\n\nValid only if the policy type is `StepScaling` or `SimpleScaling` . For more information, see [Scaling adjustment types](https://docs.aws.amazon.com/autoscaling/ec2/userguide/as-scaling-simple-step.html#as-scaling-adjustment) in the *Amazon EC2 Auto Scaling User Guide* .\n\n> Some Auto Scaling groups use instance weights. In this case, set the `MinAdjustmentMagnitude` to a value that is at least as large as your largest instance weight.", @@ -3837,7 +3836,7 @@ "ResourceLabel": "A label that uniquely identifies a specific Application Load Balancer target group from which to determine the average request count served by your Auto Scaling group. You can't specify a resource label unless the target group is attached to the Auto Scaling group.\n\nYou create the resource label by appending the final portion of the load balancer ARN and the final portion of the target group ARN into a single value, separated by a forward slash (/). The format of the resource label is:\n\n`app/my-alb/778d41231b141a0f/targetgroup/my-alb-target-group/943f017f100becff` .\n\nWhere:\n\n- app// is the final portion of the load balancer ARN\n- targetgroup// is the final portion of the target group ARN.\n\nTo find the ARN for an Application Load Balancer, use the [DescribeLoadBalancers](https://docs.aws.amazon.com/elasticloadbalancing/latest/APIReference/API_DescribeLoadBalancers.html) API operation. To find the ARN for the target group, use the [DescribeTargetGroups](https://docs.aws.amazon.com/elasticloadbalancing/latest/APIReference/API_DescribeTargetGroups.html) API operation." }, "AWS::AutoScaling::ScalingPolicy PredictiveScalingConfiguration": { - "MaxCapacityBreachBehavior": "Defines the behavior that should be applied if the forecast capacity approaches or exceeds the maximum capacity of the Auto Scaling group. Defaults to `HonorMaxCapacity` if not specified.\n\nThe following are possible values:\n\n- `HonorMaxCapacity` - Amazon EC2 Auto Scaling cannot scale out capacity higher than the maximum capacity. The maximum capacity is enforced as a hard limit.\n- `IncreaseMaxCapacity` - Amazon EC2 Auto Scaling can scale out capacity higher than the maximum capacity when the forecast capacity is close to or exceeds the maximum capacity. 
The upper limit is determined by the forecasted capacity and the value for `MaxCapacityBuffer` .", + "MaxCapacityBreachBehavior": "Defines the behavior that should be applied if the forecast capacity approaches or exceeds the maximum capacity of the Auto Scaling group. Defaults to `HonorMaxCapacity` if not specified.\n\nThe following are possible values:\n\n- `HonorMaxCapacity` - Amazon EC2 Auto Scaling can't increase the maximum capacity of the group when the forecast capacity is close to or exceeds the maximum capacity.\n- `IncreaseMaxCapacity` - Amazon EC2 Auto Scaling can increase the maximum capacity of the group when the forecast capacity is close to or exceeds the maximum capacity. The upper limit is determined by the forecasted capacity and the value for `MaxCapacityBuffer` .\n\n> Use caution when allowing the maximum capacity to be automatically increased. This can lead to more instances being launched than intended if the increased maximum capacity is not monitored and managed. The increased maximum capacity then becomes the new normal maximum capacity for the Auto Scaling group until you manually update it. The maximum capacity does not automatically decrease back to the original maximum.", "MaxCapacityBuffer": "The size of the capacity buffer to use when the forecast capacity is close to or exceeds the maximum capacity. The value is specified as a percentage relative to the forecast capacity. For example, if the buffer is 10, this means a 10 percent buffer, such that if the forecast capacity is 50, and the maximum capacity is 40, then the effective maximum capacity is 55.\n\nIf set to 0, Amazon EC2 Auto Scaling may scale capacity higher than the maximum capacity to equal but not exceed forecast capacity.\n\nRequired if the `MaxCapacityBreachBehavior` property is set to `IncreaseMaxCapacity` , and cannot be used otherwise.", "MetricSpecifications": "This structure includes the metrics and target utilization to use for predictive scaling.\n\nThis is an array, but we currently only support a single metric specification. That is, you can specify a target value and a single metric pair, or a target value and one scaling metric and one load metric.", "Mode": "The predictive scaling mode. Defaults to `ForecastOnly` if not specified.", @@ -4082,7 +4081,7 @@ }, "AWS::Backup::BackupPlan": { "BackupPlan": "Uniquely identifies the backup plan to be associated with the selection of resources.", - "BackupPlanTags": "To help organize your resources, you can assign your own metadata to the resources that you create. Each tag is a key-value pair. The specified tags are assigned to all backups created with this plan." + "BackupPlanTags": "The tags to assign to the backup plan." }, "AWS::Backup::BackupPlan AdvancedBackupSettingResourceType": { "BackupOptions": "The backup option for the resource. Each option is a key-value pair. This option is only available for Windows VSS backup jobs.\n\nValid values:\n\nSet to `\"WindowsVSS\":\"enabled\"` to enable the `WindowsVSS` backup option and create a Windows VSS backup.\n\nSet to `\"WindowsVSS\":\"disabled\"` to create a regular backup. 
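[Editor's note] A minimal sketch of how the `WindowsVSS` backup option above is expressed in a backup plan; the plan, rule, and vault names are hypothetical:

```json
{
  "Type": "AWS::Backup::BackupPlan",
  "Properties": {
    "BackupPlan": {
      "BackupPlanName": "vss-enabled-plan",
      "AdvancedBackupSettings": [
        {
          "ResourceType": "EC2",
          "BackupOptions": { "WindowsVSS": "enabled" }
        }
      ],
      "BackupPlanRule": [
        {
          "RuleName": "daily",
          "TargetBackupVault": "my-backup-vault",
          "ScheduleExpression": "cron(0 5 ? * * *)"
        }
      ]
    }
  }
}
```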
The `WindowsVSS` option is not enabled by default.\n\nIf you specify an invalid option, you get an `InvalidParameterValueException` exception.\n\nFor more information about Windows VSS backups, see [Creating a VSS-Enabled Windows Backup](https://docs.aws.amazon.com/aws-backup/latest/devguide/windows-backups.html) .", @@ -4098,7 +4097,7 @@ "CopyActions": "An array of CopyAction objects, which contains the details of the copy operation.", "EnableContinuousBackup": "Enables continuous backup and point-in-time restores (PITR).", "Lifecycle": "The lifecycle defines when a protected resource is transitioned to cold storage and when it expires. AWS Backup transitions and expires backups automatically according to the lifecycle that you define.", - "RecoveryPointTags": "To help organize your resources, you can assign your own metadata to the resources that you create. Each tag is a key-value pair.", + "RecoveryPointTags": "The tags to assign to the resources.", "RuleName": "A display name for a backup rule.", "ScheduleExpression": "A CRON expression specifying when AWS Backup initiates a backup job.", "ScheduleExpressionTimezone": "This is the timezone in which the schedule expression is set. By default, ScheduleExpressions are in UTC. You can modify this to a specified timezone.", @@ -4112,7 +4111,7 @@ "AWS::Backup::BackupPlan LifecycleResourceType": { "DeleteAfterDays": "Specifies the number of days after creation that a recovery point is deleted. Must be greater than `MoveToColdStorageAfterDays` .", "MoveToColdStorageAfterDays": "Specifies the number of days after creation that a recovery point is moved to cold storage.", - "OptInToArchiveForSupportedResources": "Optional Boolean. If this is true, this setting will instruct your backup plan to transition supported resources to archive (cold) storage tier in accordance with your lifecycle settings." + "OptInToArchiveForSupportedResources": "If the value is true, your backup plan transitions supported resources to archive (cold) storage tier in accordance with your lifecycle settings." }, "AWS::Backup::BackupSelection": { "BackupPlanId": "Uniquely identifies a backup plan.", @@ -4144,7 +4143,7 @@ "AWS::Backup::BackupVault": { "AccessPolicy": "A resource-based policy that is used to manage access permissions on the target backup vault.", "BackupVaultName": "The name of a logical container where backups are stored. Backup vaults are identified by names that are unique to the account used to create them and the AWS Region where they are created. They consist of lowercase letters, numbers, and hyphens.", - "BackupVaultTags": "Metadata that you can assign to help organize the resources that you create. Each tag is a key-value pair.", + "BackupVaultTags": "The tags to assign to the backup vault.", "EncryptionKeyArn": "A server-side encryption key you can specify to encrypt your backups from services that support full AWS Backup management; for example, `arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab` . If you specify a key, you must specify its ARN, not its alias. 
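[Editor's note] For example, a hypothetical `AWS::Backup::BackupVault` that supplies the key ARN rather than an alias; the account and key IDs are placeholders:

```json
{
  "Type": "AWS::Backup::BackupVault",
  "Properties": {
    "BackupVaultName": "my-backup-vault",
    "EncryptionKeyArn": "arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab",
    "BackupVaultTags": {
      "Environment": "test"
    }
  }
}
```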
If you do not specify a key, AWS Backup creates a KMS key for you by default.\n\nTo learn which AWS Backup services support full AWS Backup management and how AWS Backup handles encryption for backups from services that do not yet support full AWS Backup , see [Encryption for backups in AWS Backup](https://docs.aws.amazon.com/aws-backup/latest/devguide/encryption.html)", "LockConfiguration": "Configuration for [AWS Backup Vault Lock](https://docs.aws.amazon.com/aws-backup/latest/devguide/vault-lock.html) .", "Notifications": "The SNS event notifications for the specified backup vault." @@ -4162,7 +4161,7 @@ "FrameworkControls": "Contains detailed information about all of the controls of a framework. Each framework must contain at least one control.", "FrameworkDescription": "An optional description of the framework with a maximum 1,024 characters.", "FrameworkName": "The unique name of a framework. This name is between 1 and 256 characters, starting with a letter, and consisting of letters (a-z, A-Z), numbers (0-9), and underscores (_).", - "FrameworkTags": "A list of tags with which to tag your framework." + "FrameworkTags": "The tags to assign to your framework." }, "AWS::Backup::Framework ControlInputParameter": { "ParameterName": "The name of a parameter, for example, `BackupPlanFrequency` .", @@ -4174,23 +4173,23 @@ "Tags": "The tag key-value pair applied to those AWS resources that you want to trigger an evaluation for a rule. A maximum of one key-value pair can be provided. The tag value is optional, but it cannot be an empty string if you are creating or editing a framework from the console (though the value can be an empty string when included in a CloudFormation template).\n\nThe structure to assign a tag is: `[{\"Key\":\"string\",\"Value\":\"string\"}]` ." }, "AWS::Backup::Framework FrameworkControl": { - "ControlInputParameters": "A list of `ParameterName` and `ParameterValue` pairs.", + "ControlInputParameters": "The name/value pairs.", "ControlName": "The name of a control. This name is between 1 and 256 characters.", "ControlScope": "The scope of a control. The control scope defines what the control will evaluate. Three examples of control scopes are: a specific backup plan, all backup plans with a specific tag, or all backup plans.\n\nFor more information, see [`ControlScope` .](https://docs.aws.amazon.com/aws-backup/latest/devguide/API_ControlScope.html)" }, "AWS::Backup::Framework Tag": { - "Key": "", - "Value": "" + "Key": "The tag key.", + "Value": "The tag value." }, "AWS::Backup::ReportPlan": { "ReportDeliveryChannel": "Contains information about where and how to deliver your reports, specifically your Amazon S3 bucket name, S3 key prefix, and the formats of your reports.", "ReportPlanDescription": "An optional description of the report plan with a maximum 1,024 characters.", "ReportPlanName": "The unique name of the report plan. This name is between 1 and 256 characters starting with a letter, and consisting of letters (a-z, A-Z), numbers (0-9), and underscores (_).", - "ReportPlanTags": "A list of tags to tag your report plan.", + "ReportPlanTags": "The tags to assign to your report plan.", "ReportSetting": "Identifies the report template for the report. Reports are built using a report template. 
The report templates are:\n\n`RESOURCE_COMPLIANCE_REPORT | CONTROL_COMPLIANCE_REPORT | BACKUP_JOB_REPORT | COPY_JOB_REPORT | RESTORE_JOB_REPORT`\n\nIf the report template is `RESOURCE_COMPLIANCE_REPORT` or `CONTROL_COMPLIANCE_REPORT` , this API resource also describes the report coverage by AWS Regions and frameworks." }, "AWS::Backup::ReportPlan ReportDeliveryChannel": { - "Formats": "A list of the format of your reports: `CSV` , `JSON` , or both. If not specified, the default format is `CSV` .", + "Formats": "The format of your reports: `CSV` , `JSON` , or both. If not specified, the default format is `CSV` .", "S3BucketName": "The unique name of the S3 bucket that receives your reports.", "S3KeyPrefix": "The prefix for where AWS Backup Audit Manager delivers your reports to Amazon S3. The prefix is this part of the following path: s3://your-bucket-name/ `prefix` /Backup/us-west-2/year/month/day/report-name. If not specified, there is no prefix." }, @@ -4202,8 +4201,8 @@ "ReportTemplate": "Identifies the report template for the report. Reports are built using a report template. The report templates are:\n\n`RESOURCE_COMPLIANCE_REPORT | CONTROL_COMPLIANCE_REPORT | BACKUP_JOB_REPORT | COPY_JOB_REPORT | RESTORE_JOB_REPORT`" }, "AWS::Backup::ReportPlan Tag": { - "Key": "The tag key (String). The key can't start with `aws:` .\n\nLength Constraints: Minimum length of 1. Maximum length of 128.\n\nPattern: `^(?![aA]{1}[wW]{1}[sS]{1}:)([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]+)$`", - "Value": "The value of the key.\n\nLength Constraints: Maximum length of 256.\n\nPattern: `^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)$`" + "Key": "The tag key.", + "Value": "The tag value." }, "AWS::Backup::RestoreTestingPlan": { "RecoveryPointSelection": "The specified criteria to assign a set of resources, such as recovery point types or backup vaults.", @@ -4221,8 +4220,8 @@ "SelectionWindowDays": "Accepted values are integers from 1 to 365." }, "AWS::Backup::RestoreTestingPlan Tag": { - "Key": "", - "Value": "" + "Key": "The tag key.", + "Value": "The tag value." }, "AWS::Backup::RestoreTestingSelection": { "IamRoleArn": "The Amazon Resource Name (ARN) of the IAM role that AWS Backup uses to create the target resource; for example: `arn:aws:iam::123456789012:role/S3Access` .", @@ -4231,12 +4230,12 @@ "ProtectedResourceType": "The type of AWS resource included in a resource testing selection; for example, an Amazon EBS volume or an Amazon RDS database.", "RestoreMetadataOverrides": "You can override certain restore metadata keys by including the parameter `RestoreMetadataOverrides` in the body of `RestoreTestingSelection` . Key values are not case sensitive.\n\nSee the complete list of [restore testing inferred metadata](https://docs.aws.amazon.com/aws-backup/latest/devguide/restore-testing-inferred-metadata.html) .", "RestoreTestingPlanName": "Unique string that is the name of the restore testing plan.\n\nThe name cannot be changed after creation. The name must consist of only alphanumeric characters and underscores. Maximum length is 50.", - "RestoreTestingSelectionName": "This is the unique name of the restore testing selection that belongs to the related restore testing plan.", + "RestoreTestingSelectionName": "The unique name of the restore testing selection that belongs to the related restore testing plan.", "ValidationWindowHours": "This is amount of hours (1 to 168) available to run a validation script on the data. 
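[Editor's note] Looking back at the report plan fields above, a sketch of an `AWS::Backup::ReportPlan` that wires a delivery channel to a report template; the bucket name and prefix are placeholders:

```json
{
  "Type": "AWS::Backup::ReportPlan",
  "Properties": {
    "ReportPlanName": "monthly_backup_jobs",
    "ReportDeliveryChannel": {
      "S3BucketName": "my-report-bucket",
      "S3KeyPrefix": "audit",
      "Formats": ["CSV", "JSON"]
    },
    "ReportSetting": {
      "ReportTemplate": "BACKUP_JOB_REPORT"
    }
  }
}
```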
The data will be deleted upon the completion of the validation script or the end of the specified retention period, whichever comes first." }, "AWS::Backup::RestoreTestingSelection KeyValue": { - "Key": "The tag key (String). The key can't start with `aws:` .\n\nLength Constraints: Minimum length of 1. Maximum length of 128.\n\nPattern: `^(?![aA]{1}[wW]{1}[sS]{1}:)([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]+)$`", - "Value": "The value of the key.\n\nLength Constraints: Maximum length of 256.\n\nPattern: `^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)$`" + "Key": "The tag key.", + "Value": "The tag value." }, "AWS::Backup::RestoreTestingSelection ProtectedResourceConditions": { "StringEquals": "Filters the values of your tagged resources for only those resources that you tagged with the same value. Also called \"exact matching.\"", @@ -4396,6 +4395,7 @@ "Requests": "The type and quantity of the resources to request for the container. The values vary based on the `name` that's specified. Resources can be requested by using either the `limits` or the `requests` objects.\n\n- **memory** - The memory hard limit (in MiB) for the container, using whole integers, with a \"Mi\" suffix. If your container attempts to exceed the memory specified, the container is terminated. You must specify at least 4 MiB of memory for a job. `memory` can be specified in `limits` , `requests` , or both. If `memory` is specified in both, then the value that's specified in `limits` must be equal to the value that's specified in `requests` .\n\n> If you're trying to maximize your resource utilization by providing your jobs as much memory as possible for a particular instance type, see [Memory management](https://docs.aws.amazon.com/batch/latest/userguide/memory-management.html) in the *AWS Batch User Guide* .\n- **cpu** - The number of CPUs that are reserved for the container. Values must be an even multiple of `0.25` . `cpu` can be specified in `limits` , `requests` , or both. If `cpu` is specified in both, then the value that's specified in `limits` must be at least as large as the value that's specified in `requests` .\n- **nvidia.com/gpu** - The number of GPUs that are reserved for the container. Values must be a whole integer. `nvidia.com/gpu` can be specified in `limits` , `requests` , or both. If `nvidia.com/gpu` is specified in both, then the value that's specified in `limits` must be equal to the value that's specified in `requests` ." }, "AWS::Batch::JobDefinition EksContainerSecurityContext": { + "AllowPrivilegeEscalation": "Whether or not a container or a Kubernetes pod is allowed to gain more privileges than its parent process. The default value is `false` .", "Privileged": "When this parameter is `true` , the container is given elevated permissions on the host container instance. The level of permissions are similar to the `root` user permissions. The default value is `false` . This parameter maps to `privileged` policy in the [Privileged pod security policies](https://docs.aws.amazon.com/https://kubernetes.io/docs/concepts/security/pod-security-policy/#privileged) in the *Kubernetes documentation* .", "ReadOnlyRootFilesystem": "When this parameter is `true` , the container is given read-only access to its root file system. The default value is `false` . 
This parameter maps to `ReadOnlyRootFilesystem` policy in the [Volumes and file systems pod security policies](https://docs.aws.amazon.com/https://kubernetes.io/docs/concepts/security/pod-security-policy/#volumes-and-file-systems) in the *Kubernetes documentation* .", "RunAsGroup": "When this parameter is specified, the container is run as the specified group ID ( `gid` ). If this parameter isn't specified, the default is the group that's specified in the image metadata. This parameter maps to `RunAsGroup` and `MustRunAs` policy in the [Users and groups pod security policies](https://docs.aws.amazon.com/https://kubernetes.io/docs/concepts/security/pod-security-policy/#users-and-groups) in the *Kubernetes documentation* .", @@ -4443,6 +4443,9 @@ "AWS::Batch::JobDefinition FargatePlatformConfiguration": { "PlatformVersion": "The AWS Fargate platform version where the jobs are running. A platform version is specified only for jobs that are running on Fargate resources. If one isn't specified, the `LATEST` platform version is used by default. This uses a recent, approved version of the AWS Fargate platform for compute resources. For more information, see [AWS Fargate platform versions](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/platform_versions.html) in the *Amazon Elastic Container Service Developer Guide* ." }, + "AWS::Batch::JobDefinition ImagePullSecret": { + "Name": "Provides a unique identifier for the `ImagePullSecret` . This object is required when `EksPodProperties$imagePullSecrets` is used." + }, "AWS::Batch::JobDefinition LinuxParameters": { "Devices": "Any of the host devices to expose to the container. This parameter maps to `Devices` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.23/#create-a-container) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.23/) and the `--device` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/) .\n\n> This parameter isn't applicable to jobs that are running on Fargate resources. Don't provide it for these jobs.", "InitProcessEnabled": "If true, run an `init` process inside the container that forwards signals and reaps processes. This parameter maps to the `--init` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/) . This parameter requires version 1.25 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: `sudo docker version | grep \"Server API version\"`", @@ -4482,6 +4485,7 @@ "Containers": "The properties of the container that's used on the Amazon EKS pod.", "DnsPolicy": "The DNS policy for the pod. The default value is `ClusterFirst` . If the `hostNetwork` parameter is not specified, the default is `ClusterFirstWithHostNet` . `ClusterFirst` indicates that any DNS query that does not match the configured cluster domain suffix is forwarded to the upstream nameserver inherited from the node. For more information, see [Pod's DNS policy](https://docs.aws.amazon.com/https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy) in the *Kubernetes documentation* .\n\nValid values: `Default` | `ClusterFirst` | `ClusterFirstWithHostNet`", "HostNetwork": "Indicates if the pod uses the hosts' network IP address. The default value is `true` . 
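[Editor's note] A sketch of where the Kubernetes security context fields above sit in a Batch job definition; the job name, container name, image, and numeric IDs are illustrative:

```json
{
  "Type": "AWS::Batch::JobDefinition",
  "Properties": {
    "Type": "container",
    "JobDefinitionName": "eks-locked-down",
    "EksProperties": {
      "PodProperties": {
        "Containers": [
          {
            "Name": "app",
            "Image": "public.ecr.aws/amazonlinux/amazonlinux:2",
            "SecurityContext": {
              "Privileged": false,
              "ReadOnlyRootFilesystem": true,
              "RunAsUser": 1000,
              "RunAsGroup": 3000,
              "AllowPrivilegeEscalation": false
            }
          }
        ]
      }
    }
  }
}
```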
Setting this to `false` enables the Kubernetes pod networking model. Most AWS Batch workloads are egress-only and don't require the overhead of IP allocation for each pod for incoming connections. For more information, see [Host namespaces](https://docs.aws.amazon.com/https://kubernetes.io/docs/concepts/security/pod-security-policy/#host-namespaces) and [Pod networking](https://docs.aws.amazon.com/https://kubernetes.io/docs/concepts/workloads/pods/#pod-networking) in the *Kubernetes documentation* .", + "ImagePullSecrets": "References the Kubernetes secrets for the pod to use when pulling container images from a private registry. Each entry names an `ImagePullSecret` .", "InitContainers": "These containers run before application containers, always run to completion, and must complete successfully before the next container starts. These containers are registered with the Amazon EKS Connector agent and persist the registration information in the Kubernetes backend data store. For more information, see [Init Containers](https://docs.aws.amazon.com/https://kubernetes.io/docs/concepts/workloads/pods/init-containers/) in the *Kubernetes documentation* .\n\n> This object is limited to 10 elements", "Metadata": "Metadata about the Kubernetes pod. For more information, see [Understanding Kubernetes Objects](https://docs.aws.amazon.com/https://kubernetes.io/docs/concepts/overview/working-with-objects/kubernetes-objects/) in the *Kubernetes documentation* .", "ServiceAccountName": "The name of the service account that's used to run the pod. For more information, see [Kubernetes service accounts](https://docs.aws.amazon.com/eks/latest/userguide/service-accounts.html) and [Configure a Kubernetes service account to assume an IAM role](https://docs.aws.amazon.com/eks/latest/userguide/associate-service-account-role.html) in the *Amazon EKS User Guide* and [Configure service accounts for pods](https://docs.aws.amazon.com/https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/) in the *Kubernetes documentation* .", @@ -4583,6 +4587,164 @@ "ShareIdentifier": "A fair share identifier or fair share identifier prefix. If the string ends with an asterisk (*), this entry specifies the weight factor to use for fair share identifiers that start with that prefix. The list of fair share identifiers in a fair share policy can't overlap. For example, you can't have one that specifies a `shareIdentifier` of `UserA*` and another that specifies a `shareIdentifier` of `UserA-1` .\n\nThere can be no more than 500 fair share identifiers active in a job queue.\n\nThe string is limited to 255 alphanumeric characters, and can be followed by an asterisk (*).", "WeightFactor": "The weight factor for the fair share identifier. The default value is 1.0. A lower value has a higher priority for compute resources. For example, jobs that use a share identifier with a weight factor of 0.125 (1/8) get 8 times the compute resources of jobs that use a share identifier with a weight factor of 1.\n\nThe smallest supported value is 0.0001, and the largest supported value is 999.9999." }, + "AWS::Bedrock::Agent": { + "ActionGroups": "The action groups that belong to an agent.", + "AgentName": "The name of the agent.", + "AgentResourceRoleArn": "The Amazon Resource Name (ARN) of the IAM role with permissions to invoke API operations on the agent.", + "AutoPrepare": "Specifies whether to automatically update the `DRAFT` version of the agent after making changes to the agent. The `DRAFT` version can be continually iterated upon during internal development. 
By default, this value is `false` .", + "CustomerEncryptionKeyArn": "The Amazon Resource Name (ARN) of the AWS KMS key that encrypts the agent.", + "Description": "The description of the agent.", + "FoundationModel": "The foundation model used for orchestration by the agent.", + "IdleSessionTTLInSeconds": "The number of seconds for which Amazon Bedrock keeps information about a user's conversation with the agent.\n\nA user interaction remains active for the amount of time specified. If no conversation occurs during this time, the session expires and Amazon Bedrock deletes any data provided before the timeout.", + "Instruction": "Instructions that tell the agent what it should do and how it should interact with users.", + "KnowledgeBases": "The knowledge bases associated with the agent.", + "PromptOverrideConfiguration": "Contains configurations to override prompt templates in different parts of an agent sequence. For more information, see [Advanced prompts](https://docs.aws.amazon.com/bedrock/latest/userguide/advanced-prompts.html) .", + "SkipResourceInUseCheckOnDelete": "Specifies whether to delete the resource even if it's in use. By default, this value is `false` .", + "Tags": "Metadata that you can assign to a resource as key-value pairs. For more information, see the following resources:\n\n- [Tag naming limits and requirements](https://docs.aws.amazon.com/tag-editor/latest/userguide/tagging.html#tag-conventions)\n- [Tagging best practices](https://docs.aws.amazon.com/tag-editor/latest/userguide/tagging.html#tag-best-practices)" + }, + "AWS::Bedrock::Agent APISchema": { + "Payload": "The JSON or YAML-formatted payload defining the OpenAPI schema for the action group. For more information, see [Action group OpenAPI schemas](https://docs.aws.amazon.com/bedrock/latest/userguide/agents-api-schema.html) .", + "S3": "Contains details about the S3 object containing the OpenAPI schema for the action group. For more information, see [Action group OpenAPI schemas](https://docs.aws.amazon.com/bedrock/latest/userguide/agents-api-schema.html) ." + }, + "AWS::Bedrock::Agent ActionGroupExecutor": { + "Lambda": "The Amazon Resource Name (ARN) of the Lambda function containing the business logic that is carried out upon invoking the action." + }, + "AWS::Bedrock::Agent AgentActionGroup": { + "ActionGroupExecutor": "The Amazon Resource Name (ARN) of the Lambda function containing the business logic that is carried out upon invoking the action.", + "ActionGroupName": "The name of the action group.", + "ActionGroupState": "Specifies whether the action group is available for the agent to invoke or not when sending an [InvokeAgent](https://docs.aws.amazon.com/bedrock/latest/APIReference/API_agent-runtime_InvokeAgent.html) request.", + "ApiSchema": "Contains either details about the S3 object containing the OpenAPI schema for the action group or the JSON or YAML-formatted payload defining the schema. For more information, see [Action group OpenAPI schemas](https://docs.aws.amazon.com/bedrock/latest/userguide/agents-api-schema.html) .", + "Description": "The description of the action group.", + "ParentActionGroupSignature": "If this field is set as `AMAZON.UserInput` , the agent can request the user for additional information when trying to complete a task. 
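[Editor's note] A minimal hypothetical `AWS::Bedrock::Agent` using the properties above; the role ARN, model identifier, and instruction text are placeholders:

```json
{
  "Type": "AWS::Bedrock::Agent",
  "Properties": {
    "AgentName": "my-support-agent",
    "AgentResourceRoleArn": "arn:aws:iam::111122223333:role/AmazonBedrockExecutionRoleForAgents_example",
    "FoundationModel": "anthropic.claude-v2",
    "Instruction": "You are a support agent that helps customers troubleshoot orders.",
    "IdleSessionTTLInSeconds": 600,
    "AutoPrepare": true
  }
}
```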
The `description` , `apiSchema` , and `actionGroupExecutor` fields must be blank for this action group.\n\nDuring orchestration, if the agent determines that it needs to invoke an API in an action group, but doesn't have enough information to complete the API request, it will invoke this action group instead and return an [Observation](https://docs.aws.amazon.com/bedrock/latest/APIReference/API_agent-runtime_Observation.html) reprompting the user for more information.", + "SkipResourceInUseCheckOnDelete": "Specifies whether to delete the resource even if it's in use. By default, this value is `false` ." + }, + "AWS::Bedrock::Agent AgentKnowledgeBase": { + "Description": "The description of the association between the agent and the knowledge base.", + "KnowledgeBaseId": "The unique identifier of the association between the agent and the knowledge base.", + "KnowledgeBaseState": "Specifies whether to use the knowledge base or not when sending an [InvokeAgent](https://docs.aws.amazon.com/bedrock/latest/APIReference/API_agent-runtime_InvokeAgent.html) request." + }, + "AWS::Bedrock::Agent InferenceConfiguration": { + "MaximumLength": "The maximum number of tokens to allow in the generated response.", + "StopSequences": "A list of stop sequences. A stop sequence is a sequence of characters that causes the model to stop generating the response.", + "Temperature": "The likelihood of the model selecting higher-probability options while generating a response. A lower value makes the model more likely to choose higher-probability options, while a higher value makes the model more likely to choose lower-probability options.", + "TopK": "While generating a response, the model determines the probability of the following token at each point of generation. The value that you set for `topK` is the number of most-likely candidates from which the model chooses the next token in the sequence. For example, if you set `topK` to 50, the model selects the next token from among the top 50 most likely choices.", + "TopP": "While generating a response, the model determines the probability of the following token at each point of generation. The value that you set for `Top P` determines the number of most-likely candidates from which the model chooses the next token in the sequence. For example, if you set `topP` to 80, the model only selects the next token from the top 80% of the probability distribution of next tokens." + }, + "AWS::Bedrock::Agent PromptConfiguration": { + "BasePromptTemplate": "Defines the prompt template with which to replace the default prompt template. You can use placeholder variables in the base prompt template to customize the prompt. For more information, see [Prompt template placeholder variables](https://docs.aws.amazon.com/bedrock/latest/userguide/prompt-placeholders.html) .", + "InferenceConfiguration": "Contains inference parameters to use when the agent invokes a foundation model in the part of the agent sequence defined by the `promptType` . For more information, see [Inference parameters for foundation models](https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters.html) .", + "ParserMode": "Specifies whether to override the default parser Lambda function when parsing the raw foundation model output in the part of the agent sequence defined by the `promptType` . 
If you set the field as `OVERRIDDEN` , the `overrideLambda` field in the [PromptOverrideConfiguration](https://docs.aws.amazon.com/bedrock/latest/APIReference/API_agent_PromptOverrideConfiguration.html) must be specified with the ARN of a Lambda function.", + "PromptCreationMode": "Specifies whether to override the default prompt template for this `promptType` . Set this value to `OVERRIDDEN` to use the prompt that you provide in the `basePromptTemplate` . If you leave it as `DEFAULT` , the agent uses a default prompt template.", + "PromptState": "Specifies whether to allow the agent to carry out the step specified in the `promptType` . If you set this value to `DISABLED` , the agent skips that step. The default state for each `promptType` is as follows.\n\n- `PRE_PROCESSING` \u2013 `ENABLED`\n- `ORCHESTRATION` \u2013 `ENABLED`\n- `KNOWLEDGE_BASE_RESPONSE_GENERATION` \u2013 `ENABLED`\n- `POST_PROCESSING` \u2013 `DISABLED`", + "PromptType": "The step in the agent sequence that this prompt configuration applies to." + }, + "AWS::Bedrock::Agent PromptOverrideConfiguration": { + "OverrideLambda": "The ARN of the Lambda function to use when parsing the raw foundation model output in parts of the agent sequence. If you specify this field, at least one of the `promptConfigurations` must contain a `parserMode` value that is set to `OVERRIDDEN` .", + "PromptConfigurations": "Contains configurations to override a prompt template in one part of an agent sequence. For more information, see [Advanced prompts](https://docs.aws.amazon.com/bedrock/latest/userguide/advanced-prompts.html) ." + }, + "AWS::Bedrock::Agent S3Identifier": { + "S3BucketName": "The name of the S3 bucket.", + "S3ObjectKey": "The S3 object key containing the resource." + }, + "AWS::Bedrock::AgentAlias": { + "AgentAliasName": "The name of the alias of the agent.", + "AgentId": "The unique identifier of the agent.", + "Description": "The description of the alias of the agent.", + "RoutingConfiguration": "Contains details about the routing configuration of the alias.", + "Tags": "Metadata that you can assign to a resource as key-value pairs. For more information, see the following resources:\n\n- [Tag naming limits and requirements](https://docs.aws.amazon.com/tag-editor/latest/userguide/tagging.html#tag-conventions)\n- [Tagging best practices](https://docs.aws.amazon.com/tag-editor/latest/userguide/tagging.html#tag-best-practices)" + }, + "AWS::Bedrock::AgentAlias AgentAliasHistoryEvent": { + "EndDate": "The date that the alias stopped being associated with the version in the `routingConfiguration` object.", + "RoutingConfiguration": "Contains details about the version of the agent with which the alias is associated.", + "StartDate": "The date that the alias began being associated with the version in the `routingConfiguration` object." + }, + "AWS::Bedrock::AgentAlias AgentAliasRoutingConfigurationListItem": { + "AgentVersion": "The version of the agent with which the alias is associated." + }, + "AWS::Bedrock::DataSource": { + "DataSourceConfiguration": "Contains details about how the data source is stored.", + "Description": "The description of the data source.", + "KnowledgeBaseId": "The unique identifier of the knowledge base to which the data source belongs.", + "Name": "The name of the data source.", + "ServerSideEncryptionConfiguration": "Contains details about the configuration of the server-side encryption.", + "VectorIngestionConfiguration": "Contains details about how to ingest the documents in the data source." 
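[Editor's note] Tying the prompt override fields above together, a sketch of the `PromptOverrideConfiguration` object as it might appear in an agent's properties; the template text and parameter values are illustrative:

```json
{
  "PromptOverrideConfiguration": {
    "PromptConfigurations": [
      {
        "PromptType": "ORCHESTRATION",
        "PromptCreationMode": "OVERRIDDEN",
        "PromptState": "ENABLED",
        "BasePromptTemplate": "$instruction$ Use only the retrieved context to answer.",
        "InferenceConfiguration": {
          "MaximumLength": 2048,
          "Temperature": 0.2,
          "TopK": 250,
          "TopP": 0.9,
          "StopSequences": ["Human:"]
        }
      }
    ]
  }
}
```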
+ }, + "AWS::Bedrock::DataSource ChunkingConfiguration": { + "ChunkingStrategy": "A knowledge base can split your source data into chunks. A *chunk* refers to an excerpt from a data source that is returned when the knowledge base that it belongs to is queried. You have the following options for chunking your data. If you opt for `NONE` , then you may want to pre-process your files by splitting them up such that each file corresponds to a chunk.\n\n- `FIXED_SIZE` \u2013 Amazon Bedrock splits your source data into chunks of the approximate size that you set in the `fixedSizeChunkingConfiguration` .\n- `NONE` \u2013 Amazon Bedrock treats each file as one chunk. If you choose this option, you may want to pre-process your documents by splitting them into separate files.", + "FixedSizeChunkingConfiguration": "Configurations for when you choose fixed-size chunking. If you set the `chunkingStrategy` as `NONE` , exclude this field." + }, + "AWS::Bedrock::DataSource DataSourceConfiguration": { + "S3Configuration": "Contains details about the configuration of the S3 object containing the data source.", + "Type": "The type of storage for the data source." + }, + "AWS::Bedrock::DataSource FixedSizeChunkingConfiguration": { + "MaxTokens": "The maximum number of tokens to include in a chunk.", + "OverlapPercentage": "The percentage of overlap between adjacent chunks of a data source." + }, + "AWS::Bedrock::DataSource S3DataSourceConfiguration": { + "BucketArn": "The Amazon Resource Name (ARN) of the bucket that contains the data source.", + "InclusionPrefixes": "A list of S3 prefixes that define the object containing the data sources. For more information, see [Organizing objects using prefixes](https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-prefixes.html) ." + }, + "AWS::Bedrock::DataSource ServerSideEncryptionConfiguration": { + "KmsKeyArn": "The Amazon Resource Name (ARN) of the AWS KMS key used to encrypt the resource." + }, + "AWS::Bedrock::DataSource VectorIngestionConfiguration": { + "ChunkingConfiguration": "Details about how to chunk the documents in the data source. A *chunk* refers to an excerpt from a data source that is returned when the knowledge base that it belongs to is queried." + }, + "AWS::Bedrock::KnowledgeBase": { + "Description": "The description of the knowledge base.", + "KnowledgeBaseConfiguration": "Contains details about the embeddings configuration of the knowledge base.", + "Name": "The name of the knowledge base.", + "RoleArn": "The Amazon Resource Name (ARN) of the IAM role with permissions to invoke API operations on the knowledge base.", + "StorageConfiguration": "Contains details about the storage configuration of the knowledge base.", + "Tags": "Metadata that you can assign to a resource as key-value pairs. For more information, see the following resources:\n\n- [Tag naming limits and requirements](https://docs.aws.amazon.com/tag-editor/latest/userguide/tagging.html#tag-conventions)\n- [Tagging best practices](https://docs.aws.amazon.com/tag-editor/latest/userguide/tagging.html#tag-best-practices)" + }, + "AWS::Bedrock::KnowledgeBase KnowledgeBaseConfiguration": { + "Type": "The type of data that the data source is converted into for the knowledge base.", + "VectorKnowledgeBaseConfiguration": "Contains details about the embeddings model that's used to convert the data source." 
+ }, + "AWS::Bedrock::KnowledgeBase OpenSearchServerlessConfiguration": { + "CollectionArn": "The Amazon Resource Name (ARN) of the OpenSearch Service vector store.", + "FieldMapping": "Contains the names of the fields to which to map information about the vector store.", + "VectorIndexName": "The name of the vector store." + }, + "AWS::Bedrock::KnowledgeBase OpenSearchServerlessFieldMapping": { + "MetadataField": "The name of the field in which Amazon Bedrock stores metadata about the vector store.", + "TextField": "The name of the field in which Amazon Bedrock stores the raw text from your data. The text is split according to the chunking strategy you choose.", + "VectorField": "The name of the field in which Amazon Bedrock stores the vector embeddings for your data sources." + }, + "AWS::Bedrock::KnowledgeBase PineconeConfiguration": { + "ConnectionString": "The endpoint URL for your index management page.", + "CredentialsSecretArn": "The Amazon Resource Name (ARN) of the secret that you created in AWS Secrets Manager that is linked to your Pinecone API key.", + "FieldMapping": "Contains the names of the fields to which to map information about the vector store.", + "Namespace": "The namespace to be used to write new data to your database." + }, + "AWS::Bedrock::KnowledgeBase PineconeFieldMapping": { + "MetadataField": "The name of the field in which Amazon Bedrock stores metadata about the vector store.", + "TextField": "The name of the field in which Amazon Bedrock stores the raw text from your data. The text is split according to the chunking strategy you choose." + }, + "AWS::Bedrock::KnowledgeBase RdsConfiguration": { + "CredentialsSecretArn": "The Amazon Resource Name (ARN) of the secret that you created in AWS Secrets Manager that is linked to your Amazon RDS database.", + "DatabaseName": "The name of your Amazon RDS database.", + "FieldMapping": "Contains the names of the fields to which to map information about the vector store.", + "ResourceArn": "The Amazon Resource Name (ARN) of the vector store.", + "TableName": "The name of the table in the database." + }, + "AWS::Bedrock::KnowledgeBase RdsFieldMapping": { + "MetadataField": "The name of the field in which Amazon Bedrock stores metadata about the vector store.", + "PrimaryKeyField": "The name of the field in which Amazon Bedrock stores the ID for each entry.", + "TextField": "The name of the field in which Amazon Bedrock stores the raw text from your data. The text is split according to the chunking strategy you choose.", + "VectorField": "The name of the field in which Amazon Bedrock stores the vector embeddings for your data sources." + }, + "AWS::Bedrock::KnowledgeBase StorageConfiguration": { + "OpensearchServerlessConfiguration": "Contains the storage configuration of the knowledge base in Amazon OpenSearch Service.", + "PineconeConfiguration": "Contains the storage configuration of the knowledge base in Pinecone.", + "RdsConfiguration": "Contains details about the storage configuration of the knowledge base in Amazon RDS. For more information, see [Create a vector index in Amazon RDS](https://docs.aws.amazon.com/bedrock/latest/userguide/knowledge-base-setup-rds.html) .", + "Type": "The vector store service in which the knowledge base is stored." + }, + "AWS::Bedrock::KnowledgeBase VectorKnowledgeBaseConfiguration": { + "EmbeddingModelArn": "The Amazon Resource Name (ARN) of the model used to create vector embeddings for the knowledge base." 
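[Editor's note] Combining the knowledge base objects above, a hypothetical `AWS::Bedrock::KnowledgeBase` backed by OpenSearch Serverless; all ARNs, the index name, and the field names are placeholders:

```json
{
  "Type": "AWS::Bedrock::KnowledgeBase",
  "Properties": {
    "Name": "my-knowledge-base",
    "RoleArn": "arn:aws:iam::111122223333:role/AmazonBedrockExecutionRoleForKnowledgeBase_example",
    "KnowledgeBaseConfiguration": {
      "Type": "VECTOR",
      "VectorKnowledgeBaseConfiguration": {
        "EmbeddingModelArn": "arn:aws:bedrock:us-west-2::foundation-model/amazon.titan-embed-text-v1"
      }
    },
    "StorageConfiguration": {
      "Type": "OPENSEARCH_SERVERLESS",
      "OpensearchServerlessConfiguration": {
        "CollectionArn": "arn:aws:aoss:us-west-2:111122223333:collection/abcdefghij1234567890",
        "VectorIndexName": "my-vector-index",
        "FieldMapping": {
          "VectorField": "embedding",
          "TextField": "text_chunk",
          "MetadataField": "metadata"
        }
      }
    }
  }
}
```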
+ }, "AWS::BillingConductor::BillingGroup": { "AccountGrouping": "The set of accounts that will be under the billing group. The set of accounts resemble the linked accounts in a consolidated billing family.", "ComputationPreference": "The preferences and settings that will be used to compute the AWS charges for a billing group.", @@ -4923,7 +5085,7 @@ "LoggingLevel": "Specifies the logging level for this configuration. This property affects the log entries pushed to Amazon CloudWatch Logs.\n\nLogging levels include `ERROR` , `INFO` , or `NONE` .", "SnsTopicArns": "The ARNs of the SNS topics that deliver notifications to AWS Chatbot .", "TeamId": "The ID of the Microsoft Team authorized with AWS Chatbot .\n\nTo get the team ID, you must perform the initial authorization flow with Microsoft Teams in the AWS Chatbot console. Then you can copy and paste the team ID from the console. For more details, see steps 1-4 in [Get started with Microsoft Teams](https://docs.aws.amazon.com/chatbot/latest/adminguide/teams-setup.html#teams-client-setup) in the *AWS Chatbot Administrator Guide* .", - "TeamsChannelId": "", + "TeamsChannelId": "The ID of the Microsoft Teams channel.\n\nTo get the channel ID, open Microsoft Teams, right click on the channel name in the left pane, then choose Copy. An example of the channel ID syntax is: `19%3ab6ef35dc342d56ba5654e6fc6d25a071%40thread.tacv2` .", "TeamsTenantId": "The ID of the Microsoft Teams tenant.\n\nTo get the tenant ID, you must perform the initial authorization flow with Microsoft Teams in the AWS Chatbot console. Then you can copy and paste the tenant ID from the console. For more details, see steps 1-4 in [Get started with Microsoft Teams](https://docs.aws.amazon.com/chatbot/latest/adminguide/teams-setup.html#teams-client-setup) in the *AWS Chatbot Administrator Guide* .", "UserRoleRequired": "Enables use of a user role requirement in your chat configuration." }, @@ -5044,7 +5206,7 @@ "List": "Analysis rule type that enables only list queries on a configured table." }, "AWS::CleanRooms::ConfiguredTable DifferentialPrivacy": { - "Columns": "" + "Columns": "The name of the column, such as user_id, that contains the unique identifier of your users, whose privacy you want to protect. If you want to turn on differential privacy for two or more tables in a collaboration, you must configure the same column as the user identifier column in both analysis rules." }, "AWS::CleanRooms::ConfiguredTable DifferentialPrivacyColumn": { "Name": "The name of the column, such as user_id, that contains the unique identifier of your users, whose privacy you want to protect. If you want to turn on differential privacy for two or more tables in a collaboration, you must configure the same column as the user identifier column in both analysis rules." @@ -5101,6 +5263,52 @@ "Key": "The key of the tag.", "Value": "The value of the tag." }, + "AWS::CleanRooms::PrivacyBudgetTemplate": { + "AutoRefresh": "How often the privacy budget refreshes.\n\n> If you plan to regularly bring new data into the collaboration, use `CALENDAR_MONTH` to automatically get a new privacy budget for the collaboration every calendar month. Choosing this option allows arbitrary amounts of information to be revealed about rows of the data when repeatedly queried across refreshes. 
Avoid choosing this if the same rows will be repeatedly queried between privacy budget refreshes.", + "MembershipIdentifier": "The identifier for a membership resource.", + "Parameters": "Specifies the epsilon and noise parameters for the privacy budget template.", + "PrivacyBudgetType": "Specifies the type of the privacy budget template.", + "Tags": "" + }, + "AWS::CleanRooms::PrivacyBudgetTemplate Parameters": { + "Epsilon": "The epsilon value that you want to use.", + "UsersNoisePerQuery": "Noise added per query is measured in terms of the number of users whose contributions you want to obscure. This value governs the rate at which the privacy budget is depleted." + }, + "AWS::CleanRooms::PrivacyBudgetTemplate Tag": { + "Key": "The key name of the tag. You can specify a value that's 1 to 128 Unicode characters in length and can't be prefixed with `aws:` . You can use any of the following characters: the set of Unicode letters, digits, whitespace, `_` , `.` , `:` , `/` , `=` , `+` , `@` , `-` , and `\"` .\n\nFor more information, see [Tag](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-resource-tags.html)", + "Value": "The value for the tag. You can specify a value that's 1 to 256 characters in length. You can use any of the following characters: the set of Unicode letters, digits, whitespace, `_` , `.` , `/` , `=` , `+` , and `-` .\n\nFor more information, see [Tag](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-resource-tags.html) ." + }, + "AWS::CleanRoomsML::TrainingDataset": { + "Description": "The description of the training dataset.", + "Name": "The name of the training dataset.", + "RoleArn": "The ARN of the IAM role that Clean Rooms ML can assume to read the data referred to in the `dataSource` field of each dataset.\n\nPassing a role across accounts is not allowed. If you pass a role that isn't in your account, you get an `AccessDeniedException` error.", + "Tags": "The optional metadata that you apply to the resource to help you categorize and organize them. Each tag consists of a key and an optional value, both of which you define.\n\nThe following basic restrictions apply to tags:\n\n- Maximum number of tags per resource - 50.\n- For each resource, each tag key must be unique, and each tag key can have only one value.\n- Maximum key length - 128 Unicode characters in UTF-8.\n- Maximum value length - 256 Unicode characters in UTF-8.\n- If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . _ : / @.\n- Tag keys and values are case sensitive.\n- Do not use `aws:` , `AWS:` , or any upper or lowercase combination of such as a prefix for keys as it is reserved. You cannot edit or delete tag keys with this prefix. Values can have this prefix. If a tag value has `aws` as its prefix but the key does not, then Clean Rooms ML considers it to be a user tag and will count against the limit of 50 tags. Tags with only the key prefix of `aws` do not count against your tags per resource limit.", + "TrainingData": "An array of information that lists the Dataset objects, which specifies the dataset type and details on its location and schema. You must provide a role that has read access to these tables." + }, + "AWS::CleanRoomsML::TrainingDataset ColumnSchema": { + "ColumnName": "The name of a column.", + "ColumnTypes": "The data type of the column."
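Taken together, the `AutoRefresh` , `PrivacyBudgetType` , and `Parameters` descriptions earlier in this hunk imply a template shaped roughly like the following. This is a hedged sketch only: the membership identifier is a placeholder, `DIFFERENTIAL_PRIVACY` is an assumed spelling for the budget type based on the differential privacy parameters this file documents, and the epsilon and per-query noise values are illustrative, not recommendations:

```json
{
  "ExamplePrivacyBudgetTemplate": {
    "Type": "AWS::CleanRooms::PrivacyBudgetTemplate",
    "Properties": {
      "MembershipIdentifier": "a1b2c3d4-5678-90ab-cdef-EXAMPLE11111",
      "AutoRefresh": "CALENDAR_MONTH",
      "PrivacyBudgetType": "DIFFERENTIAL_PRIVACY",
      "Parameters": {
        "Epsilon": 3,
        "UsersNoisePerQuery": 20
      }
    }
  }
}
```

As the `AutoRefresh` note warns, `CALENDAR_MONTH` is only appropriate when new data regularly enters the collaboration; repeatedly querying the same rows across refreshes can reveal arbitrary amounts of information about them.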
+ }, + "AWS::CleanRoomsML::TrainingDataset DataSource": { + "GlueDataSource": "A GlueDataSource object that defines the catalog ID, database name, and table name for the training data." + }, + "AWS::CleanRoomsML::TrainingDataset Dataset": { + "InputConfig": "A DatasetInputConfig object that defines the data source and schema mapping.", + "Type": "What type of information is found in the dataset." + }, + "AWS::CleanRoomsML::TrainingDataset DatasetInputConfig": { + "DataSource": "A DataSource object that specifies the Glue data source for the training data.", + "Schema": "The schema information for the training data." + }, + "AWS::CleanRoomsML::TrainingDataset GlueDataSource": { + "CatalogId": "The Glue catalog that contains the training data.", + "DatabaseName": "The Glue database that contains the training data.", + "TableName": "The Glue table that contains the training data." + }, + "AWS::CleanRoomsML::TrainingDataset Tag": { + "Key": "The key name of the tag. You can specify a value that's 1 to 128 Unicode characters in length and can't be prefixed with `aws:` . digits, whitespace, `_` , `.` , `:` , `/` , `=` , `+` , `@` , `-` , and `\"` .\n\nFor more information, see [Tag](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-resource-tags.html)", + "Value": "The value for the tag. You can specify a value that's 1 to 256 characters in length. You can use any of the following characters: the set of Unicode letters, digits, whitespace, `_` , `.` , `/` , `=` , `+` , and `-` .\n\nFor more information, see [Tag](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-resource-tags.html) ." + }, "AWS::Cloud9::EnvironmentEC2": { "AutomaticStopTimeMinutes": "The number of minutes until the running instance is shut down after the environment was last used.", "ConnectionType": "The connection type used for connecting to an Amazon EC2 environment. Valid values are `CONNECT_SSH` (default) and `CONNECT_SSM` (connected through AWS Systems Manager ).", @@ -5781,7 +5989,7 @@ "Value": "The value in a key-value pair of a tag. The value must be no longer than 256 Unicode characters." }, "AWS::CloudTrail::EventDataStore": { - "AdvancedEventSelectors": "The advanced event selectors to use to select the events for the data store. You can configure up to five advanced event selectors for each event data store.\n\nFor more information about how to use advanced event selectors to log CloudTrail events, see [Log events by using advanced event selectors](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/logging-data-events-with-cloudtrail.html#creating-data-event-selectors-advanced) in the CloudTrail User Guide.\n\nFor more information about how to use advanced event selectors to include AWS Config configuration items in your event data store, see [Create an event data store for AWS Config configuration items](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/query-lake-cli.html#lake-cli-create-eds-config) in the CloudTrail User Guide.\n\nFor more information about how to use advanced event selectors to include non- AWS events in your event data store, see [Create an integration to log events from outside AWS](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/query-lake-cli.html#lake-cli-create-integration) in the CloudTrail User Guide.", + "AdvancedEventSelectors": "The advanced event selectors to use to select the events for the data store. 
You can configure up to five advanced event selectors for each event data store.\n\nFor more information about how to use advanced event selectors to log CloudTrail events, see [Log events by using advanced event selectors](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/logging-data-events-with-cloudtrail.html#creating-data-event-selectors-advanced) in the CloudTrail User Guide.\n\nFor more information about how to use advanced event selectors to include AWS Config configuration items in your event data store, see [Create an event data store for AWS Config configuration items](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/lake-eds-cli.html#lake-cli-create-eds-config) in the CloudTrail User Guide.\n\nFor more information about how to use advanced event selectors to include events from outside AWS in your event data store, see [Create an integration to log events from outside AWS](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/lake-integrations-cli.html#lake-cli-create-integration) in the CloudTrail User Guide.", "BillingMode": "The billing mode for the event data store determines the cost for ingesting events and the default and maximum retention period for the event data store.\n\nThe following are the possible values:\n\n- `EXTENDABLE_RETENTION_PRICING` - This billing mode is generally recommended if you want a flexible retention period of up to 3653 days (about 10 years). The default retention period for this billing mode is 366 days.\n- `FIXED_RETENTION_PRICING` - This billing mode is recommended if you expect to ingest more than 25 TB of event data per month and need a retention period of up to 2557 days (about 7 years). The default retention period for this billing mode is 2557 days.\n\nThe default value is `EXTENDABLE_RETENTION_PRICING` .\n\nFor more information about CloudTrail pricing, see [AWS CloudTrail Pricing](https://docs.aws.amazon.com/cloudtrail/pricing/) and [Managing CloudTrail Lake costs](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-lake-manage-costs.html) .", "FederationEnabled": "Indicates if [Lake query federation](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/query-federation.html) is enabled. By default, Lake query federation is disabled. You cannot delete an event data store if Lake query federation is enabled.", "FederationRoleArn": "If Lake query federation is enabled, provides the ARN of the federation role used to access the resources for the federated event data store.\n\nThe federation role must exist in your account and provide the [required minimum permissions](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/query-federation.html#query-federation-permissions-role) .", @@ -5832,8 +6040,8 @@ "IsMultiRegionTrail": "Specifies whether the trail applies only to the current Region or to all Regions. The default is false. If the trail exists only in the current Region and this value is set to true, shadow trails (replications of the trail) will be created in the other Regions. If the trail exists in all Regions and this value is set to false, the trail will remain in the Region where it was created, and its shadow trails in other Regions will be deleted. As a best practice, consider using trails that log events in all Regions.", "IsOrganizationTrail": "Specifies whether the trail is applied to all accounts in an organization in AWS Organizations , or only for the current AWS account .
The default is false, and cannot be true unless the call is made on behalf of an AWS account that is the management account for an organization in AWS Organizations . If the trail is not an organization trail and this is set to `true` , the trail will be created in all AWS accounts that belong to the organization. If the trail is an organization trail and this is set to `false` , the trail will remain in the current AWS account but be deleted from all member accounts in the organization.\n\n> Only the management account for the organization can convert an organization trail to a non-organization trail, or convert a non-organization trail to an organization trail.", "KMSKeyId": "Specifies the AWS KMS key ID to use to encrypt the logs delivered by CloudTrail. The value can be an alias name prefixed by \"alias/\", a fully specified ARN to an alias, a fully specified ARN to a key, or a globally unique identifier.\n\nCloudTrail also supports AWS KMS multi-Region keys. For more information about multi-Region keys, see [Using multi-Region keys](https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-overview.html) in the *AWS Key Management Service Developer Guide* .\n\nExamples:\n\n- alias/MyAliasName\n- arn:aws:kms:us-east-2:123456789012:alias/MyAliasName\n- arn:aws:kms:us-east-2:123456789012:key/12345678-1234-1234-1234-123456789012\n- 12345678-1234-1234-1234-123456789012", - "S3BucketName": "Specifies the name of the Amazon S3 bucket designated for publishing log files. See [Amazon S3 Bucket Naming Requirements](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/create_trail_naming_policy.html) .", - "S3KeyPrefix": "Specifies the Amazon S3 key prefix that comes after the name of the bucket you have designated for log file delivery. For more information, see [Finding Your CloudTrail Log Files](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-find-log-files.html) . The maximum length is 200 characters.", + "S3BucketName": "Specifies the name of the Amazon S3 bucket designated for publishing log files. See [Amazon S3 Bucket naming rules](https://docs.aws.amazon.com/AmazonS3/latest/userguide/bucketnamingrules.html) .", + "S3KeyPrefix": "Specifies the Amazon S3 key prefix that comes after the name of the bucket you have designated for log file delivery. For more information, see [Finding Your CloudTrail Log Files](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/get-and-view-cloudtrail-log-files.html#cloudtrail-find-log-files) . The maximum length is 200 characters.", "SnsTopicName": "Specifies the name of the Amazon SNS topic defined for notification of log file delivery. The maximum length is 256 characters.", "Tags": "A custom set of tags (key-value pairs) for this trail.", "TrailName": "Specifies the name of the trail. The name must meet the following requirements:\n\n- Contain only ASCII letters (a-z, A-Z), numbers (0-9), periods (.), underscores (_), or dashes (-)\n- Start with a letter or number, and end with a letter or number\n- Be between 3 and 128 characters\n- Have no adjacent periods, underscores or dashes. Names like `my-_namespace` and `my--namespace` are not valid.\n- Not be in IP address format (for example, 192.168.5.4)" @@ -5886,6 +6094,7 @@ "OKActions": "The actions to execute when this alarm transitions to the `OK` state from any other state. Each action is specified as an Amazon Resource Name (ARN).", "Period": "The period, in seconds, over which the statistic is applied. This is required for an alarm based on a metric. 
Valid values are 10, 30, 60, and any multiple of 60.\n\nFor an alarm based on a math expression, you can't specify `Period` , and instead you use the `Metrics` parameter.\n\n*Minimum:* 10", "Statistic": "The statistic for the metric associated with the alarm, other than percentile. For percentile statistics, use `ExtendedStatistic` .\n\nFor an alarm based on a metric, you must specify either `Statistic` or `ExtendedStatistic` but not both.\n\nFor an alarm based on a math expression, you can't specify `Statistic` . Instead, you use `Metrics` .", + "Tags": "A list of key-value pairs to associate with the alarm. You can associate as many as 50 tags with an alarm. To be able to associate tags with the alarm when you create the alarm, you must have the `cloudwatch:TagResource` permission.\n\nTags can help you organize and categorize your resources. You can also use them to scope user permissions by granting a user permission to access or change only resources with certain tag values.", "Threshold": "The value to compare with the specified statistic.", "ThresholdMetricId": "In an alarm based on an anomaly detection model, this is the ID of the `ANOMALY_DETECTION_BAND` function used as the threshold for the alarm.", "TreatMissingData": "Sets how this alarm is to handle missing data points. Valid values are `breaching` , `notBreaching` , `ignore` , and `missing` . For more information, see [Configuring How CloudWatch Alarms Treat Missing Data](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data) in the *Amazon CloudWatch User Guide* .\n\nIf you omit this parameter, the default behavior of `missing` is used.", @@ -5915,9 +6124,14 @@ "Stat": "The statistic to return. It can include any CloudWatch statistic or extended statistic. For a list of valid values, see the table in [Statistics](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/cloudwatch_concepts.html#Statistic) in the *Amazon CloudWatch User Guide* .", "Unit": "The unit to use for the returned data points.\n\nValid values are: Seconds, Microseconds, Milliseconds, Bytes, Kilobytes, Megabytes, Gigabytes, Terabytes, Bits, Kilobits, Megabits, Gigabits, Terabits, Percent, Count, Bytes/Second, Kilobytes/Second, Megabytes/Second, Gigabytes/Second, Terabytes/Second, Bits/Second, Kilobits/Second, Megabits/Second, Gigabits/Second, Terabits/Second, Count/Second, or None." }, + "AWS::CloudWatch::Alarm Tag": { + "Key": "A string that you can use to assign a value. The combination of tag keys and values can help you organize and categorize your resources.", + "Value": "The value for the specified tag key." + }, "AWS::CloudWatch::AnomalyDetector": { "Configuration": "Specifies details about how the anomaly detection model is to be trained, including time ranges to exclude when training and updating the model. The configuration can also include the time zone to use for the metric.", "Dimensions": "The dimensions of the metric associated with the anomaly detection band.", + "MetricCharacteristics": "Use this object to include parameters to provide information about your metric to CloudWatch to help it build more accurate anomaly detection models. 
Currently, it includes the `PeriodicSpikes` parameter.", "MetricMathAnomalyDetector": "The CloudWatch metric math expression for this anomaly detector.", "MetricName": "The name of the metric associated with the anomaly detection band.", "Namespace": "The namespace of the metric associated with the anomaly detection band.", @@ -5937,6 +6151,9 @@ "MetricName": "The name of the metric. This is a required field.", "Namespace": "The namespace of the metric." }, + "AWS::CloudWatch::AnomalyDetector MetricCharacteristics": { + "PeriodicSpikes": "Set this parameter to true if values for this metric consistently include spikes that should not be considered to be anomalies. With this set to true, CloudWatch will expect to see spikes that occurred consistently during the model training period, and won't flag future similar spikes as anomalies." + }, "AWS::CloudWatch::AnomalyDetector MetricDataQuery": { "AccountId": "The ID of the account where the metrics are located.\n\nIf you are performing a `GetMetricData` operation in a monitoring account, use this to specify which account to retrieve this metric from.\n\nIf you are performing a `PutMetricAlarm` operation, use this to specify which account contains the metric that the alarm is watching.", "Expression": "This field can contain either a Metrics Insights query, or a metric math expression to be performed on the returned data. For more information about Metrics Insights queries, see [Metrics Insights query components and syntax](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/cloudwatch-metrics-insights-querylanguage) in the *Amazon CloudWatch User Guide* .\n\nA math expression can use the `Id` of the other metrics or queries to refer to those metrics, and can also use the `Id` of other expressions to use the result of those expressions. For more information about metric math expressions, see [Metric Math Syntax and Functions](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/using-metric-math.html#metric-math-syntax) in the *Amazon CloudWatch User Guide* .\n\nWithin each MetricDataQuery object, you must specify either `Expression` or `MetricStat` but not both.", @@ -5976,7 +6193,12 @@ "AlarmName": "The name for the composite alarm. This name must be unique within your AWS account.", "AlarmRule": "An expression that specifies which other alarms are to be evaluated to determine this composite alarm's state. For each alarm that you reference, you designate a function that specifies whether that alarm needs to be in ALARM state, OK state, or INSUFFICIENT_DATA state. You can use operators (AND, OR and NOT) to combine multiple functions in a single expression. 
You can use parenthesis to logically group the functions in your expression.\n\nYou can use either alarm names or ARNs to reference the other alarms that are to be evaluated.\n\nFunctions can include the following:\n\n- ALARM(\"alarm-name or alarm-ARN\") is TRUE if the named alarm is in ALARM state.\n- OK(\"alarm-name or alarm-ARN\") is TRUE if the named alarm is in OK state.\n- INSUFFICIENT_DATA(\"alarm-name or alarm-ARN\") is TRUE if the named alarm is in INSUFFICIENT_DATA state.\n- TRUE always evaluates to TRUE.\n- FALSE always evaluates to FALSE.\n\nTRUE and FALSE are useful for testing a complex AlarmRule structure, and for testing your alarm actions.\n\nFor more information about `AlarmRule` syntax, see [PutCompositeAlarm](https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_PutCompositeAlarm.html) in the *Amazon CloudWatch API Reference* .", "InsufficientDataActions": "The actions to execute when this alarm transitions to the INSUFFICIENT_DATA state from any other state. Each action is specified as an Amazon Resource Name (ARN). For more information about creating alarms and the actions that you can specify, see [PutCompositeAlarm](https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_PutCompositeAlarm.html) in the *Amazon CloudWatch API Reference* .", - "OKActions": "The actions to execute when this alarm transitions to the OK state from any other state. Each action is specified as an Amazon Resource Name (ARN). For more information about creating alarms and the actions that you can specify, see [PutCompositeAlarm](https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_PutCompositeAlarm.html) in the *Amazon CloudWatch API Reference* ." + "OKActions": "The actions to execute when this alarm transitions to the OK state from any other state. Each action is specified as an Amazon Resource Name (ARN). For more information about creating alarms and the actions that you can specify, see [PutCompositeAlarm](https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_PutCompositeAlarm.html) in the *Amazon CloudWatch API Reference* .", + "Tags": "A list of key-value pairs to associate with the alarm. You can associate as many as 50 tags with an alarm. To be able to associate tags with the alarm when you create the alarm, you must have the `cloudwatch:TagResource` permission.\n\nTags can help you organize and categorize your resources. You can also use them to scope user permissions by granting a user permission to access or change only resources with certain tag values." + }, + "AWS::CloudWatch::CompositeAlarm Tag": { + "Key": "A string that you can use to assign a value. The combination of tag keys and values can help you organize and categorize your resources.", + "Value": "The value for the specified tag key." }, "AWS::CloudWatch::Dashboard": { "DashboardBody": "The detailed information about the dashboard in JSON format, including the widgets to include and their location on the dashboard. This parameter is required.\n\nFor more information about the syntax, see [Dashboard Body Structure and Syntax](https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/CloudWatch-Dashboard-Body-Structure.html) .", @@ -6125,7 +6347,7 @@ }, "AWS::CodeBuild::Project Environment": { "Certificate": "The ARN of the Amazon S3 bucket, path prefix, and object key that contains the PEM-encoded certificate for the build project. 
For more information, see [certificate](https://docs.aws.amazon.com/codebuild/latest/userguide/create-project-cli.html#cli.environment.certificate) in the *AWS CodeBuild User Guide* .", - "ComputeType": "The type of compute environment. This determines the number of CPU cores and memory the build environment uses. Available values include:\n\n- `BUILD_GENERAL1_SMALL` : Use up to 3 GB memory and 2 vCPUs for builds.\n- `BUILD_GENERAL1_MEDIUM` : Use up to 7 GB memory and 4 vCPUs for builds.\n- `BUILD_GENERAL1_LARGE` : Use up to 15 GB memory and 8 vCPUs for builds.\n\nFor more information, see [Build Environment Compute Types](https://docs.aws.amazon.com/codebuild/latest/userguide/build-env-ref-compute-types.html) in the *AWS CodeBuild User Guide.*", + "ComputeType": "The type of compute environment. This determines the number of CPU cores and memory the build environment uses. Available values include:\n\n- `BUILD_GENERAL1_SMALL` : Use up to 3 GB memory and 2 vCPUs for builds.\n- `BUILD_GENERAL1_MEDIUM` : Use up to 7 GB memory and 4 vCPUs for builds.\n- `BUILD_GENERAL1_LARGE` : Use up to 16 GB memory and 8 vCPUs for builds, depending on your environment type.\n- `BUILD_GENERAL1_XLARGE` : Use up to 70 GB memory and 36 vCPUs for builds, depending on your environment type.\n- `BUILD_GENERAL1_2XLARGE` : Use up to 145 GB memory, 72 vCPUs, and 824 GB of SSD storage for builds. This compute type supports Docker images up to 100 GB uncompressed.\n- `BUILD_LAMBDA_1GB` : Use up to 1 GB memory for builds. Only available for environment type `LINUX_LAMBDA_CONTAINER` and `ARM_LAMBDA_CONTAINER` .\n- `BUILD_LAMBDA_2GB` : Use up to 2 GB memory for builds. Only available for environment type `LINUX_LAMBDA_CONTAINER` and `ARM_LAMBDA_CONTAINER` .\n- `BUILD_LAMBDA_4GB` : Use up to 4 GB memory for builds. Only available for environment type `LINUX_LAMBDA_CONTAINER` and `ARM_LAMBDA_CONTAINER` .\n- `BUILD_LAMBDA_8GB` : Use up to 8 GB memory for builds. Only available for environment type `LINUX_LAMBDA_CONTAINER` and `ARM_LAMBDA_CONTAINER` .\n- `BUILD_LAMBDA_10GB` : Use up to 10 GB memory for builds. Only available for environment type `LINUX_LAMBDA_CONTAINER` and `ARM_LAMBDA_CONTAINER` .\n\nFor more information, see [Build Environment Compute Types](https://docs.aws.amazon.com/codebuild/latest/userguide/build-env-ref-compute-types.html) in the *AWS CodeBuild User Guide.*", "EnvironmentVariables": "A set of environment variables to make available to builds for this build project.", "Fleet:": "", "Image": "The image tag or image digest that identifies the Docker image to use for this build project. Use the following formats:\n\n- For an image tag: `/:` . For example, in the Docker repository that CodeBuild uses to manage its Docker images, this would be `aws/codebuild/standard:4.0` .\n- For an image digest: `/@` . For example, to specify an image with the digest \"sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf,\" use `/@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf` .\n\nFor more information, see [Docker images provided by CodeBuild](https://docs.aws.amazon.com//codebuild/latest/userguide/build-env-ref-available.html) in the *AWS CodeBuild user guide* .", @@ -6193,7 +6415,7 @@ "GitCloneDepth": "The depth of history to download. Minimum value is 0. If this value is 0, greater than 25, or not provided, then the full history is downloaded with each build project. 
If your source type is Amazon S3, this value is not supported.", "GitSubmodulesConfig": "Information about the Git submodules configuration for the build project.", "InsecureSsl": "This is used with GitHub Enterprise only. Set to true to ignore SSL warnings while connecting to your GitHub Enterprise project repository. The default value is `false` . `InsecureSsl` should be used for testing purposes only. It should not be used in a production environment.", - "Location": "Information about the location of the source code to be built. Valid values include:\n\n- For source code settings that are specified in the source action of a pipeline in CodePipeline, `location` should not be specified. If it is specified, CodePipeline ignores it. This is because CodePipeline uses the settings in a pipeline's source action instead of this value.\n- For source code in an CodeCommit repository, the HTTPS clone URL to the repository that contains the source code and the buildspec file (for example, `https://git-codecommit..amazonaws.com/v1/repos/` ).\n- For source code in an Amazon S3 input bucket, one of the following.\n\n- The path to the ZIP file that contains the source code (for example, `//.zip` ).\n- The path to the folder that contains the source code (for example, `///` ).\n- For source code in a GitHub repository, the HTTPS clone URL to the repository that contains the source and the buildspec file. You must connect your AWS account to your GitHub account. Use the AWS CodeBuild console to start creating a build project. When you use the console to connect (or reconnect) with GitHub, on the GitHub *Authorize application* page, for *Organization access* , choose *Request access* next to each repository you want to allow AWS CodeBuild to have access to, and then choose *Authorize application* . (After you have connected to your GitHub account, you do not need to finish creating the build project. You can leave the AWS CodeBuild console.) To instruct AWS CodeBuild to use this connection, in the `source` object, set the `auth` object's `type` value to `OAUTH` .\n- For source code in an GitLab or self-managed GitLab repository, the HTTPS clone URL to the repository that contains the source and the buildspec file. You must connect your AWS account to your GitLab account. Use the AWS CodeBuild console to start creating a build project. When you use the console to connect (or reconnect) with GitLab, on the Connections *Authorize application* page, choose *Authorize* . Then on the AWS CodeStar Connections *Create GitLab connection* page, choose *Connect to GitLab* . (After you have connected to your GitLab account, you do not need to finish creating the build project. You can leave the AWS CodeBuild console.) To instruct AWS CodeBuild to override the default connection and use this connection instead, set the `auth` object's `type` value to `CODECONNECTIONS` in the `source` object.\n- For source code in a Bitbucket repository, the HTTPS clone URL to the repository that contains the source and the buildspec file. You must connect your AWS account to your Bitbucket account. Use the AWS CodeBuild console to start creating a build project. When you use the console to connect (or reconnect) with Bitbucket, on the Bitbucket *Confirm access to your account* page, choose *Grant access* . (After you have connected to your Bitbucket account, you do not need to finish creating the build project. You can leave the AWS CodeBuild console.) 
To instruct AWS CodeBuild to use this connection, in the `source` object, set the `auth` object's `type` value to `OAUTH` .\n\nIf you specify `CODEPIPELINE` for the `Type` property, don't specify this property. For all of the other types, you must specify `Location` .", + "Location": "Information about the location of the source code to be built. Valid values include:\n\n- For source code settings that are specified in the source action of a pipeline in CodePipeline, `location` should not be specified. If it is specified, CodePipeline ignores it. This is because CodePipeline uses the settings in a pipeline's source action instead of this value.\n- For source code in a CodeCommit repository, the HTTPS clone URL to the repository that contains the source code and the buildspec file (for example, `https://git-codecommit..amazonaws.com/v1/repos/` ).\n- For source code in an Amazon S3 input bucket, one of the following.\n\n- The path to the ZIP file that contains the source code (for example, `//.zip` ).\n- The path to the folder that contains the source code (for example, `///` ).\n- For source code in a GitHub repository, the HTTPS clone URL to the repository that contains the source and the buildspec file. You must connect your AWS account to your GitHub account. Use the AWS CodeBuild console to start creating a build project. When you use the console to connect (or reconnect) with GitHub, on the GitHub *Authorize application* page, for *Organization access* , choose *Request access* next to each repository you want to allow AWS CodeBuild to have access to, and then choose *Authorize application* . (After you have connected to your GitHub account, you do not need to finish creating the build project. You can leave the AWS CodeBuild console.) To instruct AWS CodeBuild to use this connection, in the `source` object, set the `auth` object's `type` value to `OAUTH` .\n- For source code in a GitLab or self-managed GitLab repository, the HTTPS clone URL to the repository that contains the source and the buildspec file. You must connect your AWS account to your GitLab account. Use the AWS CodeBuild console to start creating a build project. When you use the console to connect (or reconnect) with GitLab, on the Connections *Authorize application* page, choose *Authorize* . Then on the AWS CodeConnections *Create GitLab connection* page, choose *Connect to GitLab* . (After you have connected to your GitLab account, you do not need to finish creating the build project. You can leave the AWS CodeBuild console.) To instruct AWS CodeBuild to override the default connection and use this connection instead, set the `auth` object's `type` value to `CODECONNECTIONS` in the `source` object.\n- For source code in a Bitbucket repository, the HTTPS clone URL to the repository that contains the source and the buildspec file. You must connect your AWS account to your Bitbucket account. Use the AWS CodeBuild console to start creating a build project. When you use the console to connect (or reconnect) with Bitbucket, on the Bitbucket *Confirm access to your account* page, choose *Grant access* . (After you have connected to your Bitbucket account, you do not need to finish creating the build project. You can leave the AWS CodeBuild console.) To instruct AWS CodeBuild to use this connection, in the `source` object, set the `auth` object's `type` value to `OAUTH` .\n\nIf you specify `CODEPIPELINE` for the `Type` property, don't specify this property.
For all of the other types, you must specify `Location` .", "ReportBuildStatus": "Set to true to report the status of a build's start and finish to your source provider. This option is valid only when your source provider is GitHub, GitHub Enterprise, or Bitbucket. If this is set and you use a different source provider, an `invalidInputException` is thrown.", "SourceIdentifier": "An identifier for this project source. The identifier can only contain alphanumeric characters and underscores, and must be less than 128 characters in length.", "Type": "The type of repository that contains the source code to be built. Valid values include:\n\n- `BITBUCKET` : The source code is in a Bitbucket repository.\n- `CODECOMMIT` : The source code is in an CodeCommit repository.\n- `CODEPIPELINE` : The source code settings are specified in the source action of a pipeline in CodePipeline.\n- `GITHUB` : The source code is in a GitHub repository.\n- `GITHUB_ENTERPRISE` : The source code is in a GitHub Enterprise Server repository.\n- `GITLAB` : The source code is in a GitLab repository.\n- `GITLAB_SELF_MANAGED` : The source code is in a self-managed GitLab repository.\n- `NO_SOURCE` : The project does not have input source code.\n- `S3` : The source code is in an Amazon S3 bucket." @@ -6214,7 +6436,7 @@ "AWS::CodeBuild::Project WebhookFilter": { "ExcludeMatchedPattern": "Used to indicate that the `pattern` determines which webhook events do not trigger a build. If true, then a webhook event that does not match the `pattern` triggers a build. If false, then a webhook event that matches the `pattern` triggers a build.", "Pattern": "For a `WebHookFilter` that uses `EVENT` type, a comma-separated string that specifies one or more events. For example, the webhook filter `PUSH, PULL_REQUEST_CREATED, PULL_REQUEST_UPDATED` allows all push, pull request created, and pull request updated events to trigger a build.\n\nFor a `WebHookFilter` that uses any of the other filter types, a regular expression pattern. For example, a `WebHookFilter` that uses `HEAD_REF` for its `type` and the pattern `^refs/heads/` triggers a build when the head reference is a branch with a reference name `refs/heads/branch-name` .", - "Type": "The type of webhook filter. There are eight webhook filter types: `EVENT` , `ACTOR_ACCOUNT_ID` , `HEAD_REF` , `BASE_REF` , `FILE_PATH` , `COMMIT_MESSAGE` , `TAG_NAME` , and `RELEASE_NAME` .\n\n- EVENT\n\n- A webhook event triggers a build when the provided `pattern` matches one of eight event types: `PUSH` , `PULL_REQUEST_CREATED` , `PULL_REQUEST_UPDATED` , `PULL_REQUEST_CLOSED` , `PULL_REQUEST_REOPENED` , `PULL_REQUEST_MERGED` , `RELEASED` , and `PRERELEASED` . The `EVENT` patterns are specified as a comma-separated string. For example, `PUSH, PULL_REQUEST_CREATED, PULL_REQUEST_UPDATED` filters all push, pull request created, and pull request updated events.\n\n> The `PULL_REQUEST_REOPENED` works with GitHub and GitHub Enterprise only. The `RELEASED` and `PRERELEASED` work with GitHub only.\n- ACTOR_ACCOUNT_ID\n\n- A webhook event triggers a build when a GitHub, GitHub Enterprise, or Bitbucket account ID matches the regular expression `pattern` .\n- HEAD_REF\n\n- A webhook event triggers a build when the head reference matches the regular expression `pattern` . 
For example, `refs/heads/branch-name` and `refs/tags/tag-name` .\n\n> Works with GitHub and GitHub Enterprise push, GitHub and GitHub Enterprise pull request, Bitbucket push, and Bitbucket pull request events.\n- BASE_REF\n\n- A webhook event triggers a build when the base reference matches the regular expression `pattern` . For example, `refs/heads/branch-name` .\n\n> Works with pull request events only.\n- FILE_PATH\n\n- A webhook triggers a build when the path of a changed file matches the regular expression `pattern` .\n\n> Works with GitHub and Bitbucket events push and pull requests events. Also works with GitHub Enterprise push events, but does not work with GitHub Enterprise pull request events.\n- COMMIT_MESSAGE\n\n- A webhook triggers a build when the head commit message matches the regular expression `pattern` .\n\n> Works with GitHub and Bitbucket events push and pull requests events. Also works with GitHub Enterprise push events, but does not work with GitHub Enterprise pull request events.\n- TAG_NAME\n\n- A webhook triggers a build when the tag name of the release matches the regular expression `pattern` .\n\n> Works with `RELEASED` and `PRERELEASED` events only.\n- RELEASE_NAME\n\n- A webhook triggers a build when the release name matches the regular expression `pattern` .\n\n> Works with `RELEASED` and `PRERELEASED` events only." + "Type": "The type of webhook filter. There are nine webhook filter types: `EVENT` , `ACTOR_ACCOUNT_ID` , `HEAD_REF` , `BASE_REF` , `FILE_PATH` , `COMMIT_MESSAGE` , `TAG_NAME` , `RELEASE_NAME` , and `WORKFLOW_NAME` .\n\n- EVENT\n\n- A webhook event triggers a build when the provided `pattern` matches one of nine event types: `PUSH` , `PULL_REQUEST_CREATED` , `PULL_REQUEST_UPDATED` , `PULL_REQUEST_CLOSED` , `PULL_REQUEST_REOPENED` , `PULL_REQUEST_MERGED` , `RELEASED` , `PRERELEASED` , and `WORKFLOW_JOB_QUEUED` . The `EVENT` patterns are specified as a comma-separated string. For example, `PUSH, PULL_REQUEST_CREATED, PULL_REQUEST_UPDATED` filters all push, pull request created, and pull request updated events.\n\n> The `PULL_REQUEST_REOPENED` works with GitHub and GitHub Enterprise only. The `RELEASED` , `PRERELEASED` , and `WORKFLOW_JOB_QUEUED` work with GitHub only.\n- ACTOR_ACCOUNT_ID\n\n- A webhook event triggers a build when a GitHub, GitHub Enterprise, or Bitbucket account ID matches the regular expression `pattern` .\n- HEAD_REF\n\n- A webhook event triggers a build when the head reference matches the regular expression `pattern` . For example, `refs/heads/branch-name` and `refs/tags/tag-name` .\n\n> Works with GitHub and GitHub Enterprise push, GitHub and GitHub Enterprise pull request, Bitbucket push, and Bitbucket pull request events.\n- BASE_REF\n\n- A webhook event triggers a build when the base reference matches the regular expression `pattern` . For example, `refs/heads/branch-name` .\n\n> Works with pull request events only.\n- FILE_PATH\n\n- A webhook triggers a build when the path of a changed file matches the regular expression `pattern` .\n\n> Works with GitHub and Bitbucket push and pull request events. Also works with GitHub Enterprise push events, but does not work with GitHub Enterprise pull request events.\n- COMMIT_MESSAGE\n\n- A webhook triggers a build when the head commit message matches the regular expression `pattern` .\n\n> Works with GitHub and Bitbucket push and pull request events.
Also works with GitHub Enterprise push events, but does not work with GitHub Enterprise pull request events.\n- TAG_NAME\n\n- A webhook triggers a build when the tag name of the release matches the regular expression `pattern` .\n\n> Works with `RELEASED` and `PRERELEASED` events only.\n- RELEASE_NAME\n\n- A webhook triggers a build when the release name matches the regular expression `pattern` .\n\n> Works with `RELEASED` and `PRERELEASED` events only.\n- WORKFLOW_NAME\n\n- A webhook triggers a build when the workflow name matches the regular expression `pattern` .\n\n> Works with `WORKFLOW_JOB_QUEUED` events only." }, "AWS::CodeBuild::ReportGroup": { "DeleteReports": "When deleting a report group, specifies if reports within the report group should be deleted.\n\n- **true** - Deletes any reports that belong to the report group before deleting the report group.\n- **false** - You must delete any reports in the report group. This is the default value. If you delete a report group that contains one or more reports, an exception is thrown.", @@ -6242,7 +6464,7 @@ "AWS::CodeBuild::SourceCredential": { "AuthType": "The type of authentication used by the credentials. Valid options are OAUTH, BASIC_AUTH, PERSONAL_ACCESS_TOKEN, or CODECONNECTIONS.", "ServerType": "The type of source provider. The valid options are GITHUB, GITHUB_ENTERPRISE, GITLAB, GITLAB_SELF_MANAGED, or BITBUCKET.", - "Token": "For GitHub or GitHub Enterprise, this is the personal access token. For Bitbucket, this is the app password.", + "Token": "For GitHub or GitHub Enterprise, this is the personal access token. For Bitbucket, this is either the access token or the app password.", "Username": "The Bitbucket username when the `authType` is BASIC_AUTH. This parameter is not valid for other types of source providers or connections." }, "AWS::CodeCommit::Repository": { @@ -6273,6 +6495,16 @@ "Key": "The tag's key.", "Value": "The tag's value." }, + "AWS::CodeConnections::Connection": { + "ConnectionName": "The name of the connection. Connection names must be unique in an AWS account .", + "HostArn": "The Amazon Resource Name (ARN) of the host associated with the connection.", + "ProviderType": "The name of the external provider where your third-party code repository is configured.", + "Tags": "" + }, + "AWS::CodeConnections::Connection Tag": { + "Key": "The tag's key.", + "Value": "The tag's value." + }, "AWS::CodeDeploy::Application": { "ApplicationName": "A name for the application. If you don't specify a name, AWS CloudFormation generates a unique physical ID and uses that ID for the application name. For more information, see [Name Type](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-name.html) .\n\n> Updates to `ApplicationName` are not supported.", "ComputePlatform": "The compute platform that CodeDeploy deploys the application to.", @@ -8553,7 +8785,7 @@ "AWS::DMS::Endpoint PostgreSqlSettings": { "AfterConnectScript": "For use with change data capture (CDC) only, this attribute has AWS DMS bypass foreign keys and user triggers to reduce the time it takes to bulk load data.\n\nExample: `afterConnectScript=SET session_replication_role='replica'`", "BabelfishDatabaseName": "The Babelfish for Aurora PostgreSQL database name for the endpoint.", - "CaptureDdls": "To capture DDL events, AWS DMS creates various artifacts in the PostgreSQL database when the task starts. 
You can later remove these artifacts.\n\nThe default value is `true` .\n\nIf this value is set to `N` , you don't have to create tables or triggers on the source database.", + "CaptureDdls": "To capture DDL events, AWS DMS creates various artifacts in the PostgreSQL database when the task starts. You can later remove these artifacts.\n\nIf this value is set to `True` , you don't have to create tables or triggers on the source database.", "DatabaseMode": "Specifies the default behavior of the replication's handling of PostgreSQL- compatible endpoints that require some additional configuration, such as Babelfish endpoints.", "DdlArtifactsSchema": "The schema in which the operational DDL database artifacts are created.\n\nThe default value is `public` .\n\nExample: `ddlArtifactsSchema=xyzddlschema;`", "ExecuteTimeout": "Sets the client statement timeout for the PostgreSQL instance, in seconds. The default value is 60 seconds.\n\nExample: `executeTimeout=100;`", @@ -9399,7 +9631,7 @@ "AgentArns": "Specifies the Amazon Resource Names (ARNs) of the DataSync agents that can securely connect with your location.", "BucketName": "Specifies the name of the object storage bucket involved in the transfer.", "SecretKey": "Specifies the secret key (for example, a password) if credentials are required to authenticate with the object storage server.", - "ServerCertificate": "Specifies a file with the certificates that are used to sign the object storage server's certificate (for example, `file:///home/user/.ssh/storage_sys_certificate.pem` ). The file you specify must include the following:\n\n- The certificate of the signing certificate authority (CA)\n- Any intermediate certificates\n- base64 encoding\n- A `.pem` extension\n\nThe file can be up to 32768 bytes (before base64 encoding).\n\nTo use this parameter, configure `ServerProtocol` to `HTTPS` .", + "ServerCertificate": "Specifies a certificate chain for DataSync to authenticate with your object storage system if the system uses a private or self-signed certificate authority (CA). You must specify a single `.pem` file with a full certificate chain (for example, `file:///home/user/.ssh/object_storage_certificates.pem` ).\n\nThe certificate chain might include:\n\n- The object storage system's certificate\n- All intermediate certificates (if there are any)\n- The root certificate of the signing CA\n\nYou can concatenate your certificates into a `.pem` file (which can be up to 32768 bytes before base64 encoding). The following example `cat` command creates an `object_storage_certificates.pem` file that includes three certificates:\n\n`cat object_server_certificate.pem intermediate_certificate.pem ca_root_certificate.pem > object_storage_certificates.pem`\n\nTo use this parameter, configure `ServerProtocol` to `HTTPS` .", "ServerHostname": "Specifies the domain name or IP address of the object storage server. A DataSync agent uses this hostname to mount the object storage server in a network.", "ServerPort": "Specifies the port that your object storage server accepts inbound network traffic on (for example, port 443).", "ServerProtocol": "Specifies the protocol that your object storage server uses to communicate.", @@ -9579,6 +9811,7 @@ "TypeRevision": "The revision of the metadata form type." 
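The object storage location properties above (agent ARNs, server endpoint, and optional certificate chain) come together in a fragment like the one below. This is a hedged sketch: the resource type name `AWS::DataSync::LocationObjectStorage` is assumed from context, since the hunk shows only its property descriptions, and the agent ARN, hostname, and bucket name are placeholders:

```json
{
  "ExampleObjectStorageLocation": {
    "Type": "AWS::DataSync::LocationObjectStorage",
    "Properties": {
      "AgentArns": [
        "arn:aws:datasync:us-east-1:111122223333:agent/agent-0123456789abcdef0"
      ],
      "ServerHostname": "storage.example.com",
      "ServerPort": 443,
      "ServerProtocol": "HTTPS",
      "BucketName": "example-bucket"
    }
  }
}
```

If the server uses a private or self-signed CA, the `ServerCertificate` property would carry the concatenated `.pem` chain described above, and `ServerProtocol` must then be `HTTPS`.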
}, "AWS::DataZone::DataSource GlueRunConfigurationInput": { + "AutoImportDataQualityResult": "", "DataAccessRole": "The data access role included in the configuration details of the AWS Glue data source.", "RelationalFilterConfigurations": "The relational filter configurations included in the configuration details of the AWS Glue data source." }, @@ -9689,6 +9922,155 @@ "Content": "The content of the subscription target configuration.", "FormName": "The form name included in the subscription target configuration." }, + "AWS::Deadline::Farm": { + "Description": "A description of the farm that helps identify what the farm is used for.", + "DisplayName": "The display name of the farm.", + "KmsKeyArn": "The ARN for the KMS key." + }, + "AWS::Deadline::Fleet": { + "Configuration": "The configuration details for the fleet.", + "Description": "A description that helps identify what the fleet is used for.", + "DisplayName": "The display name of the fleet summary to update.", + "FarmId": "The farm ID.", + "MaxWorkerCount": "The maximum number of workers specified in the fleet.", + "MinWorkerCount": "The minimum number of workers in the fleet.", + "RoleArn": "The IAM role that workers in the fleet use when processing jobs." + }, + "AWS::Deadline::Fleet AcceleratorCountRange": { + "Max": "The maximum GPU for the accelerator.", + "Min": "The minimum GPU for the accelerator." + }, + "AWS::Deadline::Fleet AcceleratorTotalMemoryMiBRange": { + "Max": "The maximum amount of memory to use for the accelerator, measured in MiB.", + "Min": "The minimum amount of memory to use for the accelerator, measured in MiB." + }, + "AWS::Deadline::Fleet CustomerManagedFleetConfiguration": { + "Mode": "The AWS Auto Scaling mode for the customer managed fleet configuration.", + "StorageProfileId": "The storage profile ID.", + "WorkerCapabilities": "The worker capabilities for a customer managed fleet configuration." + }, + "AWS::Deadline::Fleet CustomerManagedWorkerCapabilities": { + "AcceleratorCount": "The range of the accelerator.", + "AcceleratorTotalMemoryMiB": "The total memory (MiB) for the customer managed worker capabilities.", + "AcceleratorTypes": "The accelerator types for the customer managed worker capabilities.", + "CpuArchitectureType": "The CPU architecture type for the customer managed worker capabilities.", + "CustomAmounts": "Custom requirement ranges for customer managed worker capabilities.", + "CustomAttributes": "Custom attributes for the customer manged worker capabilities.", + "MemoryMiB": "The memory (MiB).", + "OsFamily": "The operating system (OS) family.", + "VCpuCount": "The vCPU count for the customer manged worker capabilities." + }, + "AWS::Deadline::Fleet Ec2EbsVolume": { + "Iops": "The IOPS per volume.", + "SizeGiB": "The EBS volume size in GiB.", + "ThroughputMiB": "The throughput per volume in MiB." + }, + "AWS::Deadline::Fleet FleetAmountCapability": { + "Max": "The maximum amount of the fleet worker capability.", + "Min": "The minimum amount of fleet worker capability.", + "Name": "The name of the fleet capability." + }, + "AWS::Deadline::Fleet FleetAttributeCapability": { + "Name": "The name of the fleet attribute capability for the worker.", + "Values": "The number of fleet attribute capabilities." + }, + "AWS::Deadline::Fleet FleetCapabilities": { + "Amounts": "Amount capabilities of the fleet.", + "Attributes": "Attribute capabilities of the fleet." 
+ }, + "AWS::Deadline::Fleet FleetConfiguration": { + "CustomerManaged": "The customer managed fleets within a fleet configuration.", + "ServiceManagedEc2": "The service managed Amazon EC2 instances for a fleet configuration." + }, + "AWS::Deadline::Fleet MemoryMiBRange": { + "Max": "The maximum amount of memory (in MiB).", + "Min": "The minimum amount of memory (in MiB)." + }, + "AWS::Deadline::Fleet ServiceManagedEc2FleetConfiguration": { + "InstanceCapabilities": "The Amazon EC2 instance capabilities.", + "InstanceMarketOptions": "The Amazon EC2 market type." + }, + "AWS::Deadline::Fleet ServiceManagedEc2InstanceCapabilities": { + "AllowedInstanceTypes": "The allowable Amazon EC2 instance types.", + "CpuArchitectureType": "The CPU architecture type.", + "CustomAmounts": "The custom capability amounts to require for instances in this fleet.", + "CustomAttributes": "The custom capability attributes to require for instances in this fleet.", + "ExcludedInstanceTypes": "The instance types to exclude from the fleet.", + "MemoryMiB": "The memory, as MiB, for the Amazon EC2 instance type.", + "OsFamily": "The operating system (OS) family.", + "RootEbsVolume": "The root EBS volume.", + "VCpuCount": "The amount of vCPU to require for instances in this fleet." + }, + "AWS::Deadline::Fleet ServiceManagedEc2InstanceMarketOptions": { + "Type": "The Amazon EC2 instance type." + }, + "AWS::Deadline::Fleet VCpuCountRange": { + "Max": "The maximum amount of vCPU.", + "Min": "The minimum amount of vCPU." + }, + "AWS::Deadline::LicenseEndpoint": { + "SecurityGroupIds": "The identifier of the Amazon EC2 security group that controls access to the license endpoint.", + "SubnetIds": "Identifies the VPC subnets that can connect to a license endpoint.", + "VpcId": "The VCP(virtual private cloud) ID associated with the license endpoint." + }, + "AWS::Deadline::MeteredProduct": { + "Family": "The family to which the metered product belongs.", + "LicenseEndpointId": "The Amazon EC2 identifier of the license endpoint.", + "Port": "The port on which the metered product should run.", + "ProductId": "The product ID.", + "Vendor": "The vendor." + }, + "AWS::Deadline::Queue": { + "AllowedStorageProfileIds": "The identifiers of the storage profiles that this queue can use to share assets between workers using different operating systems.", + "DefaultBudgetAction": "The default action taken on a queue summary if a budget wasn't configured.", + "Description": "A description of the queue that helps identify what the queue is used for.", + "DisplayName": "The display name of the queue summary to update.", + "FarmId": "The farm ID.", + "JobAttachmentSettings": "The job attachment settings. These are the Amazon S3 bucket name and the Amazon S3 prefix.", + "JobRunAsUser": "Identifies the user for a job.", + "RequiredFileSystemLocationNames": "The file system location that the queue uses.", + "RoleArn": "The Amazon Resource Name (ARN) of the IAM role that workers use when running jobs in this queue." + }, + "AWS::Deadline::Queue JobAttachmentSettings": { + "RootPrefix": "The root prefix.", + "S3BucketName": "The Amazon S3 bucket name." + }, + "AWS::Deadline::Queue JobRunAsUser": { + "Posix": "The user and group that the jobs in the queue run as.", + "RunAs": "Specifies whether the job should run using the queue's system user or if the job should run using the worker agent system user.", + "Windows": "Identifies a Microsoft Windows user." 
+ }, + "AWS::Deadline::Queue PosixUser": { + "Group": "The name of the POSIX user's group.", + "User": "The name of the POSIX user." + }, + "AWS::Deadline::Queue WindowsUser": { + "PasswordArn": "The password ARN for the Windows user.", + "User": "The user." + }, + "AWS::Deadline::QueueEnvironment": { + "FarmId": "The identifier assigned to the farm that contains the queue.", + "Priority": "The queue environment's priority.", + "QueueId": "The unique identifier of the queue that contains the environment.", + "Template": "A JSON or YAML template that describes the processing environment for the queue.", + "TemplateType": "Specifies whether the template for the queue environment is JSON or YAML." + }, + "AWS::Deadline::QueueFleetAssociation": { + "FarmId": "The identifier of the farm that contains the queue and the fleet.", + "FleetId": "The fleet ID.", + "QueueId": "The queue ID." + }, + "AWS::Deadline::StorageProfile": { + "DisplayName": "The display name of the storage profile summary to update.", + "FarmId": "The unique identifier of the farm that contains the storage profile.", + "FileSystemLocations": "Operating system specific file system path to the storage location.", + "OsFamily": "The operating system (OS) family." + }, + "AWS::Deadline::StorageProfile FileSystemLocation": { + "Name": "The location name.", + "Path": "The file path.", + "Type": "The type of file." + }, "AWS::Detective::Graph": { "AutoEnableMembers": "Indicates whether to automatically enable new organization accounts as member accounts in the organization behavior graph.\n\nBy default, this property is set to `false` . If you want to change the value of this property, you must be the Detective administrator for the organization. For more information on setting a Detective administrator account, see [AWS::Detective::OrganizationAdmin](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-detective-organizationadmin.html)", "Tags": "The tag values to assign to the new behavior graph." @@ -10283,6 +10665,8 @@ }, "AWS::EC2::CustomerGateway": { "BgpAsn": "For devices that support BGP, the customer gateway's BGP ASN.\n\nDefault: 65000", + "BgpAsnExtended": "", + "CertificateArn": "The Amazon Resource Name (ARN) for the customer gateway certificate.", "DeviceName": "The name of the customer gateway device.", "IpAddress": "IPv4 address for the customer gateway device's outside interface. The address must be static.", "Tags": "One or more tags for the customer gateway.", @@ -10609,8 +10993,8 @@ "CreditSpecification": "The credit option for CPU usage of the burstable performance instance. Valid values are `standard` and `unlimited` . To change this attribute after launch, use [ModifyInstanceCreditSpecification](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_ModifyInstanceCreditSpecification.html) . For more information, see [Burstable performance instances](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/burstable-performance-instances.html) in the *Amazon EC2 User Guide* .\n\nDefault: `standard` (T2 instances) or `unlimited` (T3/T3a/T4g instances)\n\nFor T3 instances with `host` tenancy, only `standard` is supported.", "DisableApiTermination": "If you set this parameter to `true` , you can't terminate the instance using the Amazon EC2 console, CLI, or API; otherwise, you can. To change this attribute after launch, use [ModifyInstanceAttribute](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_ModifyInstanceAttribute.html) .
Alternatively, if you set `InstanceInitiatedShutdownBehavior` to `terminate` , you can terminate the instance by running the shutdown command from the instance.\n\nDefault: `false`", "EbsOptimized": "Indicates whether the instance is optimized for Amazon EBS I/O. This optimization provides dedicated throughput to Amazon EBS and an optimized configuration stack to provide optimal Amazon EBS I/O performance. This optimization isn't available with all instance types. Additional usage charges apply when using an EBS-optimized instance.\n\nDefault: `false`", - "ElasticGpuSpecifications": "Deprecated.\n\n> Amazon Elastic Graphics reached end of life on January 8, 2024. For workloads that require graphics acceleration, we recommend that you use Amazon EC2 G4ad, G4dn, or G5 instances.", - "ElasticInferenceAccelerators": "An elastic inference accelerator to associate with the instance. Elastic inference accelerators are a resource you can attach to your Amazon EC2 instances to accelerate your Deep Learning (DL) inference workloads.\n\nYou cannot specify accelerators from different generations in the same request.\n\n> Starting April 15, 2023, AWS will not onboard new customers to Amazon Elastic Inference (EI), and will help current customers migrate their workloads to options that offer better price and performance. After April 15, 2023, new customers will not be able to launch instances with Amazon EI accelerators in Amazon SageMaker, Amazon ECS, or Amazon EC2. However, customers who have used Amazon EI at least once during the past 30-day period are considered current customers and will be able to continue using the service.", + "ElasticGpuSpecifications": "An elastic GPU to associate with the instance.\n\n> Amazon Elastic Graphics reached end of life on January 8, 2024.", + "ElasticInferenceAccelerators": "An elastic inference accelerator to associate with the instance.\n\n> Amazon Elastic Inference (EI) is no longer available to new customers. For more information, see [Amazon Elastic Inference FAQs](https://docs.aws.amazon.com/machine-learning/elastic-inference/faqs/) .", "EnclaveOptions": "Indicates whether the instance is enabled for AWS Nitro Enclaves.", "HibernationOptions": "Indicates whether an instance is enabled for hibernation. This parameter is valid only if the instance meets the [hibernation prerequisites](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/hibernating-prerequisites.html) . For more information, see [Hibernate your instance](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Hibernate.html) in the *Amazon EC2 User Guide* .\n\nYou can't enable hibernation and AWS Nitro Enclaves on the same instance.", "HostId": "If you specify host for the `Affinity` property, the ID of a dedicated host that the instance is associated with. If you don't specify an ID, Amazon EC2 launches the instance onto any available, compatible dedicated host in your account. This type of launch is called an untargeted launch. Note that for untargeted launches, you must have a compatible, dedicated host available to successfully launch instances.", @@ -10623,7 +11007,7 @@ "Ipv6Addresses": "The IPv6 addresses from the range of the subnet to associate with the primary network interface. You cannot specify this option and the option to assign a number of IPv6 addresses in the same request. 
You cannot specify this option if you've specified a minimum number of instances to launch.\n\nYou cannot specify this option and the network interfaces option in the same request.", "KernelId": "The ID of the kernel.\n\n> We recommend that you use PV-GRUB instead of kernels and RAM disks. For more information, see [PV-GRUB](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/UserProvidedkernels.html) in the *Amazon EC2 User Guide* .", "KeyName": "The name of the key pair. You can create a key pair using [CreateKeyPair](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateKeyPair.html) or [ImportKeyPair](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_ImportKeyPair.html) .\n\n> If you do not specify a key pair, you can't connect to the instance unless you choose an AMI that is configured to allow users another way to log in.", - "LaunchTemplate": "The launch template to use to launch the instances. Any parameters that you specify in the AWS CloudFormation template override the same parameters in the launch template. You can specify either the name or ID of a launch template, but not both.", + "LaunchTemplate": "The launch template. Any additional parameters that you specify for the new instance overwrite the corresponding parameters included in the launch template.", "LicenseSpecifications": "The license configurations.", "Monitoring": "Specifies whether detailed monitoring is enabled for the instance. Specify `true` to enable detailed monitoring. Otherwise, basic monitoring is enabled. For more information about detailed monitoring, see [Enable or turn off detailed monitoring for your instances](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-cloudwatch-new.html) in the *Amazon EC2 User Guide* .", "NetworkInterfaces": "The network interfaces to associate with the instance.\n\n> If you use this property to point to a network interface, you must terminate the original interface before attaching a new one to allow the update of the instance to succeed.\n> \n> If this resource has a public IP address and is also in a VPC that is defined in the same template, you must use the [DependsOn Attribute](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-attribute-dependson.html) to declare a dependency on the VPC-gateway attachment.", @@ -10685,9 +11069,9 @@ "Ipv6Address": "The IPv6 address." }, "AWS::EC2::Instance LaunchTemplateSpecification": { - "LaunchTemplateId": "The ID of the launch template.\n\nYou must specify the `LaunchTemplateId` or the `LaunchTemplateName` , but not both.", - "LaunchTemplateName": "The name of the launch template.\n\nYou must specify the `LaunchTemplateName` or the `LaunchTemplateId` , but not both.", - "Version": "The version number of the launch template.\n\nSpecifying `$Latest` or `$Default` for the template version number is not supported. However, you can specify `LatestVersionNumber` or `DefaultVersionNumber` using the `Fn::GetAtt` intrinsic function. For more information, see [Fn::GetAtt](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ec2-launchtemplate.html#aws-resource-ec2-launchtemplate-return-values-fn--getatt) ." + "LaunchTemplateId": "The ID of the launch template.\n\nYou must specify either the launch template ID or the launch template name, but not both.", + "LaunchTemplateName": "The name of the launch template.\n\nYou must specify either the launch template ID or the launch template name, but not both.", + "Version": "The version number of the launch template. 
You must specify this property.\n\nTo specify the default version of the template, use the `Fn::GetAtt` intrinsic function to retrieve the `DefaultVersionNumber` attribute of the launch template. To specify the latest version of the template, use `Fn::GetAtt` to retrieve the `LatestVersionNumber` attribute. For more information, see [AWS::EC2::LaunchTemplate return values for Fn::GetAtt](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ec2-launchtemplate.html#aws-resource-ec2-launchtemplate-return-values-fn--getatt) ." }, "AWS::EC2::Instance LicenseSpecification": { "LicenseConfigurationArn": "The Amazon Resource Name (ARN) of the license configuration." @@ -11440,7 +11824,6 @@ "DestinationSecurityGroupId": "The ID of the destination VPC security group.\n\nYou must specify exactly one of the following: `CidrIp` , `CidrIpv6` , `DestinationPrefixListId` , or `DestinationSecurityGroupId` .", "FromPort": "If the protocol is TCP or UDP, this is the start of the port range. If the protocol is ICMP or ICMPv6, this is the ICMP type or -1 (all ICMP types).", "IpProtocol": "The IP protocol name ( `tcp` , `udp` , `icmp` , `icmpv6` ) or number (see [Protocol Numbers](https://docs.aws.amazon.com/http://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml) ).\n\nUse `-1` to specify all protocols. When authorizing security group rules, specifying `-1` or a protocol number other than `tcp` , `udp` , `icmp` , or `icmpv6` allows traffic on all ports, regardless of any port range you specify. For `tcp` , `udp` , and `icmp` , you must specify a port range. For `icmpv6` , the port range is optional; if you omit the port range, traffic for all types and codes is allowed.", - "SourceSecurityGroupId": "", "ToPort": "If the protocol is TCP or UDP, this is the end of the port range. If the protocol is ICMP or ICMPv6, this is the ICMP code or -1 (all ICMP codes). If the start port is -1 (all ICMP types), then the end port must be -1 (all ICMP codes)." }, "AWS::EC2::SecurityGroup Ingress": { @@ -12239,6 +12622,24 @@ "Key": "One part of a key-value pair that makes up a tag. A `key` is a general label that acts like a category for more specific tag values.", "Value": "A `value` acts as a descriptor within a tag category (key)." }, + "AWS::ECR::RepositoryCreationTemplate": { + "AppliedFor": "A list of enumerable strings representing the repository creation scenarios that this template will apply towards. The two supported scenarios are `PULL_THROUGH_CACHE` and `REPLICATION` .", + "Description": "The description associated with the repository creation template.", + "EncryptionConfiguration": "The encryption configuration for the repository. This determines how the contents of your repository are encrypted at rest.\n\nBy default, when no encryption configuration is set or the `AES256` encryption type is used, Amazon ECR uses server-side encryption with Amazon S3-managed encryption keys which encrypts your data at rest using an AES-256 encryption algorithm. This does not require any action on your part.\n\nFor more control over the encryption of the contents of your repository, you can use server-side encryption with an AWS KMS key stored in AWS Key Management Service ( AWS KMS ) to encrypt your images. For more information, see [Amazon ECR encryption at rest](https://docs.aws.amazon.com/AmazonECR/latest/userguide/encryption-at-rest.html) in the *Amazon Elastic Container Registry User Guide* .", + "ImageTagMutability": "The tag mutability setting for the repository.", + "LifecyclePolicy": "The lifecycle policy to use for repositories created using the template.", + "Prefix": "The repository namespace prefix associated with the repository creation template.", + "RepositoryPolicy": "The repository policy to apply to repositories created using the template.", + "ResourceTags": "The tags attached to the resource."
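Read together, the `AWS::ECR::RepositoryCreationTemplate` entries above describe a template keyed by a namespace prefix. A hedged CloudFormation sketch; the prefix, scenario value, and key alias are illustrative placeholders, not confirmed by this diff:

```yaml
# Sketch only: property names follow the schema entries above;
# all values are placeholders.
TeamRepoTemplate:
  Type: AWS::ECR::RepositoryCreationTemplate
  Properties:
    Prefix: team-a                  # repositories created under this namespace get the template
    Description: Defaults for team-a repositories
    AppliedFor:
      - PULL_THROUGH_CACHE          # assumed scenario value
    EncryptionConfiguration:
      EncryptionType: KMS
      KmsKey: alias/ecr-default     # alias, key ID, or full ARN, per the KmsKey entry below
```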
+ }, + "AWS::ECR::RepositoryCreationTemplate EncryptionConfiguration": { + "EncryptionType": "The encryption type to use.\n\nIf you use the `KMS` encryption type, the contents of the repository will be encrypted using server-side encryption with an AWS KMS key stored in AWS KMS . When you use AWS KMS to encrypt your data, you can either use the default AWS managed AWS KMS key for Amazon ECR, or specify your own AWS KMS key, which you already created. For more information, see [Protecting data using server-side encryption with an AWS KMS key stored in AWS Key Management Service (SSE-KMS)](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html) in the *Amazon Simple Storage Service Console Developer Guide* .\n\nIf you use the `AES256` encryption type, Amazon ECR uses server-side encryption with Amazon S3-managed encryption keys which encrypts the images in the repository using an AES-256 encryption algorithm. For more information, see [Protecting data using server-side encryption with Amazon S3-managed encryption keys (SSE-S3)](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html) in the *Amazon Simple Storage Service Console Developer Guide* .", + "KmsKey": "If you use the `KMS` encryption type, specify the AWS KMS key to use for encryption. The alias, key ID, or full ARN of the AWS KMS key can be specified. The key must exist in the same Region as the repository. If no key is specified, the default AWS managed AWS KMS key for Amazon ECR will be used." + }, + "AWS::ECR::RepositoryCreationTemplate Tag": { + "Key": "One part of a key-value pair that makes up a tag. A `key` is a general label that acts like a category for more specific tag values.", + "Value": "A `value` acts as a descriptor within a tag category (key)." + }, "AWS::ECS::CapacityProvider": { "AutoScalingGroupProvider": "The Auto Scaling group settings for the capacity provider.", "Name": "The name of the capacity provider. If a name is specified, it cannot start with `aws` , `ecs` , or `fargate` . If no name is specified, a default name in the `CFNStackName-CFNResourceName-RandomString` format is used.", @@ -12470,7 +12871,7 @@ "PlacementConstraints": "An array of placement constraint objects to use for tasks.\n\n> This parameter isn't supported for tasks run on AWS Fargate .", "ProxyConfiguration": "The configuration details for the App Mesh proxy.\n\nYour Amazon ECS container instances require at least version 1.26.0 of the container agent and at least version 1.26.0-1 of the `ecs-init` package to use a proxy configuration. If your container instances are launched from the Amazon ECS optimized AMI version `20190301` or later, they contain the required versions of the container agent and `ecs-init` . For more information, see [Amazon ECS-optimized Linux AMI](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-optimized_AMI.html) in the *Amazon Elastic Container Service Developer Guide* .", "RequiresCompatibilities": "The task launch types the task definition was validated against. The valid values are `EC2` , `FARGATE` , and `EXTERNAL` . For more information, see [Amazon ECS launch types](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html) in the *Amazon Elastic Container Service Developer Guide* .", - "RuntimePlatform": "The operating system that your tasks definitions run on.
A platform family is specified only for tasks using the Fargate launch type.\n\nWhen you specify a task definition in a service, this value must match the `runtimePlatform` value of the service.", + "RuntimePlatform": "The operating system that your task definitions run on. A platform family is specified only for tasks using the Fargate launch type.", "Tags": "The metadata that you apply to the task definition to help you categorize and organize it. Each tag consists of a key and an optional value. You define both of them.\n\nThe following basic restrictions apply to tags:\n\n- Maximum number of tags per resource - 50\n- For each resource, each tag key must be unique, and each tag key can have only one value.\n- Maximum key length - 128 Unicode characters in UTF-8\n- Maximum value length - 256 Unicode characters in UTF-8\n- If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . _ : / @.\n- Tag keys and values are case-sensitive.\n- Do not use `aws:` , `AWS:` , or any upper or lowercase combination of such as a prefix for either keys or values as it is reserved for AWS use. You cannot edit or delete tag keys or values with this prefix. Tags with this prefix do not count against your tags per resource limit.", "TaskRoleArn": "The short name or full Amazon Resource Name (ARN) of the AWS Identity and Access Management role that grants containers in the task permission to call AWS APIs on your behalf. For more information, see [Amazon ECS Task Role](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-iam-roles.html) in the *Amazon Elastic Container Service Developer Guide* .\n\nIAM roles for tasks on Windows require that the `-EnableTaskIAMRole` option is set when you launch the Amazon ECS-optimized Windows AMI. Your containers must also run some configuration code to use the feature. For more information, see [Windows IAM roles for tasks](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/windows_task_IAM_roles.html) in the *Amazon Elastic Container Service Developer Guide* .", "Volumes": "The list of data volume definitions for the task. For more information, see [Using data volumes in tasks](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/using_data_volumes.html) in the *Amazon Elastic Container Service Developer Guide* .\n\n> The `host` and `sourcePath` parameters aren't supported for tasks run on AWS Fargate ." @@ -12551,6 +12952,15 @@ "AWS::ECS::TaskDefinition EphemeralStorage": { "SizeInGiB": "The total amount, in GiB, of ephemeral storage to set for the task. The minimum supported value is `20` GiB and the maximum supported value is `200` GiB." }, + "AWS::ECS::TaskDefinition FSxAuthorizationConfig": { + "CredentialsParameter": "The authorization credential option to use. The authorization credential options can be provided using either the Amazon Resource Name (ARN) of an AWS Secrets Manager secret or SSM Parameter Store parameter. The ARN refers to the stored credentials.", + "Domain": "A fully qualified domain name hosted by an AWS Directory Service Managed Microsoft AD (Active Directory) or self-hosted AD on Amazon EC2." + }, + "AWS::ECS::TaskDefinition FSxWindowsFileServerVolumeConfiguration": { + "AuthorizationConfig": "The authorization configuration details for the Amazon FSx for Windows File Server file system.", + "FileSystemId": "The Amazon FSx for Windows File Server file system ID to use.", + "RootDirectory": "The directory within the Amazon FSx for Windows File Server file system to mount as the root directory inside the host." + }, "AWS::ECS::TaskDefinition FirelensConfiguration": { "Options": "The options to use when configuring the log router.
This field is optional and can be used to add additional metadata, such as the task, task definition, cluster, and container instance details to the log event.\n\nIf specified, valid option keys are:\n\n- `enable-ecs-log-metadata` , which can be `true` or `false`\n- `config-file-type` , which can be `s3` or `file`\n- `config-file-value` , which is either an S3 ARN or a file path", "Type": "The log router to use. The valid values are `fluentd` or `fluentbit` ." @@ -12654,6 +13064,7 @@ "ConfiguredAtLaunch": "Indicates whether the volume should be configured at launch time. This is used to create Amazon EBS volumes for standalone tasks or tasks created as part of a service. Each task definition revision may only have one volume configured at launch in the volume configuration.\n\nTo configure a volume at launch time, use this task definition revision and specify a `volumeConfigurations` object when calling the `CreateService` , `UpdateService` , `RunTask` or `StartTask` APIs.", "DockerVolumeConfiguration": "This parameter is specified when you use Docker volumes.\n\nWindows containers only support the use of the `local` driver. To use bind mounts, specify the `host` parameter instead.\n\n> Docker volumes aren't supported by tasks run on AWS Fargate .", "EFSVolumeConfiguration": "This parameter is specified when you use an Amazon Elastic File System file system for task storage.", + "FSxWindowsFileServerVolumeConfiguration": "This parameter is specified when you use an Amazon FSx for Windows File Server file system for task storage.", "Host": "This parameter is specified when you use bind mount host volumes. The contents of the `host` parameter determine whether your bind mount host volume persists on the host container instance and where it's stored. If the `host` parameter is empty, then the Docker daemon assigns a host path for your data volume. However, the data isn't guaranteed to persist after the containers that are associated with it stop running.\n\nWindows containers can mount whole directories on the same drive as `$env:ProgramData` . Windows containers can't mount directories on a different drive, and mount points can't be across drives. For example, you can mount `C:\my\path:C:\my\path` and `D:\:D:\` , but not `D:\my\path:C:\my\path` or `D:\:C:\my\path` .", "Name": "The name of the volume. Up to 255 letters (uppercase and lowercase), numbers, underscores, and hyphens are allowed.\n\nWhen using a volume configured at launch, the `name` is required and must also be specified as the volume name in the `ServiceVolumeConfiguration` or `TaskVolumeConfiguration` parameter when creating your service or standalone task.\n\nFor all other types of volumes, this name is referenced in the `sourceVolume` parameter of the `mountPoints` object in the container definition.\n\nWhen a volume is using the `efsVolumeConfiguration` , the name is required." }, @@ -12760,7 +13171,7 @@ "Destinations": "An array of destination objects. Only one destination object is supported." }, "AWS::EFS::FileSystem ReplicationDestination": { - "AvailabilityZoneName": "The AWS For One Zone file systems, the replication configuration must specify the Availability Zone in which the destination file system is located.\n\nUse the format `us-east-1a` to specify the Availability Zone.
For more information about One Zone file systems, see [EFS file system types](https://docs.aws.amazon.com/efs/latest/ug/storage-classes.html) in the *Amazon EFS User Guide* .\n\n> One Zone file system type is not available in all Availability Zones in AWS Regions where Amazon EFS is available.", + "AvailabilityZoneName": "For One Zone file systems, the replication configuration must specify the Availability Zone in which the destination file system is located.\n\nUse the format `us-east-1a` to specify the Availability Zone. For more information about One Zone file systems, see [EFS file system types](https://docs.aws.amazon.com/efs/latest/ug/storage-classes.html) in the *Amazon EFS User Guide* .\n\n> One Zone file system type is not available in all Availability Zones in AWS Regions where Amazon EFS is available.", "FileSystemId": "The ID of the destination Amazon EFS file system.", "KmsKeyId": "The ID of an AWS KMS key used to protect the encrypted file system.", "Region": "The AWS Region in which the destination file system is located.\n\n> For One Zone file systems, the replication configuration must specify the AWS Region in which the destination file system is located." @@ -13085,7 +13496,7 @@ "EmrManagedMasterSecurityGroup": "The identifier of the Amazon EC2 security group for the master node. If you specify `EmrManagedMasterSecurityGroup` , you must also specify `EmrManagedSlaveSecurityGroup` .", "EmrManagedSlaveSecurityGroup": "The identifier of the Amazon EC2 security group for the core and task nodes. If you specify `EmrManagedSlaveSecurityGroup` , you must also specify `EmrManagedMasterSecurityGroup` .", "HadoopVersion": "Applies only to Amazon EMR release versions earlier than 4.0. The Hadoop version for the cluster. Valid inputs are \"0.18\" (no longer maintained), \"0.20\" (no longer maintained), \"0.20.205\" (no longer maintained), \"1.0.3\", \"2.2.0\", or \"2.4.0\". If you do not set this value, the default of 0.18 is used, unless the `AmiVersion` parameter is set in the RunJobFlow call, in which case the default version of Hadoop for that AMI version is used.", - "KeepJobFlowAliveWhenNoSteps": "Specifies whether the cluster should remain available after completing all steps. Defaults to `true` . For more information about configuring cluster termination, see [Control Cluster Termination](https://docs.aws.amazon.com/emr/latest/ManagementGuide/emr-plan-termination.html) in the *EMR Management Guide* .", + "KeepJobFlowAliveWhenNoSteps": "Specifies whether the cluster should remain available after completing all steps. Defaults to `false` . For more information about configuring cluster termination, see [Control Cluster Termination](https://docs.aws.amazon.com/emr/latest/ManagementGuide/emr-plan-termination.html) in the *EMR Management Guide* .", "MasterInstanceFleet": "Describes the EC2 instances and instance configurations for the master instance fleet when using clusters with the instance fleet configuration.", "MasterInstanceGroup": "Describes the EC2 instances and instance configurations for the master instance group when using clusters with the uniform instance group configuration.", "Placement": "The Availability Zone in which the cluster runs.", @@ -14215,7 +14626,7 @@ "AWS::EntityResolution::IdMappingWorkflow IdMappingWorkflowInputSource": { "InputSourceARN": "An AWS Glue table ARN for the input source table.", "SchemaArn": "The ARN (Amazon Resource Name) that AWS Entity Resolution generated for the `SchemaMapping` .", - "Type": "" + "Type": "The type of ID namespace. 
There are two types: `SOURCE` and `TARGET` .\n\nThe `SOURCE` contains configurations for `sourceId` data that will be processed in an ID mapping workflow.\n\nThe `TARGET` contains a configuration of `targetId` to which all `sourceIds` will resolve." }, "AWS::EntityResolution::IdMappingWorkflow IdMappingWorkflowOutputSource": { "KMSArn": "Customer AWS KMS ARN for encryption at rest. If not provided, the system will use an AWS Entity Resolution managed KMS key.", @@ -14230,8 +14641,33 @@ "ProviderServiceArn": "The ARN of the provider service." }, "AWS::EntityResolution::IdMappingWorkflow Tag": { - "Key": "", - "Value": "" + "Key": "The key of the tag.", + "Value": "The value of the tag." + }, + "AWS::EntityResolution::IdNamespace": { + "Description": "The description of the ID namespace.", + "IdMappingWorkflowProperties": "Determines the properties of `IdMappingWorkflow` where this `IdNamespace` can be used as a `Source` or a `Target` .", + "IdNamespaceName": "The name of the ID namespace.", + "InputSourceConfig": "A list of `InputSource` objects, which have the fields `InputSourceARN` and `SchemaName` .", + "RoleArn": "The Amazon Resource Name (ARN) of the IAM role. AWS Entity Resolution assumes this role to access the resources defined in this `IdNamespace` on your behalf as part of the workflow run.", + "Tags": "The tags used to organize, track, or control access for this resource.", + "Type": "The type of ID namespace. There are two types: `SOURCE` and `TARGET` .\n\nThe `SOURCE` contains configurations for `sourceId` data that will be processed in an ID mapping workflow.\n\nThe `TARGET` contains a configuration of `targetId` to which all `sourceIds` will resolve." + }, + "AWS::EntityResolution::IdNamespace IdNamespaceIdMappingWorkflowProperties": { + "IdMappingType": "The type of ID mapping.", + "ProviderProperties": "An object which defines any additional configurations required by the provider service." + }, + "AWS::EntityResolution::IdNamespace IdNamespaceInputSource": { + "InputSourceARN": "An AWS Glue table ARN for the input source table.", + "SchemaName": "The name of the schema." + }, + "AWS::EntityResolution::IdNamespace NamespaceProviderProperties": { + "ProviderConfiguration": "An object which defines any additional configurations required by the provider service.", + "ProviderServiceArn": "The Amazon Resource Name (ARN) of the provider service." + }, + "AWS::EntityResolution::IdNamespace Tag": { + "Key": "The key of the tag.", + "Value": "The value of the tag." }, "AWS::EntityResolution::MatchingWorkflow": { "Description": "A description of the workflow.", @@ -14279,8 +14715,16 @@ "Rules": "A list of `Rule` objects, each of which have fields `RuleName` and `MatchingKeys` ." }, "AWS::EntityResolution::MatchingWorkflow Tag": { - "Key": "", - "Value": "" + "Key": "The key of the tag.", + "Value": "The value of the tag."
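As a rough sketch of how the `IdNamespace` `Type` semantics above look in a template; the ARNs, names, and schema reference are placeholders, not taken from this diff:

```yaml
# Sketch only: ARNs and names are placeholders.
CustomersSourceNamespace:
  Type: AWS::EntityResolution::IdNamespace
  Properties:
    IdNamespaceName: customers-source
    Type: SOURCE                        # sourceId data to be processed in an ID mapping workflow
    InputSourceConfig:
      - InputSourceARN: arn:aws:glue:us-east-1:111122223333:table/crm/customers
        SchemaName: CustomerSchema      # pairs with InputSourceARN, per InputSourceConfig above
    RoleArn: arn:aws:iam::111122223333:role/EntityResolutionAccess
```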
+ }, + "AWS::EntityResolution::PolicyStatement": { + "Action": "The action that the principal can use on the resource.\n\nFor example, `entityresolution:GetIdMappingJob` , `entityresolution:GetMatchingJob` .", + "Arn": "The Amazon Resource Name (ARN) of the resource that will be accessed by the principal.", + "Condition": "A set of condition keys that you can use in key policies.", + "Effect": "Determines whether the permissions specified in the policy are to be allowed ( `Allow` ) or denied ( `Deny` ).", + "Principal": "The AWS service or AWS account that can access the resource defined by the ARN.", + "StatementId": "A statement identifier that differentiates the statement from others in the same policy." }, "AWS::EntityResolution::SchemaMapping": { "Description": "A description of the schema.", @@ -14290,14 +14734,14 @@ }, "AWS::EntityResolution::SchemaMapping SchemaInputAttribute": { "FieldName": "A string containing the field name.", - "GroupName": "Instruct AWS Entity Resolution to combine several columns into a unified column with the identical attribute type. For example, when working with columns such as first_name, middle_name, and last_name, assigning them a common `GroupName` will prompt AWS Entity Resolution to concatenate them into a single value.", - "MatchKey": "A key that allows grouping of multiple input attributes into a unified matching group. For example, let's consider a scenario where the source table contains various addresses, such as `business_address` and `shipping_address` . By assigning the `MatchKey` *Address* to both attributes, AWS Entity Resolution will match records across these fields to create a consolidated matching group. If no `MatchKey` is specified for a column, it won't be utilized for matching purposes but will still be included in the output table.", + "GroupName": "A string that instructs AWS Entity Resolution to combine several columns into a unified column with the identical attribute type.\n\nFor example, when working with columns such as `first_name` , `middle_name` , and `last_name` , assigning them a common `groupName` will prompt AWS Entity Resolution to concatenate them into a single value.", + "MatchKey": "A key that allows grouping of multiple input attributes into a unified matching group. For example, consider a scenario where the source table contains various addresses, such as `business_address` and `shipping_address` . By assigning a `matchKey` called `address` to both attributes, AWS Entity Resolution will match records across these fields to create a consolidated matching group. If no `matchKey` is specified for a column, it won't be utilized for matching purposes but will still be included in the output table.", "SubType": "The subtype of the attribute, selected from a list of values.", "Type": "The type of the attribute, selected from a list of values." }, "AWS::EntityResolution::SchemaMapping Tag": { - "Key": "", - "Value": "" + "Key": "The key of the tag.", + "Value": "The value of the tag." }, "AWS::EventSchemas::Discoverer": { "CrossAccount": "Allows for the discovery of the event schemas that are sent to the event bus from another account.", @@ -15563,6 +16007,20 @@ "Key": "A string that contains a `Tag` key.", "Value": "A string that contains a `Tag` value."
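The `GroupName` / `MatchKey` behavior described in the `SchemaInputAttribute` entries above is easiest to see with the doc's own example columns. A hedged sketch of the mapped-columns list; the `Type` values are assumptions, not taken from this diff:

```yaml
# Sketch only: Type values are assumed; field names come from the
# GroupName/MatchKey examples in the descriptions above.
MappedInputFields:
  - FieldName: first_name
    Type: NAME
    GroupName: FullName        # concatenated with the other FullName columns
  - FieldName: middle_name
    Type: NAME
    GroupName: FullName
  - FieldName: last_name
    Type: NAME
    GroupName: FullName
  - FieldName: business_address
    Type: ADDRESS
    MatchKey: address          # matched against shipping_address records
  - FieldName: shipping_address
    Type: ADDRESS
    MatchKey: address          # same matchKey -> one consolidated matching group
```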
}, + "AWS::GlobalAccelerator::CrossAccountAttachment": { + "Name": "The name of the cross-account attachment.", + "Principals": "The principals included in the cross-account attachment.", + "Resources": "The resources included in the cross-account attachment.", + "Tags": "Add tags for a cross-account attachment." + }, + "AWS::GlobalAccelerator::CrossAccountAttachment Resource": { + "EndpointId": "The endpoint ID for the endpoint that is specified as an AWS resource.\n\nAn endpoint ID for the cross-account feature is the ARN of an AWS resource, such as a Network Load Balancer, that Global Accelerator supports as an endpoint for an accelerator.", + "Region": "The AWS Region where a shared endpoint resource is located." + }, + "AWS::GlobalAccelerator::CrossAccountAttachment Tag": { + "Key": "A string that contains a `Tag` key.", + "Value": "A string that contains a `Tag` value." + }, "AWS::GlobalAccelerator::EndpointGroup": { "EndpointConfigurations": "The list of endpoint objects.", "EndpointGroupRegion": "The AWS Regions where the endpoint group is located.", @@ -17089,6 +17547,21 @@ "Key": "One part of a key-value pair that makes up a tag. A `key` is a general label that acts like a category for more specific tag values.", "Value": "The optional part of a key-value pair that makes up a tag. A `value` acts as a descriptor within a tag category (key)." }, + "AWS::IVS::EncoderConfiguration": { + "Name": "Encoder configuration name.", + "Tags": "An array of key-value pairs to apply to this resource.\n\nFor more information, see [Tag](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ivs-encoderconfiguration-tag.html) .", + "Video": "Video configuration. Default: video resolution 1280x720, bitrate 2500 kbps, 30 fps. See the [Video](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ivs-encoderconfiguration-video.html) property type for more information." + }, + "AWS::IVS::EncoderConfiguration Tag": { + "Key": "One part of a key-value pair that makes up a tag. A `key` is a general label that acts like a category for more specific tag values.", + "Value": "The optional part of a key-value pair that makes up a tag. A `value` acts as a descriptor within a tag category (key)." + }, + "AWS::IVS::EncoderConfiguration Video": { + "Bitrate": "Bitrate for generated output, in bps. Default: 2500000.", + "Framerate": "Video frame rate, in fps. Default: 30.", + "Height": "Video-resolution height. Note that the maximum value is determined by width times height, such that the maximum total pixels is 2073600 (1920x1080 or 1080x1920). Default: 720.", + "Width": "Video-resolution width. Note that the maximum value is determined by width times height, such that the maximum total pixels is 2073600 (1920x1080 or 1080x1920). Default: 1280." + }, "AWS::IVS::PlaybackKeyPair": { "Name": "Playback-key-pair name. The value does not need to be unique.", "PublicKeyMaterial": "The public portion of a customer-generated key pair.", @@ -17098,8 +17571,19 @@ "Key": "One part of a key-value pair that makes up a tag. A `key` is a general label that acts like a category for more specific tag values.", "Value": "The optional part of a key-value pair that makes up a tag. A `value` acts as a descriptor within a tag category (key)." }, + "AWS::IVS::PlaybackRestrictionPolicy": { + "AllowedCountries": "A list of country codes that control geoblocking restrictions. Allowed values are the officially assigned ISO 3166-1 alpha-2 codes. Default: All countries (an empty array).", + "AllowedOrigins": "A list of origin sites that control CORS restriction.
Allowed values are the same as valid values of the Origin header defined at [https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Origin](https://docs.aws.amazon.com/https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Origin)", + "EnableStrictOriginEnforcement": "Whether channel playback is constrained by the origin site.", + "Name": "Playback-restriction-policy name.", + "Tags": "An array of key-value pairs to apply to this resource.\n\nFor more information, see [Tag](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ivs-playbackrestrictionpolicy-tag.html) ." + }, + "AWS::IVS::PlaybackRestrictionPolicy Tag": { + "Key": "One part of a key-value pair that makes up a tag. A `key` is a general label that acts like a category for more specific tag values.", + "Value": "The optional part of a key-value pair that makes up a tag. A `value` acts as a descriptor within a tag category (key)." + }, "AWS::IVS::RecordingConfiguration": { - "DestinationConfiguration": "A destination configuration contains information about where recorded video will be stored. See the DestinationConfiguration property type for more information.", + "DestinationConfiguration": "A destination configuration describes an S3 bucket where recorded video will be stored. See the DestinationConfiguration property type for more information.", "Name": "Recording-configuration name. The value does not need to be unique.", "RecordingReconnectWindowSeconds": "If a broadcast disconnects and then reconnects within the specified interval, the multiple streams will be considered a single broadcast and merged together.\n\n*Default* : `0`", "RenditionConfiguration": "A rendition configuration describes which renditions should be recorded for a stream. See the RenditionConfiguration property type for more information.", @@ -17134,6 +17618,18 @@ "Key": "One part of a key-value pair that makes up a tag. A `key` is a general label that acts like a category for more specific tag values.", "Value": "The optional part of a key-value pair that makes up a tag. A `value` acts as a descriptor within a tag category (key)." }, + "AWS::IVS::StorageConfiguration": { + "Name": "Storage configuration name.", + "S3": "An S3 storage configuration contains information about where recorded video will be stored. See the [S3StorageConfiguration](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ivs-storageconfiguration-s3storageconfiguration.html) property type for more information.", + "Tags": "An array of key-value pairs to apply to this resource.\n\nFor more information, see [Tag](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ivs-storageconfiguration-tag.html) ." + }, + "AWS::IVS::StorageConfiguration S3StorageConfiguration": { + "BucketName": "Name of the S3 bucket where recorded video will be stored." + }, + "AWS::IVS::StorageConfiguration Tag": { + "Key": "One part of a key-value pair that makes up a tag. A `key` is a general label that acts like a category for more specific tag values.", + "Value": "The optional part of a key-value pair that makes up a tag. A `value` acts as a descriptor within a tag category (key)." + }, "AWS::IVS::StreamKey": { "ChannelArn": "Channel ARN for the stream.", "Tags": "An array of key-value pairs to apply to this resource.\n\nFor more information, see [Tag](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ivs-streamkey-tag.html) ."
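The `Video` defaults spelled out above (1280x720, 2,500,000 bps, 30 fps, within the 2,073,600 total-pixel limit) look like this in a template; a minimal sketch with an assumed logical name:

```yaml
# Sketch only: the logical name is a placeholder; the values shown
# are the documented defaults for AWS::IVS::EncoderConfiguration Video.
HdEncoderConfig:
  Type: AWS::IVS::EncoderConfiguration
  Properties:
    Name: hd-landscape
    Video:
      Width: 1280        # default; Width * Height must not exceed 2073600 pixels
      Height: 720        # default
      Bitrate: 2500000   # bps, default
      Framerate: 30      # fps, default
```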
@@ -19868,6 +20364,7 @@ "MultiRegion": "Creates a multi-Region primary key that you can replicate in other AWS Regions . You can't change the `MultiRegion` value after the KMS key is created.\n\nFor a list of AWS Regions in which multi-Region keys are supported, see [Multi-Region keys in AWS KMS](https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-overview.html) in the *AWS Key Management Service Developer Guide* .\n\n> If you change the value of the `MultiRegion` property on an existing KMS key, the update request fails, regardless of the value of the [`UpdateReplacePolicy` attribute](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-attribute-updatereplacepolicy.html) . This prevents you from accidentally deleting a KMS key by changing an immutable property value. \n\nFor a multi-Region key, set this property to `true` . For a single-Region key, omit this property or set it to `false` . The default value is `false` .\n\n*Multi-Region keys* are an AWS KMS feature that lets you create multiple interoperable KMS keys in different AWS Regions . Because these KMS keys have the same key ID, key material, and other metadata, you can use them to encrypt data in one AWS Region and decrypt it in a different AWS Region without making a cross-Region call or exposing the plaintext data. For more information, see [Multi-Region keys](https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-overview.html) in the *AWS Key Management Service Developer Guide* .\n\nYou can create a symmetric encryption, HMAC, or asymmetric multi-Region KMS key, and you can create a multi-Region key with imported key material. However, you cannot create a multi-Region key in a custom key store.\n\nTo create a replica of this primary key in a different AWS Region , create an [AWS::KMS::ReplicaKey](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kms-replicakey.html) resource in a CloudFormation stack in the replica Region. Specify the key ARN of this primary key.", "Origin": "The source of the key material for the KMS key. You cannot change the origin after you create the KMS key. The default is `AWS_KMS` , which means that AWS KMS creates the key material.\n\nTo [create a KMS key with no key material](https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys-create-cmk.html) (for imported key material), set this value to `EXTERNAL` . For more information about importing key material into AWS KMS , see [Importing Key Material](https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html) in the *AWS Key Management Service Developer Guide* .\n\nYou can ignore `ENABLED` when Origin is `EXTERNAL` . When a KMS key with Origin `EXTERNAL` is created, the key state is `PENDING_IMPORT` and `ENABLED` is `false` . After you import the key material, `ENABLED` is updated to `true` . The KMS key can then be used for cryptographic operations.\n\n> AWS CloudFormation doesn't support creating an `Origin` parameter of the `AWS_CLOUDHSM` or `EXTERNAL_KEY_STORE` values.", "PendingWindowInDays": "Specifies the number of days in the waiting period before AWS KMS deletes a KMS key that has been removed from a CloudFormation stack. Enter a value between 7 and 30 days. The default value is 30 days.\n\nWhen you remove a KMS key from a CloudFormation stack, AWS KMS schedules the KMS key for deletion and starts the mandatory waiting period. The `PendingWindowInDays` property determines the length of the waiting period.
During the waiting period, the key state of the KMS key is `Pending Deletion` or `Pending Replica Deletion` , which prevents the KMS key from being used in cryptographic operations. When the waiting period expires, AWS KMS permanently deletes the KMS key.\n\nAWS KMS will not delete a [multi-Region primary key](https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-overview.html) that has replica keys. If you remove a multi-Region primary key from a CloudFormation stack, its key state changes to `PendingReplicaDeletion` so it cannot be replicated or used in cryptographic operations. This state can persist indefinitely. When the last of its replica keys is deleted, the key state of the primary key changes to `PendingDeletion` and the waiting period specified by `PendingWindowInDays` begins. When this waiting period expires, AWS KMS deletes the primary key. For details, see [Deleting multi-Region keys](https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-delete.html) in the *AWS Key Management Service Developer Guide* .\n\nYou cannot use a CloudFormation template to cancel deletion of the KMS key after you remove it from the stack, regardless of the waiting period. If you specify a KMS key in your template, even one with the same name, CloudFormation creates a new KMS key. To cancel deletion of a KMS key, use the AWS KMS console or the [CancelKeyDeletion](https://docs.aws.amazon.com/kms/latest/APIReference/API_CancelKeyDeletion.html) operation.\n\nFor information about the `Pending Deletion` and `Pending Replica Deletion` key states, see [Key state: Effect on your KMS key](https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) in the *AWS Key Management Service Developer Guide* . For more information about deleting KMS keys, see the [ScheduleKeyDeletion](https://docs.aws.amazon.com/kms/latest/APIReference/API_ScheduleKeyDeletion.html) operation in the *AWS Key Management Service API Reference* and [Deleting KMS keys](https://docs.aws.amazon.com/kms/latest/developerguide/deleting-keys.html) in the *AWS Key Management Service Developer Guide* .", + "RotationPeriodInDays": "The number of days between each automatic rotation. The default value is 365 days.", "Tags": "Assigns one or more tags to the replica key.\n\n> Tagging or untagging a KMS key can allow or deny permission to the KMS key. For details, see [ABAC for AWS KMS](https://docs.aws.amazon.com/kms/latest/developerguide/abac.html) in the *AWS Key Management Service Developer Guide* . \n\nFor information about tags in AWS KMS , see [Tagging keys](https://docs.aws.amazon.com/kms/latest/developerguide/tagging-keys.html) in the *AWS Key Management Service Developer Guide* . For information about tags in CloudFormation, see [Tag](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-resource-tags.html) ." }, "AWS::KMS::Key Tag": { @@ -24958,6 +25455,7 @@ "Value": "Part of the key:value pair that defines a tag. You can use a tag value to describe a specific value within a category, such as \"companyA\" or \"companyB.\" Tag values are case-sensitive." }, "AWS::MediaTailor::Channel": { + "Audiences": "The list of audiences defined in the channel.", "ChannelName": "The name of the channel.", "FillerSlate": "The slate used to fill gaps between programs in the schedule. You must configure filler slate if your channel uses the `LINEAR` `PlaybackMode` .
MediaTailor doesn't support filler slate for channels using the `LOOP` `PlaybackMode` .", "LogConfiguration": "The log configuration.", @@ -25905,10 +26403,18 @@ }, "AWS::Oam::Link": { "LabelTemplate": "Specify a friendly human-readable name to use to identify this source account when you are viewing data from it in the monitoring account.\n\nYou can include the following variables in your template:\n\n- `$AccountName` is the name of the account\n- `$AccountEmail` is a globally-unique email address, which includes the email domain, such as `mariagarcia@example.com`\n- `$AccountEmailNoDomain` is an email address without the domain name, such as `mariagarcia`", + "LinkConfiguration": "Use this structure to optionally create filters that specify that only some metric namespaces or log groups are to be shared from the source account to the monitoring account.", "ResourceTypes": "An array of strings that define which types of data the source account shares with the monitoring account. Valid values are `AWS::CloudWatch::Metric | AWS::Logs::LogGroup | AWS::XRay::Trace | AWS::ApplicationInsights::Application | AWS::InternetMonitor::Monitor` .", "SinkIdentifier": "The ARN of the sink in the monitoring account that you want to link to. You can use [ListSinks](https://docs.aws.amazon.com/OAM/latest/APIReference/API_ListSinks.html) to find the ARNs of sinks.", "Tags": "An array of key-value pairs to apply to the link.\n\nFor more information, see [Tag](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-resource-tags.html) ." }, + "AWS::Oam::Link LinkConfiguration": { + "LogGroupConfiguration": "Use this structure to filter which log groups are to share their log events with the monitoring account.", + "MetricConfiguration": "Use this structure to filter which metric namespaces are to be shared with the monitoring account." + }, + "AWS::Oam::Link LinkFilter": { + "Filter": "When used in `MetricConfiguration` , this filter specifies which metric namespaces are to be shared with the monitoring account. When used in `LogGroupConfiguration` , this filter specifies which log groups are to share their log events with the monitoring account." + }, "AWS::Oam::Sink": { "Name": "A name for the sink.", "Policy": "The IAM policy that grants permissions to source accounts to link to this sink. The policy can grant permission in the following ways:\n\n- Include organization IDs or organization paths to permit all accounts in an organization\n- Include account IDs to permit the specified accounts", @@ -26736,7 +27242,7 @@ "AWS::Personalize::Dataset": { "DatasetGroupArn": "The Amazon Resource Name (ARN) of the dataset group.", "DatasetImportJob": "Describes a job that imports training data from a data source (Amazon S3 bucket) to an Amazon Personalize dataset. If you specify a dataset import job as part of a dataset, all dataset import job fields are required.", - "DatasetType": "One of the following values:\n\n- Interactions\n- Items\n- Users\n- Actions\n- Action_Interactions", + "DatasetType": "One of the following values:\n\n- Interactions\n- Items\n- Users\n\n> You can't use CloudFormation to create an Action Interactions or Actions dataset.", "Name": "The name of the dataset.", "SchemaArn": "The ARN of the associated schema." }, @@ -27015,7 +27521,7 @@ "BaiduMessage": "The message that the campaign sends through the Baidu (Baidu Cloud Push) channel. If specified, this message overrides the default message.", "CustomMessage": "The message that the campaign sends through a custom channel, as specified by the delivery configuration ( `CustomDeliveryConfiguration` ) settings for the campaign. If specified, this message overrides the default message.", "DefaultMessage": "The default message that the campaign sends through all the channels that are configured for the campaign.", - "EmailMessage": "The message that the campaign sends through the email channel. If specified, this message overrides the default message.", + "EmailMessage": "The message that the campaign sends through the email channel. If specified, this message overrides the default message.\n\n> The maximum email message size is 200KB.
You can use email templates to send larger email messages.", "GCMMessage": "The message that the campaign sends through the GCM channel, which enables Amazon Pinpoint to send push notifications through the Firebase Cloud Messaging (FCM), formerly Google Cloud Messaging (GCM), service. If specified, this message overrides the default message.", "InAppMessage": "The default message for the in-app messaging channel. This message overrides the default message ( `DefaultMessage` ).", "SMSMessage": "The message that the campaign sends through the SMS channel. If specified, this message overrides the default message." @@ -28466,7 +28972,7 @@ "FilterControlId": "The ID of the `FilterDateTimePickerControl` .", "SourceFilterId": "The source filter ID of the `FilterDateTimePickerControl` .", "Title": "The title of the `FilterDateTimePickerControl` .", - "Type": "The date time picker type of a `FilterDateTimePickerControl` . Choose one of the following options:\n\n- `SINGLE_VALUED` : The filter condition is a fixed date.\n- `DATE_RANGE` : The filter condition is a date time range." + "Type": "The type of the `FilterDateTimePickerControl` . Choose one of the following options:\n\n- `SINGLE_VALUED` : The filter condition is a fixed date.\n- `DATE_RANGE` : The filter condition is a date time range." }, "AWS::QuickSight::Analysis FilterDropDownControl": { "CascadingControlConfiguration": "The values that are displayed in a control can be configured to only show values that are valid based on what's selected in other controls.", @@ -28497,7 +29003,7 @@ "SelectableValues": "A list of selectable values that are used in a control.", "SourceFilterId": "The source filter ID of the `FilterListControl` .", "Title": "The title of the `FilterListControl` .", - "Type": "The type of `FilterListControl` . Choose one of the following options:\n\n- `MULTI_SELECT` : The user can select multiple entries from the list.\n- `SINGLE_SELECT` : The user can select a single entry from the list." + "Type": "The type of the `FilterListControl` . Choose one of the following options:\n\n- `MULTI_SELECT` : The user can select multiple entries from the list.\n- `SINGLE_SELECT` : The user can select a single entry from the list." }, "AWS::QuickSight::Analysis FilterOperationSelectedFieldsConfiguration": { "SelectedColumns": "The selected columns of a dataset.", @@ -28514,7 +29020,7 @@ "Title": "The title of the `FilterTextAreaControl` ." }, "AWS::QuickSight::Analysis FilterScopeConfiguration": { - "AllSheets": "The configuration for applying a filter to all sheets.", + "AllSheets": "The configuration that applies a filter to all sheets. When you choose `AllSheets` as the value for a `FilterScopeConfiguration` , this filter is applied to all visuals of all sheets in an Analysis, Dashboard, or Template.", "SelectedSheets": "The configuration for applying a filter to specific sheets."
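A hedged sketch of the two `FilterScopeConfiguration` shapes described above; the empty `AllSheets` object and the `SheetVisualScopingConfigurations` key are assumptions about the generated schema, not taken from this diff:

```yaml
# Sketch only: key names inside SelectedSheets are assumed.
ScopeToAllSheets:
  FilterScopeConfiguration:
    AllSheets: {}                  # applies the filter to every visual on every sheet
ScopeToOneSheet:
  FilterScopeConfiguration:
    SelectedSheets:
      SheetVisualScopingConfigurations:
        - SheetId: sheet-1         # placeholder sheet ID
          Scope: ALL_VISUALS       # assumed enum value
```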
}, "AWS::QuickSight::Analysis FilterSelectableValues": { @@ -28523,12 +29029,12 @@ "AWS::QuickSight::Analysis FilterSliderControl": { "DisplayOptions": "The display options of a control.", "FilterControlId": "The ID of the `FilterSliderControl` .", - "MaximumValue": "The smaller value that is displayed at the left of the slider.", - "MinimumValue": "The larger value that is displayed at the right of the slider.", + "MaximumValue": "The larger value that is displayed at the right of the slider.", + "MinimumValue": "The smaller value that is displayed at the left of the slider.", "SourceFilterId": "The source filter ID of the `FilterSliderControl` .", "StepSize": "The number of increments that the slider bar is divided into.", "Title": "The title of the `FilterSliderControl` .", - "Type": "The type of `FilterSliderControl` . Choose one of the following options:\n\n- `SINGLE_POINT` : Filter against(equals) a single data point.\n- `RANGE` : Filter data that is in a specified range." + "Type": "The type of the `FilterSliderControl` . Choose one of the following options:\n\n- `SINGLE_POINT` : Filter against(equals) a single data point.\n- `RANGE` : Filter data that is in a specified range." }, "AWS::QuickSight::Analysis FilterTextAreaControl": { "Delimiter": "The delimiter that is used to separate the lines in text.", @@ -29239,8 +29745,8 @@ }, "AWS::QuickSight::Analysis ParameterSliderControl": { "DisplayOptions": "The display options of a control.", - "MaximumValue": "The smaller value that is displayed at the left of the slider.", - "MinimumValue": "The larger value that is displayed at the right of the slider.", + "MaximumValue": "The larger value that is displayed at the right of the slider.", + "MinimumValue": "The smaller value that is displayed at the left of the slider.", "ParameterControlId": "The ID of the `ParameterSliderControl` .", "SourceParameterName": "The source parameter name of the `ParameterSliderControl` .", "StepSize": "The number of increments that the slider bar is divided into.", @@ -31028,7 +31534,7 @@ "FilterControlId": "The ID of the `FilterDateTimePickerControl` .", "SourceFilterId": "The source filter ID of the `FilterDateTimePickerControl` .", "Title": "The title of the `FilterDateTimePickerControl` .", - "Type": "The date time picker type of a `FilterDateTimePickerControl` . Choose one of the following options:\n\n- `SINGLE_VALUED` : The filter condition is a fixed date.\n- `DATE_RANGE` : The filter condition is a date time range." + "Type": "The type of the `FilterDropDownControl` . Choose one of the following options:\n\n- `MULTI_SELECT` : The user can select multiple entries from a dropdown menu.\n- `SINGLE_SELECT` : The user can select a single entry from a dropdown menu." }, "AWS::QuickSight::Dashboard FilterDropDownControl": { "CascadingControlConfiguration": "The values that are displayed in a control can be configured to only show values that are valid based on what's selected in other controls.", @@ -31059,7 +31565,7 @@ "SelectableValues": "A list of selectable values that are used in a control.", "SourceFilterId": "The source filter ID of the `FilterListControl` .", "Title": "The title of the `FilterListControl` .", - "Type": "The type of `FilterListControl` . Choose one of the following options:\n\n- `MULTI_SELECT` : The user can select multiple entries from the list.\n- `SINGLE_SELECT` : The user can select a single entry from the list." + "Type": "The type of the `FilterListControl` . 
Choose one of the following options:\n\n- `MULTI_SELECT` : The user can select multiple entries from the list.\n- `SINGLE_SELECT` : The user can select a single entry from the list." }, "AWS::QuickSight::Dashboard FilterOperationSelectedFieldsConfiguration": { "SelectedColumns": "The selected columns of a dataset.", @@ -31076,7 +31582,7 @@ "Title": "The title of the `FilterTextAreaControl` ." }, "AWS::QuickSight::Dashboard FilterScopeConfiguration": { - "AllSheets": "The configuration for applying a filter to all sheets.", + "AllSheets": "The configuration that applies a filter to all sheets. When you choose `AllSheets` as the value for a `FilterScopeConfiguration` , the filter is applied to all visuals of all sheets in an Analysis, Dashboard, or Template.", "SelectedSheets": "The configuration for applying a filter to specific sheets." }, "AWS::QuickSight::Dashboard FilterSelectableValues": { @@ -31085,12 +31591,12 @@ "AWS::QuickSight::Dashboard FilterSliderControl": { "DisplayOptions": "The display options of a control.", "FilterControlId": "The ID of the `FilterSliderControl` .", - "MaximumValue": "The smaller value that is displayed at the left of the slider.", - "MinimumValue": "The larger value that is displayed at the right of the slider.", + "MaximumValue": "The larger value that is displayed at the right of the slider.", + "MinimumValue": "The smaller value that is displayed at the left of the slider.", "SourceFilterId": "The source filter ID of the `FilterSliderControl` .", "StepSize": "The number of increments that the slider bar is divided into.", "Title": "The title of the `FilterSliderControl` .", - "Type": "The type of `FilterSliderControl` . Choose one of the following options:\n\n- `SINGLE_POINT` : Filter against(equals) a single data point.\n- `RANGE` : Filter data that is in a specified range." + "Type": "The type of the `FilterSliderControl` . Choose one of the following options:\n\n- `SINGLE_POINT` : Filter against (equals) a single data point.\n- `RANGE` : Filter data that is in a specified range." }, "AWS::QuickSight::Dashboard FilterTextAreaControl": { "Delimiter": "The delimiter that is used to separate the lines in text.", @@ -31804,8 +32310,8 @@ }, "AWS::QuickSight::Dashboard ParameterSliderControl": { "DisplayOptions": "The display options of a control.", - "MaximumValue": "The smaller value that is displayed at the left of the slider.", - "MinimumValue": "The larger value that is displayed at the right of the slider.", + "MaximumValue": "The larger value that is displayed at the right of the slider.", + "MinimumValue": "The smaller value that is displayed at the left of the slider.", "ParameterControlId": "The ID of the `ParameterSliderControl` .", "SourceParameterName": "The source parameter name of the `ParameterSliderControl` .", "StepSize": "The number of increments that the slider bar is divided into.", @@ -33963,7 +34469,7 @@ "FilterControlId": "The ID of the `FilterDateTimePickerControl` .", "SourceFilterId": "The source filter ID of the `FilterDateTimePickerControl` .", "Title": "The title of the `FilterDateTimePickerControl` .", - "Type": "The date time picker type of a `FilterDateTimePickerControl` . Choose one of the following options:\n\n- `SINGLE_VALUED` : The filter condition is a fixed date.\n- `DATE_RANGE` : The filter condition is a date time range." + "Type": "The type of the `FilterDropDownControl` . 
Choose one of the following options:\n\n- `MULTI_SELECT` : The user can select multiple entries from a dropdown menu.\n- `SINGLE_SELECT` : The user can select a single entry from a dropdown menu." }, "AWS::QuickSight::Template FilterDropDownControl": { "CascadingControlConfiguration": "The values that are displayed in a control can be configured to only show values that are valid based on what's selected in other controls.", @@ -33994,7 +34500,7 @@ "SelectableValues": "A list of selectable values that are used in a control.", "SourceFilterId": "The source filter ID of the `FilterListControl` .", "Title": "The title of the `FilterListControl` .", - "Type": "The type of `FilterListControl` . Choose one of the following options:\n\n- `MULTI_SELECT` : The user can select multiple entries from the list.\n- `SINGLE_SELECT` : The user can select a single entry from the list." + "Type": "The type of the `FilterListControl` . Choose one of the following options:\n\n- `MULTI_SELECT` : The user can select multiple entries from the list.\n- `SINGLE_SELECT` : The user can select a single entry from the list." }, "AWS::QuickSight::Template FilterOperationSelectedFieldsConfiguration": { "SelectedColumns": "The selected columns of a dataset.", @@ -34011,7 +34517,7 @@ "Title": "The title of the `FilterTextAreaControl` ." }, "AWS::QuickSight::Template FilterScopeConfiguration": { - "AllSheets": "The configuration for applying a filter to all sheets.", + "AllSheets": "The configuration that applies a filter to all sheets. When you choose `AllSheets` as the value for a `FilterScopeConfiguration` , the filter is applied to all visuals of all sheets in an Analysis, Dashboard, or Template.", "SelectedSheets": "The configuration for applying a filter to specific sheets." }, "AWS::QuickSight::Template FilterSelectableValues": { @@ -34020,12 +34526,12 @@ "AWS::QuickSight::Template FilterSliderControl": { "DisplayOptions": "The display options of a control.", "FilterControlId": "The ID of the `FilterSliderControl` .", - "MaximumValue": "The smaller value that is displayed at the left of the slider.", - "MinimumValue": "The larger value that is displayed at the right of the slider.", + "MaximumValue": "The larger value that is displayed at the right of the slider.", + "MinimumValue": "The smaller value that is displayed at the left of the slider.", "SourceFilterId": "The source filter ID of the `FilterSliderControl` .", "StepSize": "The number of increments that the slider bar is divided into.", "Title": "The title of the `FilterSliderControl` .", - "Type": "The type of `FilterSliderControl` . Choose one of the following options:\n\n- `SINGLE_POINT` : Filter against(equals) a single data point.\n- `RANGE` : Filter data that is in a specified range." + "Type": "The type of the `FilterSliderControl` . Choose one of the following options:\n\n- `SINGLE_POINT` : Filter against (equals) a single data point.\n- `RANGE` : Filter data that is in a specified range."
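Since the `MinimumValue` / `MaximumValue` descriptions above were just corrected (the two had been swapped), a short hypothetical sketch of a slider filter control may help; the property names come from this schema, while the IDs, title, bounds, and step are invented:

```yaml
# Hypothetical FilterControls fragment for a QuickSight analysis,
# dashboard, or template sheet; IDs, title, and bounds are invented.
FilterControls:
  - Slider:
      FilterControlId: example-slider-control
      SourceFilterId: example-numeric-filter
      Title: Price range
      Type: RANGE          # or SINGLE_POINT to filter on one data point
      MinimumValue: 0      # the smaller value, shown at the left of the slider
      MaximumValue: 1000   # the larger value, shown at the right of the slider
      StepSize: 10
```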
}, "AWS::QuickSight::Template FilterTextAreaControl": { "Delimiter": "The delimiter that is used to separate the lines in text.", @@ -34732,8 +35238,8 @@ }, "AWS::QuickSight::Template ParameterSliderControl": { "DisplayOptions": "The display options of a control.", - "MaximumValue": "The smaller value that is displayed at the left of the slider.", - "MinimumValue": "The larger value that is displayed at the right of the slider.", + "MaximumValue": "The larger value that is displayed at the right of the slider.", + "MinimumValue": "The smaller value that is displayed at the left of the slider.", "ParameterControlId": "The ID of the `ParameterSliderControl` .", "SourceParameterName": "The source parameter name of the `ParameterSliderControl` .", "StepSize": "The number of increments that the slider bar is divided into.", @@ -36044,10 +36550,13 @@ "Description": "An optional description of your CEV.", "Engine": "The database engine to use for your custom engine version (CEV).\n\nValid values:\n\n- `custom-oracle-ee`\n- `custom-oracle-ee-cdb`", "EngineVersion": "The name of your CEV. The name format is `major version.customized_string` . For example, a valid CEV name is `19.my_cev1` . This setting is required for RDS Custom for Oracle, but optional for Amazon RDS. The combination of `Engine` and `EngineVersion` is unique per customer per Region.\n\n*Constraints:* Minimum length is 1. Maximum length is 60.\n\n*Pattern:* `^[a-z0-9_.-]{1,60$` }", + "ImageId": "A value that indicates the ID of the AMI.", "KMSKeyId": "The AWS KMS key identifier for an encrypted CEV. A symmetric encryption KMS key is required for RDS Custom, but optional for Amazon RDS.\n\nIf you have an existing symmetric encryption KMS key in your account, you can use it with RDS Custom. No further action is necessary. If you don't already have a symmetric encryption KMS key in your account, follow the instructions in [Creating a symmetric encryption KMS key](https://docs.aws.amazon.com/kms/latest/developerguide/create-keys.html#create-symmetric-cmk) in the *AWS Key Management Service Developer Guide* .\n\nYou can choose the same symmetric encryption key when you create a CEV and a DB instance, or choose different keys.", "Manifest": "The CEV manifest, which is a JSON document that describes the installation .zip files stored in Amazon S3. Specify the name/value pairs in a file or a quoted string. RDS Custom applies the patches in the order in which they are listed.\n\nThe following JSON fields are valid:\n\n- **MediaImportTemplateVersion** - Version of the CEV manifest. The date is in the format `YYYY-MM-DD` .\n- **databaseInstallationFileNames** - Ordered list of installation files for the CEV.\n- **opatchFileNames** - Ordered list of OPatch installers used for the Oracle DB engine.\n- **psuRuPatchFileNames** - The PSU and RU patches for this CEV.\n- **OtherPatchFileNames** - The patches that are not in the list of PSU and RU patches. Amazon RDS applies these patches after applying the PSU and RU patches.\n\nFor more information, see [Creating the CEV manifest](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/custom-cev.html#custom-cev.preparing.manifest) in the *Amazon RDS User Guide* .", + "SourceCustomDbEngineVersionIdentifier": "The ARN of a CEV to use as a source for creating a new CEV. You can specify a different Amazon Machine Imagine (AMI) by using either `Source` or `UseAwsProvidedLatestImage` . 
You can't specify a different JSON manifest when you specify `SourceCustomDbEngineVersionIdentifier` .", "Status": "A value that indicates the status of a custom engine version (CEV).", - "Tags": "A list of tags. For more information, see [Tagging Amazon RDS Resources](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Tagging.html) in the *Amazon RDS User Guide.*" + "Tags": "A list of tags. For more information, see [Tagging Amazon RDS Resources](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Tagging.html) in the *Amazon RDS User Guide.*", + "UseAwsProvidedLatestImage": "Specifies whether to use the latest service-provided Amazon Machine Image (AMI) for the CEV. If you specify `UseAwsProvidedLatestImage` , you can't also specify `ImageId` ." }, "AWS::RDS::CustomDBEngineVersion Tag": { "Key": "A key is the required name of the tag. The string value can be from 1 to 128 Unicode characters in length and can't be prefixed with `aws:` or `rds:` . The string can contain only the set of Unicode letters, digits, white-space, '_', '.', ':', '/', '=', '+', '-', '@' (Java regex: \"^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-@]*)$\").", @@ -36064,7 +36573,7 @@ "DBClusterIdentifier": "The DB cluster identifier. This parameter is stored as a lowercase string.\n\nConstraints:\n\n- Must contain from 1 to 63 letters, numbers, or hyphens.\n- First character must be a letter.\n- Can't end with a hyphen or contain two consecutive hyphens.\n\nExample: `my-cluster1`\n\nValid for: Aurora DB clusters and Multi-AZ DB clusters", "DBClusterInstanceClass": "The compute and memory capacity of each DB instance in the Multi-AZ DB cluster, for example `db.m6gd.xlarge` . Not all DB instance classes are available in all AWS Regions , or for all database engines.\n\nFor the full list of DB instance classes and availability for your engine, see [DB instance class](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.DBInstanceClass.html) in the *Amazon RDS User Guide* .\n\nThis setting is required to create a Multi-AZ DB cluster.\n\nValid for Cluster Type: Multi-AZ DB clusters only", "DBClusterParameterGroupName": "The name of the DB cluster parameter group to associate with this DB cluster.\n\n> If you apply a parameter group to an existing DB cluster, then its DB instances might need to reboot. This can result in an outage while the DB instances are rebooting.\n> \n> If you apply a change to the parameter group associated with a stopped DB cluster, then the stack update waits until the DB cluster is started. \n\nTo list all of the available DB cluster parameter group names, use the following command:\n\n`aws rds describe-db-cluster-parameter-groups --query \"DBClusterParameterGroups[].DBClusterParameterGroupName\" --output text`\n\nValid for: Aurora DB clusters and Multi-AZ DB clusters", - "DBInstanceParameterGroupName": "The name of the DB parameter group to apply to all instances of the DB cluster.\n\n> When you apply a parameter group using the `DBInstanceParameterGroupName` parameter, the DB cluster isn't rebooted automatically. Also, parameter changes are applied immediately rather than during the next maintenance window. 
\n\nDefault: The existing name setting\n\nConstraints:\n\n- The DB parameter group must be in the same DB parameter group family as this DB cluster.", + "DBInstanceParameterGroupName": "The name of the DB parameter group to apply to all instances of the DB cluster.\n\n> When you apply a parameter group using the `DBInstanceParameterGroupName` parameter, the DB cluster isn't rebooted automatically. Also, parameter changes are applied immediately rather than during the next maintenance window. \n\nValid for Cluster Type: Aurora DB clusters only\n\nDefault: The existing name setting\n\nConstraints:\n\n- The DB parameter group must be in the same DB parameter group family as this DB cluster.\n- The `DBInstanceParameterGroupName` parameter is valid in combination with the `AllowMajorVersionUpgrade` parameter for a major version upgrade only.", "DBSubnetGroupName": "A DB subnet group that you want to associate with this DB cluster.\n\nIf you are restoring a DB cluster to a point in time with `RestoreType` set to `copy-on-write` , and don't specify a DB subnet group name, then the DB cluster is restored with a default DB subnet group.\n\nValid for: Aurora DB clusters and Multi-AZ DB clusters", "DBSystemId": "Reserved for future use.", "DatabaseName": "The name of your database. If you don't provide a name, then Amazon RDS won't create a database in this DB cluster. For naming constraints, see [Naming Constraints](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/CHAP_Limits.html#RDS_Limits.Constraints) in the *Amazon Aurora User Guide* .\n\nValid for: Aurora DB clusters and Multi-AZ DB clusters", @@ -36076,7 +36585,7 @@ "EnableHttpEndpoint": "Specifies whether to enable the HTTP endpoint for the DB cluster. By default, the HTTP endpoint isn't enabled.\n\nWhen enabled, the HTTP endpoint provides a connectionless web service API (RDS Data API) for running SQL queries on the DB cluster. You can also query your database from inside the RDS console with the RDS query editor.\n\nRDS Data API is supported with the following DB clusters:\n\n- Aurora PostgreSQL Serverless v2 and provisioned\n- Aurora PostgreSQL and Aurora MySQL Serverless v1\n\nFor more information, see [Using RDS Data API](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/data-api.html) in the *Amazon Aurora User Guide* .\n\nValid for Cluster Type: Aurora DB clusters only", "EnableIAMDatabaseAuthentication": "A value that indicates whether to enable mapping of AWS Identity and Access Management (IAM) accounts to database accounts. 
By default, mapping is disabled.\n\nFor more information, see [IAM Database Authentication](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/UsingWithRDS.IAMDBAuth.html) in the *Amazon Aurora User Guide.*\n\nValid for: Aurora DB clusters only", "Engine": "The name of the database engine to be used for this DB cluster.\n\nValid Values:\n\n- `aurora-mysql`\n- `aurora-postgresql`\n- `mysql`\n- `postgres`\n\nValid for: Aurora DB clusters and Multi-AZ DB clusters", - "EngineMode": "The DB engine mode of the DB cluster, either `provisioned` or `serverless` .\n\nThe `serverless` engine mode only applies for Aurora Serverless v1 DB clusters.\n\nFor information about limitations and requirements for Serverless DB clusters, see the following sections in the *Amazon Aurora User Guide* :\n\n- [Limitations of Aurora Serverless v1](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-serverless.html#aurora-serverless.limitations)\n- [Requirements for Aurora Serverless v2](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-serverless-v2.requirements.html)\n\nValid for Cluster Type: Aurora DB clusters only", + "EngineMode": "The DB engine mode of the DB cluster, either `provisioned` or `serverless` .\n\nThe `serverless` engine mode only applies for Aurora Serverless v1 DB clusters. Aurora Serverless v2 DB clusters use the `provisioned` engine mode.\n\nFor information about limitations and requirements for Serverless DB clusters, see the following sections in the *Amazon Aurora User Guide* :\n\n- [Limitations of Aurora Serverless v1](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-serverless.html#aurora-serverless.limitations)\n- [Requirements for Aurora Serverless v2](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-serverless-v2.requirements.html)\n\nValid for Cluster Type: Aurora DB clusters only", "EngineVersion": "The version number of the database engine to use.\n\nTo list all of the available engine versions for Aurora MySQL version 2 (5.7-compatible) and version 3 (8.0-compatible), use the following command:\n\n`aws rds describe-db-engine-versions --engine aurora-mysql --query \"DBEngineVersions[].EngineVersion\"`\n\nYou can supply either `5.7` or `8.0` to use the default engine version for Aurora MySQL version 2 or version 3, respectively.\n\nTo list all of the available engine versions for Aurora PostgreSQL, use the following command:\n\n`aws rds describe-db-engine-versions --engine aurora-postgresql --query \"DBEngineVersions[].EngineVersion\"`\n\nTo list all of the available engine versions for RDS for MySQL, use the following command:\n\n`aws rds describe-db-engine-versions --engine mysql --query \"DBEngineVersions[].EngineVersion\"`\n\nTo list all of the available engine versions for RDS for PostgreSQL, use the following command:\n\n`aws rds describe-db-engine-versions --engine postgres --query \"DBEngineVersions[].EngineVersion\"`\n\n*Aurora MySQL*\n\nFor information, see [Database engine updates for Amazon Aurora MySQL](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/AuroraMySQL.Updates.html) in the *Amazon Aurora User Guide* .\n\n*Aurora PostgreSQL*\n\nFor information, see [Amazon Aurora PostgreSQL releases and engine versions](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/AuroraPostgreSQL.Updates.20180305.html) in the *Amazon Aurora User Guide* .\n\n*MySQL*\n\nFor information, see [Amazon RDS for 
MySQL](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_MySQL.html#MySQL.Concepts.VersionMgmt) in the *Amazon RDS User Guide* .\n\n*PostgreSQL*\n\nFor information, see [Amazon RDS for PostgreSQL](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_PostgreSQL.html#PostgreSQL.Concepts) in the *Amazon RDS User Guide* .\n\nValid for: Aurora DB clusters and Multi-AZ DB clusters", "GlobalClusterIdentifier": "If you are configuring an Aurora global database cluster and want your Aurora DB cluster to be a secondary member in the global database cluster, specify the global cluster ID of the global database cluster. To define the primary database cluster of the global cluster, use the [AWS::RDS::GlobalCluster](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-rds-globalcluster.html) resource.\n\nIf you aren't configuring a global database cluster, don't specify this property.\n\n> To remove the DB cluster from a global database cluster, specify an empty value for the `GlobalClusterIdentifier` property. \n\nFor information about Aurora global databases, see [Working with Amazon Aurora Global Databases](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-global-database.html) in the *Amazon Aurora User Guide* .\n\nValid for: Aurora DB clusters only", "Iops": "The amount of Provisioned IOPS (input/output operations per second) to be initially allocated for each DB instance in the Multi-AZ DB cluster.\n\nFor information about valid IOPS values, see [Provisioned IOPS storage](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_Storage.html#USER_PIOPS) in the *Amazon RDS User Guide* .\n\nThis setting is required to create a Multi-AZ DB cluster.\n\nValid for Cluster Type: Multi-AZ DB clusters only\n\nConstraints:\n\n- Must be a multiple between .5 and 50 of the storage amount for the DB cluster.", @@ -36189,10 +36698,10 @@ "EnableIAMDatabaseAuthentication": "A value that indicates whether to enable mapping of AWS Identity and Access Management (IAM) accounts to database accounts. By default, mapping is disabled.\n\nThis property is supported for RDS for MariaDB, RDS for MySQL, and RDS for PostgreSQL. For more information, see [IAM Database Authentication for MariaDB, MySQL, and PostgreSQL](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/UsingWithRDS.IAMDBAuth.html) in the *Amazon RDS User Guide.*\n\n*Amazon Aurora*\n\nNot applicable. Mapping AWS IAM accounts to database accounts is managed by the DB cluster.", "EnablePerformanceInsights": "Specifies whether to enable Performance Insights for the DB instance. For more information, see [Using Amazon Performance Insights](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_PerfInsights.html) in the *Amazon RDS User Guide* .\n\nThis setting doesn't apply to RDS Custom DB instances.", "Endpoint": "The connection endpoint for the DB instance.\n\n> The endpoint might not be shown for instances with the status of `creating` .", - "Engine": "The name of the database engine to use for this DB instance. Not every database engine is available in every AWS Region.\n\nThis property is required when creating a DB instance.\n\n> You can change the architecture of an Oracle database from the non-container database (CDB) architecture to the CDB architecture by updating the `Engine` value in your templates from `oracle-ee` or `oracle-ee-cdb` to `oracle-se2-cdb` . Converting to the CDB architecture requires an interruption. 
\n\nValid Values:\n\n- `aurora-mysql` (for Aurora MySQL DB instances)\n- `aurora-postgresql` (for Aurora PostgreSQL DB instances)\n- `custom-oracle-ee` (for RDS Custom for Oracle DB instances)\n- `custom-oracle-ee-cdb` (for RDS Custom for Oracle DB instances)\n- `custom-sqlserver-ee` (for RDS Custom for SQL Server DB instances)\n- `custom-sqlserver-se` (for RDS Custom for SQL Server DB instances)\n- `custom-sqlserver-web` (for RDS Custom for SQL Server DB instances)\n- `db2-ae`\n- `db2-se`\n- `mariadb`\n- `mysql`\n- `oracle-ee`\n- `oracle-ee-cdb`\n- `oracle-se2`\n- `oracle-se2-cdb`\n- `postgres`\n- `sqlserver-ee`\n- `sqlserver-se`\n- `sqlserver-ex`\n- `sqlserver-web`", + "Engine": "The name of the database engine to use for this DB instance. Not every database engine is available in every AWS Region.\n\nThis property is required when creating a DB instance.\n\n> You can convert an Oracle database from the non-CDB architecture to the container database (CDB) architecture by updating the `Engine` value in your templates from `oracle-ee` to `oracle-ee-cdb` or from `oracle-se2` to `oracle-se2-cdb` . Converting to the CDB architecture requires an interruption. \n\nValid Values:\n\n- `aurora-mysql` (for Aurora MySQL DB instances)\n- `aurora-postgresql` (for Aurora PostgreSQL DB instances)\n- `custom-oracle-ee` (for RDS Custom for Oracle DB instances)\n- `custom-oracle-ee-cdb` (for RDS Custom for Oracle DB instances)\n- `custom-sqlserver-ee` (for RDS Custom for SQL Server DB instances)\n- `custom-sqlserver-se` (for RDS Custom for SQL Server DB instances)\n- `custom-sqlserver-web` (for RDS Custom for SQL Server DB instances)\n- `db2-ae`\n- `db2-se`\n- `mariadb`\n- `mysql`\n- `oracle-ee`\n- `oracle-ee-cdb`\n- `oracle-se2`\n- `oracle-se2-cdb`\n- `postgres`\n- `sqlserver-ee`\n- `sqlserver-se`\n- `sqlserver-ex`\n- `sqlserver-web`", "EngineVersion": "The version number of the database engine to use.\n\nFor a list of valid engine versions, use the `DescribeDBEngineVersions` action.\n\nThe following are the database engines and links to information about the major and minor versions that are available with Amazon RDS. Not every database engine is available for every AWS Region.\n\n*Amazon Aurora*\n\nNot applicable. 
The version number of the database engine to be used by the DB instance is managed by the DB cluster.\n\n*Db2*\n\nSee [Amazon RDS for Db2](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_Db2.html#Db2.Concepts.VersionMgmt) in the *Amazon RDS User Guide.*\n\n*MariaDB*\n\nSee [MariaDB on Amazon RDS Versions](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_MariaDB.html#MariaDB.Concepts.VersionMgmt) in the *Amazon RDS User Guide.*\n\n*Microsoft SQL Server*\n\nSee [Microsoft SQL Server Versions on Amazon RDS](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_SQLServer.html#SQLServer.Concepts.General.VersionSupport) in the *Amazon RDS User Guide.*\n\n*MySQL*\n\nSee [MySQL on Amazon RDS Versions](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_MySQL.html#MySQL.Concepts.VersionMgmt) in the *Amazon RDS User Guide.*\n\n*Oracle*\n\nSee [Oracle Database Engine Release Notes](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Appendix.Oracle.PatchComposition.html) in the *Amazon RDS User Guide.*\n\n*PostgreSQL*\n\nSee [Supported PostgreSQL Database Versions](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_PostgreSQL.html#PostgreSQL.Concepts.General.DBVersions) in the *Amazon RDS User Guide.*", "Iops": "The number of I/O operations per second (IOPS) that the database provisions. The value must be equal to or greater than 1000.\n\nIf you specify this property, you must follow the range of allowed ratios of your requested IOPS rate to the amount of storage that you allocate (IOPS to allocated storage). For example, you can provision an Oracle database instance with 1000 IOPS and 200 GiB of storage (a ratio of 5:1), or specify 2000 IOPS with 200 GiB of storage (a ratio of 10:1). For more information, see [Amazon RDS Provisioned IOPS Storage to Improve Performance](https://docs.aws.amazon.com/AmazonRDS/latest/DeveloperGuide/CHAP_Storage.html#USER_PIOPS) in the *Amazon RDS User Guide* .\n\n> If you specify `io1` for the `StorageType` property, then you must also specify the `Iops` property. \n\nConstraints:\n\n- For RDS for Db2, MariaDB, MySQL, Oracle, and PostgreSQL - Must be a multiple between .5 and 50 of the storage amount for the DB instance.\n- For RDS for SQL Server - Must be a multiple between 1 and 50 of the storage amount for the DB instance.", - "KmsKeyId": "The ARN of the AWS KMS key that's used to encrypt the DB instance, such as `arn:aws:kms:us-east-1:012345678910:key/abcd1234-a123-456a-a12b-a123b4cd56ef` . If you enable the StorageEncrypted property but don't specify this property, AWS CloudFormation uses the default KMS key. If you specify this property, you must set the StorageEncrypted property to true.\n\nIf you specify the `SourceDBInstanceIdentifier` property, the value is inherited from the source DB instance if the read replica is created in the same region.\n\nIf you create an encrypted read replica in a different AWS Region, then you must specify a KMS key for the destination AWS Region. KMS encryption keys are specific to the region that they're created in, and you can't use encryption keys from one region in another region.\n\nIf you specify the `SnapshotIdentifier` property, the `StorageEncrypted` property value is inherited from the snapshot, and if the DB instance is encrypted, the specified `KmsKeyId` property is used.\n\nIf you specify `DBSecurityGroups` , AWS CloudFormation ignores this property. To specify both a security group and this property, you must use a VPC security group. 
For more information about Amazon RDS and VPC, see [Using Amazon RDS with Amazon VPC](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_VPC.html) in the *Amazon RDS User Guide* .\n\n*Amazon Aurora*\n\nNot applicable. The KMS key identifier is managed by the DB cluster.", + "KmsKeyId": "The ARN of the AWS KMS key that's used to encrypt the DB instance, such as `arn:aws:kms:us-east-1:012345678910:key/abcd1234-a123-456a-a12b-a123b4cd56ef` . If you enable the StorageEncrypted property but don't specify this property, AWS CloudFormation uses the default KMS key. If you specify this property, you must set the StorageEncrypted property to true.\n\nIf you specify the `SourceDBInstanceIdentifier` property, the value is inherited from the source DB instance if the read replica is created in the same region.\n\nIf you create an encrypted read replica in a different AWS Region, then you must specify a KMS key for the destination AWS Region. KMS encryption keys are specific to the region that they're created in, and you can't use encryption keys from one region in another region.\n\nIf you specify the `DBSnapshotIdentifier` property, don't specify this property. The `StorageEncrypted` property value is inherited from the snapshot. If the DB instance is encrypted, the specified `KmsKeyId` property is also inherited from the snapshot.\n\nIf you specify `DBSecurityGroups` , AWS CloudFormation ignores this property. To specify both a security group and this property, you must use a VPC security group. For more information about Amazon RDS and VPC, see [Using Amazon RDS with Amazon VPC](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_VPC.html) in the *Amazon RDS User Guide* .\n\n*Amazon Aurora*\n\nNot applicable. The KMS key identifier is managed by the DB cluster.", "LicenseModel": "License model information for this DB instance.\n\nValid Values:\n\n- Aurora MySQL - `general-public-license`\n- Aurora PostgreSQL - `postgresql-license`\n- RDS for Db2 - `bring-your-own-license` . For more information about RDS for Db2 licensing, see [](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/db2-licensing.html) in the *Amazon RDS User Guide.*\n- RDS for MariaDB - `general-public-license`\n- RDS for Microsoft SQL Server - `license-included`\n- RDS for MySQL - `general-public-license`\n- RDS for Oracle - `bring-your-own-license` or `license-included`\n- RDS for PostgreSQL - `postgresql-license`\n\n> If you've specified `DBSecurityGroups` and then you update the license model, AWS CloudFormation replaces the underlying DB instance. This will incur some interruptions to database availability.", "ManageMasterUserPassword": "Specifies whether to manage the master user password with AWS Secrets Manager.\n\nFor more information, see [Password management with AWS Secrets Manager](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/rds-secrets-manager.html) in the *Amazon RDS User Guide.*\n\nConstraints:\n\n- Can't manage the master user password with AWS Secrets Manager if `MasterUserPassword` is specified.", "MasterUserPassword": "The password for the master user. The password can include any printable ASCII character except \"/\", \"\"\", or \"@\".\n\n*Amazon Aurora*\n\nNot applicable. 
The password for the master user is managed by the DB cluster.\n\n*RDS for Db2*\n\nMust contain from 8 to 255 characters.\n\n*RDS for MariaDB*\n\nConstraints: Must contain from 8 to 41 characters.\n\n*RDS for Microsoft SQL Server*\n\nConstraints: Must contain from 8 to 128 characters.\n\n*RDS for MySQL*\n\nConstraints: Must contain from 8 to 41 characters.\n\n*RDS for Oracle*\n\nConstraints: Must contain from 8 to 30 characters.\n\n*RDS for PostgreSQL*\n\nConstraints: Must contain from 8 to 128 characters.", @@ -36220,7 +36729,7 @@ "SourceDBInstanceIdentifier": "If you want to create a read replica DB instance, specify the ID of the source DB instance. Each DB instance can have a limited number of read replicas. For more information, see [Working with Read Replicas](https://docs.aws.amazon.com/AmazonRDS/latest/DeveloperGuide/USER_ReadRepl.html) in the *Amazon RDS User Guide* .\n\nFor information about constraints that apply to DB instance identifiers, see [Naming constraints in Amazon RDS](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_Limits.html#RDS_Limits.Constraints) in the *Amazon RDS User Guide* .\n\nThe `SourceDBInstanceIdentifier` property determines whether a DB instance is a read replica. If you remove the `SourceDBInstanceIdentifier` property from your template and then update your stack, AWS CloudFormation promotes the Read Replica to a standalone DB instance.\n\n> - If you specify a source DB instance that uses VPC security groups, we recommend that you specify the `VPCSecurityGroups` property. If you don't specify the property, the read replica inherits the value of the `VPCSecurityGroups` property from the source DB when you create the replica. However, if you update the stack, AWS CloudFormation reverts the replica's `VPCSecurityGroups` property to the default value because it's not defined in the stack's template. This change might cause unexpected issues.\n> - Read replicas don't support deletion policies. AWS CloudFormation ignores any deletion policy that's associated with a read replica.\n> - If you specify `SourceDBInstanceIdentifier` , don't specify the `DBSnapshotIdentifier` property. You can't create a read replica from a snapshot.\n> - Don't set the `BackupRetentionPeriod` , `DBName` , `MasterUsername` , `MasterUserPassword` , and `PreferredBackupWindow` properties. The database attributes are inherited from the source DB instance, and backups are disabled for read replicas.\n> - If the source DB instance is in a different region than the read replica, specify the source region in `SourceRegion` , and specify an ARN for a valid DB instance in `SourceDBInstanceIdentifier` . For more information, see [Constructing a Amazon RDS Amazon Resource Name (ARN)](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Tagging.html#USER_Tagging.ARN) in the *Amazon RDS User Guide* .\n> - For DB instances in Amazon Aurora clusters, don't specify this property. Amazon RDS automatically assigns writer and reader DB instances.", "SourceDbiResourceId": "The resource ID of the source DB instance from which to restore.", "SourceRegion": "The ID of the region that contains the source DB instance for the read replica.", - "StorageEncrypted": "A value that indicates whether the DB instance is encrypted. By default, it isn't encrypted.\n\nIf you specify the `KmsKeyId` property, then you must enable encryption.\n\nIf you specify the `SourceDBInstanceIdentifier` property, don't specify this property. 
The value is inherited from the source DB instance, and if the DB instance is encrypted, the specified `KmsKeyId` property is used.\n\nIf you specify the `DBSnapshotIdentifier` and the specified snapshot is encrypted, don't specify this property. The value is inherited from the snapshot, and the specified `KmsKeyId` property is used.\n\nIf you specify the `DBSnapshotIdentifier` and the specified snapshot isn't encrypted, you can use this property to specify that the restored DB instance is encrypted. Specify the `KmsKeyId` property for the KMS key to use for encryption. If you don't want the restored DB instance to be encrypted, then don't set this property or set it to `false` .\n\n*Amazon Aurora*\n\nNot applicable. The encryption for DB instances is managed by the DB cluster.", + "StorageEncrypted": "A value that indicates whether the DB instance is encrypted. By default, it isn't encrypted.\n\nIf you specify the `KmsKeyId` property, then you must enable encryption.\n\nIf you specify the `SourceDBInstanceIdentifier` property, don't specify this property. The value is inherited from the source DB instance, and if the DB instance is encrypted, the specified `KmsKeyId` property is used.\n\nIf you specify the `DBSnapshotIdentifier` property, don't specify this property. The value is inherited from the snapshot.\n\n*Amazon Aurora*\n\nNot applicable. The encryption for DB instances is managed by the DB cluster.", "StorageThroughput": "Specifies the storage throughput value for the DB instance. This setting applies only to the `gp3` storage type.\n\nThis setting doesn't apply to RDS Custom or Amazon Aurora.", "StorageType": "The storage type to associate with the DB instance.\n\nIf you specify `io1` , `io2` , or `gp3` , you must also include a value for the `Iops` parameter.\n\nThis setting doesn't apply to Amazon Aurora DB instances. Storage is managed by the DB cluster.\n\nValid Values: `gp2 | gp3 | io1 | io2 | standard`\n\nDefault: `io1` , if the `Iops` parameter is specified. Otherwise, `gp2` .", "Tags": "An optional array of key-value pairs to apply to this DB instance.", @@ -36636,6 +37145,7 @@ "NamespaceName": "The name of the namespace. Must be between 3-64 alphanumeric characters in lowercase, and it cannot be a reserved word. A list of reserved words can be found in [Reserved Words](https://docs.aws.amazon.com/redshift/latest/dg/r_pg_keywords.html) in the Amazon Redshift Database Developer Guide.", "NamespaceResourcePolicy": "The resource policy that will be attached to the namespace.", "RedshiftIdcApplicationArn": "The ARN for the Redshift application that integrates with IAM Identity Center.", + "SnapshotCopyConfigurations": "The snapshot copy configurations for the namespace.", "Tags": "The map of the key-value pairs used to tag the namespace." }, "AWS::RedshiftServerless::Namespace Namespace": { @@ -36653,6 +37163,11 @@ "NamespaceName": "The name of the namespace. Must be between 3-64 alphanumeric characters in lowercase, and it cannot be a reserved word. A list of reserved words can be found in [Reserved Words](https://docs.aws.amazon.com/redshift/latest/dg/r_pg_keywords.html) in the Amazon Redshift Database Developer Guide.", "Status": "The status of the namespace."
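The new `SnapshotCopyConfigurations` property and the `SnapshotCopyConfiguration` type defined just below wire together roughly as follows; this is a hedged sketch, with the namespace name, destination Region, and retention value invented, and the retention unit assumed to be days:

```yaml
# Hypothetical AWS::RedshiftServerless::Namespace sketch using the new
# SnapshotCopyConfigurations property. The namespace name, destination
# Region, and retention value are invented for illustration.
ExampleNamespace:
  Type: AWS::RedshiftServerless::Namespace
  Properties:
    NamespaceName: example-namespace    # 3-64 lowercase alphanumeric characters
    SnapshotCopyConfigurations:
      - DestinationRegion: us-west-2    # where snapshots are copied
        SnapshotRetentionPeriod: 7      # how long copies are kept (assumed: days)
        # DestinationKmsKeyId: ...      # optional: encrypt copies in the destination Region
```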
}, + "AWS::RedshiftServerless::Namespace SnapshotCopyConfiguration": { + "DestinationKmsKeyId": "The ID of the KMS key to use to encrypt your snapshots in the destination AWS Region .", + "DestinationRegion": "The destination AWS Region to copy snapshots to.", + "SnapshotRetentionPeriod": "The retention period of snapshots that are copied to the destination AWS Region ." + }, "AWS::RedshiftServerless::Namespace Tag": { "Key": "The key to use in the tag.", "Value": "The value of the tag." @@ -37110,7 +37625,7 @@ "HostedZoneConfig": "A complex type that contains an optional comment.\n\nIf you don't want to specify a comment, omit the `HostedZoneConfig` and `Comment` elements.", "HostedZoneTags": "Adds, edits, or deletes tags for a health check or a hosted zone.\n\nFor information about using tags for cost allocation, see [Using Cost Allocation Tags](https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html) in the *AWS Billing and Cost Management User Guide* .", "Name": "The name of the domain. Specify a fully qualified domain name, for example, *www.example.com* . The trailing dot is optional; Amazon Route 53 assumes that the domain name is fully qualified. This means that Route 53 treats *www.example.com* (without a trailing dot) and *www.example.com.* (with a trailing dot) as identical.\n\nIf you're creating a public hosted zone, this is the name you have registered with your DNS registrar. If your domain name is registered with a registrar other than Route 53, change the name servers for your domain to the set of `NameServers` that are returned by the `Fn::GetAtt` intrinsic function.", - "QueryLoggingConfig": "Creates a configuration for DNS query logging. After you create a query logging configuration, Amazon Route 53 begins to publish log data to an Amazon CloudWatch Logs log group.\n\nDNS query logs contain information about the queries that Route 53 receives for a specified public hosted zone, such as the following:\n\n- Route 53 edge location that responded to the DNS query\n- Domain or subdomain that was requested\n- DNS record type, such as A or AAAA\n- DNS response code, such as `NoError` or `ServFail`\n\n- **Log Group and Resource Policy** - Before you create a query logging configuration, perform the following operations.\n\n> If you create a query logging configuration using the Route 53 console, Route 53 performs these operations automatically. \n\n- Create a CloudWatch Logs log group, and make note of the ARN, which you specify when you create a query logging configuration. Note the following:\n\n- You must create the log group in the us-east-1 region.\n- You must use the same AWS account to create the log group and the hosted zone that you want to configure query logging for.\n- When you create log groups for query logging, we recommend that you use a consistent prefix, for example:\n\n`/aws/route53/ *hosted zone name*`\n\nIn the next step, you'll create a resource policy, which controls access to one or more log groups and the associated AWS resources, such as Route 53 hosted zones. There's a limit on the number of resource policies that you can create, so we recommend that you use a consistent prefix so you can use the same resource policy for all the log groups that you create for query logging.\n- Create a CloudWatch Logs resource policy, and give it the permissions that Route 53 needs to create log streams and to send query logs to log streams. For the value of `Resource` , specify the ARN for the log group that you created in the previous step. 
To use the same resource policy for all the CloudWatch Logs log groups that you created for query logging configurations, replace the hosted zone name with `*` , for example:\n\n`arn:aws:logs:us-east-1:123412341234:log-group:/aws/route53/*`\n\nTo avoid the confused deputy problem, a security issue where an entity without a permission for an action can coerce a more-privileged entity to perform it, you can optionally limit the permissions that a service has to a resource in a resource-based policy by supplying the following values:\n\n- For `aws:SourceArn` , supply the hosted zone ARN used in creating the query logging configuration. For example, `aws:SourceArn: arn:aws:route53:::hostedzone/hosted zone ID` .\n- For `aws:SourceAccount` , supply the account ID for the account that creates the query logging configuration. For example, `aws:SourceAccount:111111111111` .\n\nFor more information, see [The confused deputy problem](https://docs.aws.amazon.com/IAM/latest/UserGuide/confused-deputy.html) in the *AWS IAM User Guide* .\n\n> You can't use the CloudWatch console to create or edit a resource policy. You must use the CloudWatch API, one of the AWS SDKs, or the AWS CLI .\n- **Log Streams and Edge Locations** - When Route 53 finishes creating the configuration for DNS query logging, it does the following:\n\n- Creates a log stream for an edge location the first time that the edge location responds to DNS queries for the specified hosted zone. That log stream is used to log all queries that Route 53 responds to for that edge location.\n- Begins to send query logs to the applicable log stream.\n\nThe name of each log stream is in the following format:\n\n`*hosted zone ID* / *edge location code*`\n\nThe edge location code is a three-letter code and an arbitrarily assigned number, for example, DFW3. The three-letter code typically corresponds with the International Air Transport Association airport code for an airport near the edge location. (These abbreviations might change in the future.) For a list of edge locations, see \"The Route 53 Global Network\" on the [Route 53 Product Details](https://docs.aws.amazon.com/route53/details/) page.\n- **Queries That Are Logged** - Query logs contain only the queries that DNS resolvers forward to Route 53. If a DNS resolver has already cached the response to a query (such as the IP address for a load balancer for example.com), the resolver will continue to return the cached response. It doesn't forward another query to Route 53 until the TTL for the corresponding resource record set expires. Depending on how many DNS queries are submitted for a resource record set, and depending on the TTL for that resource record set, query logs might contain information about only one query out of every several thousand queries that are submitted to DNS. 
For more information about how DNS works, see [Routing Internet Traffic to Your Website or Web Application](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/welcome-dns-service.html) in the *Amazon Route 53 Developer Guide* .\n- **Log File Format** - For a list of the values in each query log and the format of each value, see [Logging DNS Queries](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/query-logs.html) in the *Amazon Route 53 Developer Guide* .\n- **Pricing** - For information about charges for query logs, see [Amazon CloudWatch Pricing](https://docs.aws.amazon.com/cloudwatch/pricing/) .\n- **How to Stop Logging** - If you want Route 53 to stop sending query logs to CloudWatch Logs, delete the query logging configuration. For more information, see [DeleteQueryLoggingConfig](https://docs.aws.amazon.com/Route53/latest/APIReference/API_DeleteQueryLoggingConfig.html) .", + "QueryLoggingConfig": "Creates a configuration for DNS query logging. After you create a query logging configuration, Amazon Route 53 begins to publish log data to an Amazon CloudWatch Logs log group.\n\nDNS query logs contain information about the queries that Route 53 receives for a specified public hosted zone, such as the following:\n\n- Route 53 edge location that responded to the DNS query\n- Domain or subdomain that was requested\n- DNS record type, such as A or AAAA\n- DNS response code, such as `NoError` or `ServFail`\n\n- **Log Group and Resource Policy** - Before you create a query logging configuration, perform the following operations.\n\n> If you create a query logging configuration using the Route 53 console, Route 53 performs these operations automatically. \n\n- Create a CloudWatch Logs log group, and make note of the ARN, which you specify when you create a query logging configuration. Note the following:\n\n- You must create the log group in the us-east-1 region.\n- You must use the same AWS account to create the log group and the hosted zone that you want to configure query logging for.\n- When you create log groups for query logging, we recommend that you use a consistent prefix, for example:\n\n`/aws/route53/ *hosted zone name*`\n\nIn the next step, you'll create a resource policy, which controls access to one or more log groups and the associated AWS resources, such as Route 53 hosted zones. There's a limit on the number of resource policies that you can create, so we recommend that you use a consistent prefix so you can use the same resource policy for all the log groups that you create for query logging.\n- Create a CloudWatch Logs resource policy, and give it the permissions that Route 53 needs to create log streams and to send query logs to log streams. You must create the CloudWatch Logs resource policy in the us-east-1 region. For the value of `Resource` , specify the ARN for the log group that you created in the previous step. To use the same resource policy for all the CloudWatch Logs log groups that you created for query logging configurations, replace the hosted zone name with `*` , for example:\n\n`arn:aws:logs:us-east-1:123412341234:log-group:/aws/route53/*`\n\nTo avoid the confused deputy problem, a security issue where an entity without a permission for an action can coerce a more-privileged entity to perform it, you can optionally limit the permissions that a service has to a resource in a resource-based policy by supplying the following values:\n\n- For `aws:SourceArn` , supply the hosted zone ARN used in creating the query logging configuration. 
For example, `aws:SourceArn: arn:aws:route53:::hostedzone/hosted zone ID` .\n- For `aws:SourceAccount` , supply the account ID for the account that creates the query logging configuration. For example, `aws:SourceAccount:111111111111` .\n\nFor more information, see [The confused deputy problem](https://docs.aws.amazon.com/IAM/latest/UserGuide/confused-deputy.html) in the *AWS IAM User Guide* .\n\n> You can't use the CloudWatch console to create or edit a resource policy. You must use the CloudWatch API, one of the AWS SDKs, or the AWS CLI .\n- **Log Streams and Edge Locations** - When Route 53 finishes creating the configuration for DNS query logging, it does the following:\n\n- Creates a log stream for an edge location the first time that the edge location responds to DNS queries for the specified hosted zone. That log stream is used to log all queries that Route 53 responds to for that edge location.\n- Begins to send query logs to the applicable log stream.\n\nThe name of each log stream is in the following format:\n\n`*hosted zone ID* / *edge location code*`\n\nThe edge location code is a three-letter code and an arbitrarily assigned number, for example, DFW3. The three-letter code typically corresponds with the International Air Transport Association airport code for an airport near the edge location. (These abbreviations might change in the future.) For a list of edge locations, see \"The Route 53 Global Network\" on the [Route 53 Product Details](https://docs.aws.amazon.com/route53/details/) page.\n- **Queries That Are Logged** - Query logs contain only the queries that DNS resolvers forward to Route 53. If a DNS resolver has already cached the response to a query (such as the IP address for a load balancer for example.com), the resolver will continue to return the cached response. It doesn't forward another query to Route 53 until the TTL for the corresponding resource record set expires. Depending on how many DNS queries are submitted for a resource record set, and depending on the TTL for that resource record set, query logs might contain information about only one query out of every several thousand queries that are submitted to DNS. For more information about how DNS works, see [Routing Internet Traffic to Your Website or Web Application](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/welcome-dns-service.html) in the *Amazon Route 53 Developer Guide* .\n- **Log File Format** - For a list of the values in each query log and the format of each value, see [Logging DNS Queries](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/query-logs.html) in the *Amazon Route 53 Developer Guide* .\n- **Pricing** - For information about charges for query logs, see [Amazon CloudWatch Pricing](https://docs.aws.amazon.com/cloudwatch/pricing/) .\n- **How to Stop Logging** - If you want Route 53 to stop sending query logs to CloudWatch Logs, delete the query logging configuration. For more information, see [DeleteQueryLoggingConfig](https://docs.aws.amazon.com/Route53/latest/APIReference/API_DeleteQueryLoggingConfig.html) .", "VPCs": "*Private hosted zones:* A complex type that contains information about the VPCs that are associated with the specified hosted zone.\n\n> For public hosted zones, omit `VPCs` , `VPCId` , and `VPCRegion` ." }, "AWS::Route53::HostedZone HostedZoneConfig": { @@ -37149,7 +37664,7 @@ "ResourceRecords": "One or more values that correspond with the value that you specified for the `Type` property. 
For example, if you specified `A` for `Type` , you specify one or more IP addresses in IPv4 format for `ResourceRecords` . For information about the format of values for each record type, see [Supported DNS Resource Record Types](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/ResourceRecordTypes.html) in the *Amazon Route 53 Developer Guide* .\n\nNote the following:\n\n- You can specify more than one value for all record types except CNAME and SOA.\n- The maximum length of a value is 4000 characters.\n- If you're creating an alias record, omit `ResourceRecords` .", "SetIdentifier": "*Resource record sets that have a routing policy other than simple:* An identifier that differentiates among multiple resource record sets that have the same combination of name and type, such as multiple weighted resource record sets named acme.example.com that have a type of A. In a group of resource record sets that have the same name and type, the value of `SetIdentifier` must be unique for each resource record set.\n\nFor information about routing policies, see [Choosing a Routing Policy](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/routing-policy.html) in the *Amazon Route 53 Developer Guide* .", "TTL": "The resource record cache time to live (TTL), in seconds. Note the following:\n\n- If you're creating or updating an alias resource record set, omit `TTL` . Amazon Route 53 uses the value of `TTL` for the alias target.\n- If you're associating this resource record set with a health check (if you're adding a `HealthCheckId` element), we recommend that you specify a `TTL` of 60 seconds or less so clients respond quickly to changes in health status.\n- All of the resource record sets in a group of weighted resource record sets must have the same value for `TTL` .\n- If a group of weighted resource record sets includes one or more weighted alias resource record sets for which the alias target is an ELB load balancer, we recommend that you specify a `TTL` of 60 seconds for all of the non-alias weighted resource record sets that have the same name and type. Values other than 60 seconds (the TTL for load balancers) will change the effect of the values that you specify for `Weight` .", - "Type": "The DNS record type. For information about different record types and how data is encoded for them, see [Supported DNS Resource Record Types](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/ResourceRecordTypes.html) in the *Amazon Route 53 Developer Guide* .\n\nValid values for basic resource record sets: `A` | `AAAA` | `CAA` | `CNAME` | `DS` | `MX` | `NAPTR` | `NS` | `PTR` | `SOA` | `SPF` | `SRV` | `TXT`\n\nValues for weighted, latency, geolocation, and failover resource record sets: `A` | `AAAA` | `CAA` | `CNAME` | `MX` | `NAPTR` | `PTR` | `SPF` | `SRV` | `TXT` . When creating a group of weighted, latency, geolocation, or failover resource record sets, specify the same value for all of the resource record sets in the group.\n\nValid values for multivalue answer resource record sets: `A` | `AAAA` | `MX` | `NAPTR` | `PTR` | `SPF` | `SRV` | `TXT`\n\n> SPF records were formerly used to verify the identity of the sender of email messages. However, we no longer recommend that you create resource record sets for which the value of `Type` is `SPF` . RFC 7208, *Sender Policy Framework (SPF) for Authorizing Use of Domains in Email, Version 1* , has been updated to say, \"...[I]ts existence and mechanism defined in [RFC4408] have led to some interoperability issues. 
Accordingly, its use is no longer appropriate for SPF version 1; implementations are not to use it.\" In RFC 7208, see section 14.1, [The SPF DNS Record Type](https://docs.aws.amazon.com/http://tools.ietf.org/html/rfc7208#section-14.1) . \n\nValues for alias resource record sets:\n\n- *Amazon API Gateway custom regional APIs and edge-optimized APIs:* `A`\n- *CloudFront distributions:* `A`\n\nIf IPv6 is enabled for the distribution, create two resource record sets to route traffic to your distribution, one with a value of `A` and one with a value of `AAAA` .\n- *Amazon API Gateway environment that has a regionalized subdomain* : `A`\n- *ELB load balancers:* `A` | `AAAA`\n- *Amazon S3 buckets:* `A`\n- *Amazon Virtual Private Cloud interface VPC endpoints* `A`\n- *Another resource record set in this hosted zone:* Specify the type of the resource record set that you're creating the alias for. All values are supported except `NS` and `SOA` .\n\n> If you're creating an alias record that has the same name as the hosted zone (known as the zone apex), you can't route traffic to a record for which the value of `Type` is `CNAME` . This is because the alias record must have the same type as the record you're routing traffic to, and creating a CNAME record for the zone apex isn't supported even for an alias record.", + "Type": "The DNS record type. For information about different record types and how data is encoded for them, see [Supported DNS Resource Record Types](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/ResourceRecordTypes.html) in the *Amazon Route 53 Developer Guide* .\n\nValid values for basic resource record sets: `A` | `AAAA` | `CAA` | `CNAME` | `DS` | `MX` | `NAPTR` | `NS` | `PTR` | `SOA` | `SPF` | `SRV` | `TXT`\n\nValues for weighted, latency, geolocation, and failover resource record sets: `A` | `AAAA` | `CAA` | `CNAME` | `MX` | `NAPTR` | `PTR` | `SPF` | `SRV` | `TXT` . When creating a group of weighted, latency, geolocation, or failover resource record sets, specify the same value for all of the resource record sets in the group.\n\nValid values for multivalue answer resource record sets: `A` | `AAAA` | `MX` | `NAPTR` | `PTR` | `SPF` | `SRV` | `TXT` | `CAA`\n\n> SPF records were formerly used to verify the identity of the sender of email messages. However, we no longer recommend that you create resource record sets for which the value of `Type` is `SPF` . RFC 7208, *Sender Policy Framework (SPF) for Authorizing Use of Domains in Email, Version 1* , has been updated to say, \"...[I]ts existence and mechanism defined in [RFC4408] have led to some interoperability issues. Accordingly, its use is no longer appropriate for SPF version 1; implementations are not to use it.\" In RFC 7208, see section 14.1, [The SPF DNS Record Type](http://tools.ietf.org/html/rfc7208#section-14.1) . \n\nValues for alias resource record sets:\n\n- *Amazon API Gateway custom regional APIs and edge-optimized APIs:* `A`\n- *CloudFront distributions:* `A`\n\nIf IPv6 is enabled for the distribution, create two resource record sets to route traffic to your distribution, one with a value of `A` and one with a value of `AAAA` .\n- *Amazon API Gateway environment that has a regionalized subdomain* : `A`\n- *ELB load balancers:* `A` | `AAAA`\n- *Amazon S3 buckets:* `A`\n- *Amazon Virtual Private Cloud interface VPC endpoints* `A`\n- *Another resource record set in this hosted zone:* Specify the type of the resource record set that you're creating the alias for. 
All values are supported except `NS` and `SOA` .\n\n> If you're creating an alias record that has the same name as the hosted zone (known as the zone apex), you can't route traffic to a record for which the value of `Type` is `CNAME` . This is because the alias record must have the same type as the record you're routing traffic to, and creating a CNAME record for the zone apex isn't supported even for an alias record.", "Weight": "*Weighted resource record sets only:* Among resource record sets that have the same combination of DNS name and type, a value that determines the proportion of DNS queries that Amazon Route 53 responds to using the current resource record set. Route 53 calculates the sum of the weights for the resource record sets that have the same combination of DNS name and type. Route 53 then responds to queries based on the ratio of a resource's weight to the total. Note the following:\n\n- You must specify a value for the `Weight` element for every weighted resource record set.\n- You can only specify one `ResourceRecord` per weighted resource record set.\n- You can't create latency, failover, or geolocation resource record sets that have the same values for the `Name` and `Type` elements as weighted resource record sets.\n- You can create a maximum of 100 weighted resource record sets that have the same values for the `Name` and `Type` elements.\n- For weighted (but not weighted alias) resource record sets, if you set `Weight` to `0` for a resource record set, Route 53 never responds to queries with the applicable value for that resource record set. However, if you set `Weight` to `0` for all resource record sets that have the same combination of DNS name and type, traffic is routed to all resources with equal probability.\n\nThe effect of setting `Weight` to `0` is different when you associate health checks with weighted resource record sets. For more information, see [Options for Configuring Route 53 Active-Active and Active-Passive Failover](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/dns-failover-configuring-options.html) in the *Amazon Route 53 Developer Guide* ." }, "AWS::Route53::RecordSet AliasTarget": { @@ -37221,7 +37736,7 @@ "ResourceRecords": "Information about the records that you want to create. Each record should be in the format appropriate for the record type specified by the `Type` property. For information about different record types and their record formats, see [Values That You Specify When You Create or Edit Amazon Route 53 Records](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/resource-record-sets-values.html) in the *Amazon Route 53 Developer Guide* .", "SetIdentifier": "*Resource record sets that have a routing policy other than simple:* An identifier that differentiates among multiple resource record sets that have the same combination of name and type, such as multiple weighted resource record sets named acme.example.com that have a type of A. In a group of resource record sets that have the same name and type, the value of `SetIdentifier` must be unique for each resource record set.\n\nFor information about routing policies, see [Choosing a Routing Policy](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/routing-policy.html) in the *Amazon Route 53 Developer Guide* .", "TTL": "The resource record cache time to live (TTL), in seconds. Note the following:\n\n- If you're creating or updating an alias resource record set, omit `TTL` . 
Amazon Route 53 uses the value of `TTL` for the alias target.\n- If you're associating this resource record set with a health check (if you're adding a `HealthCheckId` element), we recommend that you specify a `TTL` of 60 seconds or less so clients respond quickly to changes in health status.\n- All of the resource record sets in a group of weighted resource record sets must have the same value for `TTL` .\n- If a group of weighted resource record sets includes one or more weighted alias resource record sets for which the alias target is an ELB load balancer, we recommend that you specify a `TTL` of 60 seconds for all of the non-alias weighted resource record sets that have the same name and type. Values other than 60 seconds (the TTL for load balancers) will change the effect of the values that you specify for `Weight` .", - "Type": "The DNS record type. For information about different record types and how data is encoded for them, see [Supported DNS Resource Record Types](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/ResourceRecordTypes.html) in the *Amazon Route 53 Developer Guide* .\n\nValid values for basic resource record sets: `A` | `AAAA` | `CAA` | `CNAME` | `DS` | `MX` | `NAPTR` | `NS` | `PTR` | `SOA` | `SPF` | `SRV` | `TXT`\n\nValues for weighted, latency, geolocation, and failover resource record sets: `A` | `AAAA` | `CAA` | `CNAME` | `MX` | `NAPTR` | `PTR` | `SPF` | `SRV` | `TXT` . When creating a group of weighted, latency, geolocation, or failover resource record sets, specify the same value for all of the resource record sets in the group.\n\nValid values for multivalue answer resource record sets: `A` | `AAAA` | `MX` | `NAPTR` | `PTR` | `SPF` | `SRV` | `TXT`\n\n> SPF records were formerly used to verify the identity of the sender of email messages. However, we no longer recommend that you create resource record sets for which the value of `Type` is `SPF` . RFC 7208, *Sender Policy Framework (SPF) for Authorizing Use of Domains in Email, Version 1* , has been updated to say, \"...[I]ts existence and mechanism defined in [RFC4408] have led to some interoperability issues. Accordingly, its use is no longer appropriate for SPF version 1; implementations are not to use it.\" In RFC 7208, see section 14.1, [The SPF DNS Record Type](https://docs.aws.amazon.com/http://tools.ietf.org/html/rfc7208#section-14.1) . \n\nValues for alias resource record sets:\n\n- *Amazon API Gateway custom regional APIs and edge-optimized APIs:* `A`\n- *CloudFront distributions:* `A`\n\nIf IPv6 is enabled for the distribution, create two resource record sets to route traffic to your distribution, one with a value of `A` and one with a value of `AAAA` .\n- *Amazon API Gateway environment that has a regionalized subdomain* : `A`\n- *ELB load balancers:* `A` | `AAAA`\n- *Amazon S3 buckets:* `A`\n- *Amazon Virtual Private Cloud interface VPC endpoints* `A`\n- *Another resource record set in this hosted zone:* Specify the type of the resource record set that you're creating the alias for. All values are supported except `NS` and `SOA` .\n\n> If you're creating an alias record that has the same name as the hosted zone (known as the zone apex), you can't route traffic to a record for which the value of `Type` is `CNAME` . This is because the alias record must have the same type as the record you're routing traffic to, and creating a CNAME record for the zone apex isn't supported even for an alias record.", + "Type": "The DNS record type. 
For information about different record types and how data is encoded for them, see [Supported DNS Resource Record Types](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/ResourceRecordTypes.html) in the *Amazon Route 53 Developer Guide* .\n\nValid values for basic resource record sets: `A` | `AAAA` | `CAA` | `CNAME` | `DS` | `MX` | `NAPTR` | `NS` | `PTR` | `SOA` | `SPF` | `SRV` | `TXT`\n\nValues for weighted, latency, geolocation, and failover resource record sets: `A` | `AAAA` | `CAA` | `CNAME` | `MX` | `NAPTR` | `PTR` | `SPF` | `SRV` | `TXT` . When creating a group of weighted, latency, geolocation, or failover resource record sets, specify the same value for all of the resource record sets in the group.\n\nValid values for multivalue answer resource record sets: `A` | `AAAA` | `MX` | `NAPTR` | `PTR` | `SPF` | `SRV` | `TXT` | `CAA`\n\n> SPF records were formerly used to verify the identity of the sender of email messages. However, we no longer recommend that you create resource record sets for which the value of `Type` is `SPF` . RFC 7208, *Sender Policy Framework (SPF) for Authorizing Use of Domains in Email, Version 1* , has been updated to say, \"...[I]ts existence and mechanism defined in [RFC4408] have led to some interoperability issues. Accordingly, its use is no longer appropriate for SPF version 1; implementations are not to use it.\" In RFC 7208, see section 14.1, [The SPF DNS Record Type](http://tools.ietf.org/html/rfc7208#section-14.1) . \n\nValues for alias resource record sets:\n\n- *Amazon API Gateway custom regional APIs and edge-optimized APIs:* `A`\n- *CloudFront distributions:* `A`\n\nIf IPv6 is enabled for the distribution, create two resource record sets to route traffic to your distribution, one with a value of `A` and one with a value of `AAAA` .\n- *Amazon API Gateway environment that has a regionalized subdomain* : `A`\n- *ELB load balancers:* `A` | `AAAA`\n- *Amazon S3 buckets:* `A`\n- *Amazon Virtual Private Cloud interface VPC endpoints:* `A`\n- *Another resource record set in this hosted zone:* Specify the type of the resource record set that you're creating the alias for. All values are supported except `NS` and `SOA` .\n\n> If you're creating an alias record that has the same name as the hosted zone (known as the zone apex), you can't route traffic to a record for which the value of `Type` is `CNAME` . This is because the alias record must have the same type as the record you're routing traffic to, and creating a CNAME record for the zone apex isn't supported even for an alias record.",
      "Weight": "*Weighted resource record sets only:* Among resource record sets that have the same combination of DNS name and type, a value that determines the proportion of DNS queries that Amazon Route 53 responds to using the current resource record set. Route 53 calculates the sum of the weights for the resource record sets that have the same combination of DNS name and type. Route 53 then responds to queries based on the ratio of a resource's weight to the total. 
Note the following:\n\n- You must specify a value for the `Weight` element for every weighted resource record set.\n- You can only specify one `ResourceRecord` per weighted resource record set.\n- You can't create latency, failover, or geolocation resource record sets that have the same values for the `Name` and `Type` elements as weighted resource record sets.\n- You can create a maximum of 100 weighted resource record sets that have the same values for the `Name` and `Type` elements.\n- For weighted (but not weighted alias) resource record sets, if you set `Weight` to `0` for a resource record set, Route 53 never responds to queries with the applicable value for that resource record set. However, if you set `Weight` to `0` for all resource record sets that have the same combination of DNS name and type, traffic is routed to all resources with equal probability.\n\nThe effect of setting `Weight` to `0` is different when you associate health checks with weighted resource record sets. For more information, see [Options for Configuring Route 53 Active-Active and Active-Passive Failover](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/dns-failover-configuring-options.html) in the *Amazon Route 53 Developer Guide* ." }, "AWS::Route53RecoveryControl::Cluster": { @@ -38382,7 +38897,7 @@ "Content": "The content for the new SSM document in JSON or YAML. For more information about the schemas for SSM document content, see [SSM document schema features and examples](https://docs.aws.amazon.com/systems-manager/latest/userguide/document-schemas-features.html) in the *AWS Systems Manager User Guide* .\n\n> This parameter also supports `String` data types.", "DocumentFormat": "Specify the document format for the request. `JSON` is the default format.", "DocumentType": "The type of document to create.", - "Name": "A name for the SSM document.\n\n> You can't use the following strings as document name prefixes. These are reserved by AWS for use as document name prefixes:\n> \n> - `aws`\n> - `amazon`\n> - `amzn`", + "Name": "A name for the SSM document.\n\n> You can't use the following strings as document name prefixes. These are reserved by AWS for use as document name prefixes:\n> \n> - `aws`\n> - `amazon`\n> - `amzn`\n> - `AWSEC2`\n> - `AWSConfigRemediation`\n> - `AWSSupport`", "Requires": "A list of SSM documents required by a document. This parameter is used exclusively by AWS AppConfig . When a user creates an AWS AppConfig configuration in an SSM document, the user must also specify a required document for validation purposes. In this case, an `ApplicationConfiguration` document requires an `ApplicationConfigurationSchema` document for validation purposes. For more information, see [What is AWS AppConfig ?](https://docs.aws.amazon.com/appconfig/latest/userguide/what-is-appconfig.html) in the *AWS AppConfig User Guide* .", "Tags": "AWS CloudFormation resource tags to apply to the document. Use tags to help you identify and categorize resources.", "TargetType": "Specify a target type to define the kinds of resources the document can run on. For example, to run a document on EC2 instances, specify the following value: `/AWS::EC2::Instance` . If you specify a value of '/' the document can run on all types of resources. If you don't specify a value, the document can't run on any resources. 
For a list of valid resource types, see [AWS resource and property types reference](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-template-resource-type-ref.html) in the *AWS CloudFormation User Guide* .", @@ -38806,10 +39321,14 @@ }, "AWS::SageMaker::AppImageConfig": { "AppImageConfigName": "The name of the AppImageConfig. Must be unique to your account.", + "CodeEditorAppImageConfig": "The configuration for the file system and the runtime, such as the environment variables and entry point.", "JupyterLabAppImageConfig": "The configuration for the file system and the runtime, such as the environment variables and entry point.", "KernelGatewayImageConfig": "The configuration for the file system and kernels in the SageMaker image.", "Tags": "An array of key-value pairs to apply to this resource.\n\nFor more information, see [Tag](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-resource-tags.html) ." }, + "AWS::SageMaker::AppImageConfig CodeEditorAppImageConfig": { + "ContainerConfig": "" + }, "AWS::SageMaker::AppImageConfig ContainerConfig": { "ContainerArguments": "The arguments for the container when you're running the application.", "ContainerEntrypoint": "The entrypoint used to run the application in the container.", @@ -38995,6 +39514,7 @@ "VpcId": "The ID of the Amazon Virtual Private Cloud (Amazon VPC) that Studio uses for communication.\n\n*Length Constraints* : Maximum length of 32.\n\n*Pattern* : `[-0-9a-zA-Z]+`" }, "AWS::SageMaker::Domain CodeEditorAppSettings": { + "CustomImages": "A list of custom SageMaker images that are configured to run as a Code Editor app.", "DefaultResourceSpec": "The default instance type and the Amazon Resource Name (ARN) of the default SageMaker image used by the Code Editor app.", "LifecycleConfigArns": "The Amazon Resource Name (ARN) of the Code Editor application lifecycle configuration." }, @@ -39265,7 +39785,7 @@ "TableName": "The name of the Glue table." }, "AWS::SageMaker::FeatureGroup FeatureDefinition": { - "FeatureName": "The name of a feature. The type must be a string. `FeatureName` cannot be any of the following: `is_deleted` , `write_time` , `api_invocation_time` .\n\nThe name:\n\n- Must start and end with an alphanumeric character.\n- Can only include alphanumeric characters, underscores, and hyphens. Spaces are not allowed.", + "FeatureName": "The name of a feature. The type must be a string. `FeatureName` cannot be any of the following: `is_deleted` , `write_time` , `api_invocation_time` .\n\nThe name:\n\n- Must start with an alphanumeric character.\n- Can only include alphanumeric characters, underscores, and hyphens. Spaces are not allowed.", "FeatureType": "The value type of a feature. Valid values are Integral, Fractional, or String." }, "AWS::SageMaker::FeatureGroup OfflineStoreConfig": { @@ -40414,6 +40934,7 @@ "UserSettings": "A collection of settings that apply to users of Amazon SageMaker Studio." }, "AWS::SageMaker::UserProfile CodeEditorAppSettings": { + "CustomImages": "A list of custom SageMaker images that are configured to run as a Code Editor app.", "DefaultResourceSpec": "The default instance type and the Amazon Resource Name (ARN) of the default SageMaker image used by the Code Editor app.", "LifecycleConfigArns": "The Amazon Resource Name (ARN) of the Code Editor application lifecycle configuration." }, @@ -40779,12 +41300,161 @@ "AWS::SecurityHub::AutomationRule WorkflowUpdate": { "Status": "The status of the investigation into the finding. 
The workflow status is specific to an individual finding. It does not affect the generation of new findings. For example, setting the workflow status to `SUPPRESSED` or `RESOLVED` does not prevent a new finding for the same issue.\n\nThe allowed values are the following.\n\n- `NEW` - The initial state of a finding, before it is reviewed.\n\nSecurity Hub also resets `WorkFlowStatus` from `NOTIFIED` or `RESOLVED` to `NEW` in the following cases:\n\n- The record state changes from `ARCHIVED` to `ACTIVE` .\n- The compliance status changes from `PASSED` to either `WARNING` , `FAILED` , or `NOT_AVAILABLE` .\n- `NOTIFIED` - Indicates that you notified the resource owner about the security issue. Used when the initial reviewer is not the resource owner, and needs intervention from the resource owner.\n- `RESOLVED` - The finding was reviewed and remediated and is now considered resolved.\n- `SUPPRESSED` - Indicates that you reviewed the finding and do not believe that any action is needed. The finding is no longer updated."
    },
+    "AWS::SecurityHub::DelegatedAdmin": {
+      "AdminAccountId": "The AWS account identifier of the account to designate as the Security Hub administrator account."
+    },
    "AWS::SecurityHub::Hub": {
      "AutoEnableControls": "Whether to automatically enable new controls when they are added to standards that are enabled.\n\nBy default, this is set to `true` , and new controls are enabled automatically. To not automatically enable new controls, set this to `false` .",
      "ControlFindingGenerator": "Specifies whether an account has consolidated control findings turned on or off. If the value for this field is set to `SECURITY_CONTROL` , Security Hub generates a single finding for a control check even when the check applies to multiple enabled standards.\n\nIf the value for this field is set to `STANDARD_CONTROL` , Security Hub generates separate findings for a control check when the check applies to multiple enabled standards.\n\nThe value for this field in a member account matches the value in the administrator account. For accounts that aren't part of an organization, the default value of this field is `SECURITY_CONTROL` if you enabled Security Hub on or after February 23, 2023.",
      "EnableDefaultStandards": "Whether to enable the security standards that Security Hub has designated as automatically enabled. If you don't provide a value for `EnableDefaultStandards` , it is set to `true` , and the designated standards are automatically enabled in each AWS Region where you enable Security Hub . If you don't want to enable the designated standards, set `EnableDefaultStandards` to `false` .\n\nCurrently, the automatically enabled standards are the Center for Internet Security (CIS) AWS Foundations Benchmark v1.2.0 and AWS Foundational Security Best Practices (FSBP).",
      "Tags": "An array of key-value pairs to apply to this resource.\n\nFor more information, see [Tag](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-resource-tags.html) ."
    },
+    "AWS::SecurityHub::Insight": {
+      "Filters": "One or more attributes used to filter the findings included in the insight. The insight only includes findings that match the criteria defined in the filters. You can filter by up to 10 finding attributes. For each attribute, you can provide up to 20 filter values.",
+      "GroupByAttribute": "The grouping attribute for the insight's findings. Indicates how to group the matching findings, and identifies the type of item that the insight applies to. 
For example, if an insight is grouped by resource identifier, then the insight produces a list of resource identifiers.", + "Name": "The name of a Security Hub insight." + }, + "AWS::SecurityHub::Insight AwsSecurityFindingFilters": { + "AwsAccountId": "The AWS account ID in which a finding is generated.", + "AwsAccountName": "The name of the AWS account in which a finding is generated.", + "CompanyName": "The name of the findings provider (company) that owns the solution (product) that generates findings.", + "ComplianceAssociatedStandardsId": "The unique identifier of a standard in which a control is enabled. This field consists of the resource portion of the Amazon Resource Name (ARN) returned for a standard in the [DescribeStandards](https://docs.aws.amazon.com/securityhub/1.0/APIReference/API_DescribeStandards.html) API response.", + "ComplianceSecurityControlId": "The unique identifier of a control across standards. Values for this field typically consist of an AWS service and a number, such as APIGateway.5.", + "ComplianceSecurityControlParametersName": "The name of a security control parameter.", + "ComplianceSecurityControlParametersValue": "The current value of a security control parameter.", + "ComplianceStatus": "Exclusive to findings that are generated as the result of a check run against a specific rule in a supported standard, such as CIS AWS Foundations. Contains security standard-related finding details.", + "Confidence": "A finding's confidence. Confidence is defined as the likelihood that a finding accurately identifies the behavior or issue that it was intended to identify.\n\nConfidence is scored on a 0-100 basis using a ratio scale, where 0 means zero percent confidence and 100 means 100 percent confidence.", + "CreatedAt": "A timestamp that indicates when the security findings provider created the potential security issue that a finding reflects.\n\nThis field accepts only the specified formats. Timestamps can end with `Z` or `(\"+\" / \"-\") time-hour [\":\" time-minute]` . The time-secfrac after seconds is limited to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:\n\n- `YYYY-MM-DDTHH:MM:SSZ` (for example, `2019-01-31T23:00:00Z` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ` (for example, `2019-01-31T23:00:00.123456789Z` )\n- `YYYY-MM-DDTHH:MM:SS+HH:MM` (for example, `2024-01-04T15:25:10+17:59` )\n- `YYYY-MM-DDTHH:MM:SS-HHMM` (for example, `2024-01-04T15:25:10-1759` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM` (for example, `2024-01-04T15:25:10.123456789+17:59` )", + "Criticality": "The level of importance assigned to the resources associated with the finding.\n\nA score of 0 means that the underlying resources have no criticality, and a score of 100 is reserved for the most critical resources.", + "Description": "A finding's description.", + "FindingProviderFieldsConfidence": "The finding provider value for the finding confidence. 
Confidence is defined as the likelihood that a finding accurately identifies the behavior or issue that it was intended to identify.\n\nConfidence is scored on a 0-100 basis using a ratio scale, where 0 means zero percent confidence and 100 means 100 percent confidence.",
+      "FindingProviderFieldsCriticality": "The finding provider value for the level of importance assigned to the resources associated with the findings.\n\nA score of 0 means that the underlying resources have no criticality, and a score of 100 is reserved for the most critical resources.",
+      "FindingProviderFieldsRelatedFindingsId": "The finding identifier of a related finding that is identified by the finding provider.",
+      "FindingProviderFieldsRelatedFindingsProductArn": "The ARN of the solution that generated a related finding that is identified by the finding provider.",
+      "FindingProviderFieldsSeverityLabel": "The finding provider value for the severity label.",
+      "FindingProviderFieldsSeverityOriginal": "The finding provider's original value for the severity.",
+      "FindingProviderFieldsTypes": "One or more finding types that the finding provider assigned to the finding. Uses the format of `namespace/category/classifier` that classifies a finding.\n\nValid namespace values are: Software and Configuration Checks | TTPs | Effects | Unusual Behaviors | Sensitive Data Identifications",
+      "FirstObservedAt": "A timestamp that indicates when the security findings provider first observed the potential security issue that a finding captured.\n\nThis field accepts only the specified formats. Timestamps can end with `Z` or `(\"+\" / \"-\") time-hour [\":\" time-minute]` . The time-secfrac after seconds is limited to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:\n\n- `YYYY-MM-DDTHH:MM:SSZ` (for example, `2019-01-31T23:00:00Z` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ` (for example, `2019-01-31T23:00:00.123456789Z` )\n- `YYYY-MM-DDTHH:MM:SS+HH:MM` (for example, `2024-01-04T15:25:10+17:59` )\n- `YYYY-MM-DDTHH:MM:SS-HHMM` (for example, `2024-01-04T15:25:10-1759` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM` (for example, `2024-01-04T15:25:10.123456789+17:59` )",
+      "GeneratorId": "The identifier for the solution-specific component (a discrete unit of logic) that generated a finding. In various security findings providers' solutions, this generator can be called a rule, a check, a detector, a plugin, etc.",
+      "Id": "The security findings provider-specific identifier for a finding.",
+      "Keyword": "This field is deprecated. A keyword for a finding.",
+      "LastObservedAt": "A timestamp that indicates when the security findings provider most recently observed the potential security issue that a finding captured.\n\nThis field accepts only the specified formats. Timestamps can end with `Z` or `(\"+\" / \"-\") time-hour [\":\" time-minute]` . The time-secfrac after seconds is limited to a maximum of 9 digits. The offset is bounded by +/-18:00. 
Here are valid timestamp formats with examples:\n\n- `YYYY-MM-DDTHH:MM:SSZ` (for example, `2019-01-31T23:00:00Z` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ` (for example, `2019-01-31T23:00:00.123456789Z` )\n- `YYYY-MM-DDTHH:MM:SS+HH:MM` (for example, `2024-01-04T15:25:10+17:59` )\n- `YYYY-MM-DDTHH:MM:SS-HHMM` (for example, `2024-01-04T15:25:10-1759` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM` (for example, `2024-01-04T15:25:10.123456789+17:59` )",
+      "MalwareName": "The name of the malware that was observed.",
+      "MalwarePath": "The filesystem path of the malware that was observed.",
+      "MalwareState": "The state of the malware that was observed.",
+      "MalwareType": "The type of the malware that was observed.",
+      "NetworkDestinationDomain": "The destination domain of network-related information about a finding.",
+      "NetworkDestinationIpV4": "The destination IPv4 address of network-related information about a finding.",
+      "NetworkDestinationIpV6": "The destination IPv6 address of network-related information about a finding.",
+      "NetworkDestinationPort": "The destination port of network-related information about a finding.",
+      "NetworkDirection": "Indicates the direction of network traffic associated with a finding.",
+      "NetworkProtocol": "The protocol of network-related information about a finding.",
+      "NetworkSourceDomain": "The source domain of network-related information about a finding.",
+      "NetworkSourceIpV4": "The source IPv4 address of network-related information about a finding.",
+      "NetworkSourceIpV6": "The source IPv6 address of network-related information about a finding.",
+      "NetworkSourceMac": "The source media access control (MAC) address of network-related information about a finding.",
+      "NetworkSourcePort": "The source port of network-related information about a finding.",
+      "NoteText": "The text of a note.",
+      "NoteUpdatedAt": "The timestamp of when the note was updated.",
+      "NoteUpdatedBy": "The principal that created a note.",
+      "ProcessLaunchedAt": "A timestamp that identifies when the process was launched.\n\nThis field accepts only the specified formats. Timestamps can end with `Z` or `(\"+\" / \"-\") time-hour [\":\" time-minute]` . The time-secfrac after seconds is limited to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:\n\n- `YYYY-MM-DDTHH:MM:SSZ` (for example, `2019-01-31T23:00:00Z` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ` (for example, `2019-01-31T23:00:00.123456789Z` )\n- `YYYY-MM-DDTHH:MM:SS+HH:MM` (for example, `2024-01-04T15:25:10+17:59` )\n- `YYYY-MM-DDTHH:MM:SS-HHMM` (for example, `2024-01-04T15:25:10-1759` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM` (for example, `2024-01-04T15:25:10.123456789+17:59` )",
+      "ProcessName": "The name of the process.",
+      "ProcessParentPid": "The parent process ID. This field accepts positive integers between `0` and `2147483647` .",
+      "ProcessPath": "The path to the process executable.",
+      "ProcessPid": "The process ID.",
+      "ProcessTerminatedAt": "A timestamp that identifies when the process was terminated.\n\nThis field accepts only the specified formats. Timestamps can end with `Z` or `(\"+\" / \"-\") time-hour [\":\" time-minute]` . The time-secfrac after seconds is limited to a maximum of 9 digits. The offset is bounded by +/-18:00. 
Here are valid timestamp formats with examples:\n\n- `YYYY-MM-DDTHH:MM:SSZ` (for example, `2019-01-31T23:00:00Z` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ` (for example, `2019-01-31T23:00:00.123456789Z` )\n- `YYYY-MM-DDTHH:MM:SS+HH:MM` (for example, `2024-01-04T15:25:10+17:59` )\n- `YYYY-MM-DDTHH:MM:SS-HHMM` (for example, `2024-01-04T15:25:10-1759` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM` (for example, `2024-01-04T15:25:10.123456789+17:59` )", + "ProductArn": "The ARN generated by Security Hub that uniquely identifies a third-party company (security findings provider) after this provider's product (solution that generates findings) is registered with Security Hub.", + "ProductFields": "A data type where security findings providers can include additional solution-specific details that aren't part of the defined `AwsSecurityFinding` format.", + "ProductName": "The name of the solution (product) that generates findings.", + "RecommendationText": "The recommendation of what to do about the issue described in a finding.", + "RecordState": "The updated record state for the finding.", + "Region": "The Region from which the finding was generated.", + "RelatedFindingsId": "The solution-generated identifier for a related finding.", + "RelatedFindingsProductArn": "The ARN of the solution that generated a related finding.", + "ResourceApplicationArn": "The ARN of the application that is related to a finding.", + "ResourceApplicationName": "The name of the application that is related to a finding.", + "ResourceAwsEc2InstanceIamInstanceProfileArn": "The IAM profile ARN of the instance.", + "ResourceAwsEc2InstanceImageId": "The Amazon Machine Image (AMI) ID of the instance.", + "ResourceAwsEc2InstanceIpV4Addresses": "The IPv4 addresses associated with the instance.", + "ResourceAwsEc2InstanceIpV6Addresses": "The IPv6 addresses associated with the instance.", + "ResourceAwsEc2InstanceKeyName": "The key name associated with the instance.", + "ResourceAwsEc2InstanceLaunchedAt": "The date and time the instance was launched.", + "ResourceAwsEc2InstanceSubnetId": "The identifier of the subnet that the instance was launched in.", + "ResourceAwsEc2InstanceType": "The instance type of the instance.", + "ResourceAwsEc2InstanceVpcId": "The identifier of the VPC that the instance was launched in.", + "ResourceAwsIamAccessKeyCreatedAt": "The creation date/time of the IAM access key related to a finding.", + "ResourceAwsIamAccessKeyPrincipalName": "The name of the principal that is associated with an IAM access key.", + "ResourceAwsIamAccessKeyStatus": "The status of the IAM access key related to a finding.", + "ResourceAwsIamAccessKeyUserName": "This field is deprecated. The username associated with the IAM access key related to a finding.", + "ResourceAwsIamUserUserName": "The name of an IAM user.", + "ResourceAwsS3BucketOwnerId": "The canonical user ID of the owner of the S3 bucket.", + "ResourceAwsS3BucketOwnerName": "The display name of the owner of the S3 bucket.", + "ResourceContainerImageId": "The identifier of the image related to a finding.", + "ResourceContainerImageName": "The name of the image related to a finding.", + "ResourceContainerLaunchedAt": "A timestamp that identifies when the container was started.\n\nThis field accepts only the specified formats. Timestamps can end with `Z` or `(\"+\" / \"-\") time-hour [\":\" time-minute]` . The time-secfrac after seconds is limited to a maximum of 9 digits. The offset is bounded by +/-18:00. 
Here are valid timestamp formats with examples:\n\n- `YYYY-MM-DDTHH:MM:SSZ` (for example, `2019-01-31T23:00:00Z` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ` (for example, `2019-01-31T23:00:00.123456789Z` )\n- `YYYY-MM-DDTHH:MM:SS+HH:MM` (for example, `2024-01-04T15:25:10+17:59` )\n- `YYYY-MM-DDTHH:MM:SS-HHMM` (for example, `2024-01-04T15:25:10-1759` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM` (for example, `2024-01-04T15:25:10.123456789+17:59` )", + "ResourceContainerName": "The name of the container related to a finding.", + "ResourceDetailsOther": "The details of a resource that doesn't have a specific subfield for the resource type defined.", + "ResourceId": "The canonical identifier for the given resource type.", + "ResourcePartition": "The canonical AWS partition name that the Region is assigned to.", + "ResourceRegion": "The canonical AWS external Region name where this resource is located.", + "ResourceTags": "A list of AWS tags associated with a resource at the time the finding was processed.", + "ResourceType": "Specifies the type of the resource that details are provided for.", + "Sample": "Indicates whether or not sample findings are included in the filter results.", + "SeverityLabel": "The label of a finding's severity.", + "SeverityNormalized": "Deprecated. The normalized severity of a finding. Instead of providing `Normalized` , provide `Label` .\n\nIf you provide `Label` and do not provide `Normalized` , then `Normalized` is set automatically as follows.\n\n- `INFORMATIONAL` - 0\n- `LOW` - 1\n- `MEDIUM` - 40\n- `HIGH` - 70\n- `CRITICAL` - 90", + "SeverityProduct": "Deprecated. This attribute isn't included in findings. Instead of providing `Product` , provide `Original` .\n\nThe native severity as defined by the AWS service or integrated partner product that generated the finding.", + "SourceUrl": "A URL that links to a page about the current finding in the security findings provider's solution.", + "ThreatIntelIndicatorCategory": "The category of a threat intelligence indicator.", + "ThreatIntelIndicatorLastObservedAt": "A timestamp that identifies the last observation of a threat intelligence indicator.", + "ThreatIntelIndicatorSource": "The source of the threat intelligence.", + "ThreatIntelIndicatorSourceUrl": "The URL for more details from the source of the threat intelligence.", + "ThreatIntelIndicatorType": "The type of a threat intelligence indicator.", + "ThreatIntelIndicatorValue": "The value of a threat intelligence indicator.", + "Title": "A finding's title.", + "Type": "A finding type in the format of `namespace/category/classifier` that classifies a finding.", + "UpdatedAt": "A timestamp that indicates when the security findings provider last updated the finding record.\n\nThis field accepts only the specified formats. Timestamps can end with `Z` or `(\"+\" / \"-\") time-hour [\":\" time-minute]` . The time-secfrac after seconds is limited to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:\n\n- `YYYY-MM-DDTHH:MM:SSZ` (for example, `2019-01-31T23:00:00Z` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ` (for example, `2019-01-31T23:00:00.123456789Z` )\n- `YYYY-MM-DDTHH:MM:SS+HH:MM` (for example, `2024-01-04T15:25:10+17:59` )\n- `YYYY-MM-DDTHH:MM:SS-HHMM` (for example, `2024-01-04T15:25:10-1759` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM` (for example, `2024-01-04T15:25:10.123456789+17:59` )", + "UserDefinedFields": "A list of name/value string pairs associated with the finding. 
These are custom, user-defined fields added to a finding.", + "VerificationState": "The veracity of a finding.", + "VulnerabilitiesExploitAvailable": "Indicates whether a software vulnerability in your environment has a known exploit. You can filter findings by this field only if you use Security Hub and Amazon Inspector.", + "VulnerabilitiesFixAvailable": "Indicates whether a vulnerability is fixed in a newer version of the affected software packages. You can filter findings by this field only if you use Security Hub and Amazon Inspector.", + "WorkflowState": "The workflow state of a finding.\n\nNote that this field is deprecated. To search for a finding based on its workflow status, use `WorkflowStatus` .", + "WorkflowStatus": "The status of the investigation into a finding. Allowed values are the following.\n\n- `NEW` - The initial state of a finding, before it is reviewed.\n\nSecurity Hub also resets the workflow status from `NOTIFIED` or `RESOLVED` to `NEW` in the following cases:\n\n- `RecordState` changes from `ARCHIVED` to `ACTIVE` .\n- `Compliance.Status` changes from `PASSED` to either `WARNING` , `FAILED` , or `NOT_AVAILABLE` .\n- `NOTIFIED` - Indicates that the resource owner has been notified about the security issue. Used when the initial reviewer is not the resource owner, and needs intervention from the resource owner.\n\nIf one of the following occurs, the workflow status is changed automatically from `NOTIFIED` to `NEW` :\n\n- `RecordState` changes from `ARCHIVED` to `ACTIVE` .\n- `Compliance.Status` changes from `PASSED` to `FAILED` , `WARNING` , or `NOT_AVAILABLE` .\n- `SUPPRESSED` - Indicates that you reviewed the finding and do not believe that any action is needed.\n\nThe workflow status of a `SUPPRESSED` finding does not change if `RecordState` changes from `ARCHIVED` to `ACTIVE` .\n- `RESOLVED` - The finding was reviewed and remediated and is now considered resolved.\n\nThe finding remains `RESOLVED` unless one of the following occurs:\n\n- `RecordState` changes from `ARCHIVED` to `ACTIVE` .\n- `Compliance.Status` changes from `PASSED` to `FAILED` , `WARNING` , or `NOT_AVAILABLE` .\n\nIn those cases, the workflow status is automatically reset to `NEW` .\n\nFor findings from controls, if `Compliance.Status` is `PASSED` , then Security Hub automatically sets the workflow status to `RESOLVED` ." + }, + "AWS::SecurityHub::Insight BooleanFilter": { + "Value": "The value of the boolean." + }, + "AWS::SecurityHub::Insight DateFilter": { + "DateRange": "A date range for the date filter.", + "End": "A timestamp that provides the end date for the date filter.\n\nThis field accepts only the specified formats. Timestamps can end with `Z` or `(\"+\" / \"-\") time-hour [\":\" time-minute]` . The time-secfrac after seconds is limited to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:\n\n- `YYYY-MM-DDTHH:MM:SSZ` (for example, `2019-01-31T23:00:00Z` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ` (for example, `2019-01-31T23:00:00.123456789Z` )\n- `YYYY-MM-DDTHH:MM:SS+HH:MM` (for example, `2024-01-04T15:25:10+17:59` )\n- `YYYY-MM-DDTHH:MM:SS-HHMM` (for example, `2024-01-04T15:25:10-1759` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM` (for example, `2024-01-04T15:25:10.123456789+17:59` )", + "Start": "A timestamp that provides the start date for the date filter.\n\nThis field accepts only the specified formats. Timestamps can end with `Z` or `(\"+\" / \"-\") time-hour [\":\" time-minute]` . 
The time-secfrac after seconds is limited to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:\n\n- `YYYY-MM-DDTHH:MM:SSZ` (for example, `2019-01-31T23:00:00Z` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ` (for example, `2019-01-31T23:00:00.123456789Z` )\n- `YYYY-MM-DDTHH:MM:SS+HH:MM` (for example, `2024-01-04T15:25:10+17:59` )\n- `YYYY-MM-DDTHH:MM:SS-HHMM` (for example, `2024-01-04T15:25:10-1759` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM` (for example, `2024-01-04T15:25:10.123456789+17:59` )" + }, + "AWS::SecurityHub::Insight DateRange": { + "Unit": "A date range unit for the date filter.", + "Value": "A date range value for the date filter." + }, + "AWS::SecurityHub::Insight IpFilter": { + "Cidr": "A finding's CIDR value." + }, + "AWS::SecurityHub::Insight KeywordFilter": { + "Value": "A value for the keyword." + }, + "AWS::SecurityHub::Insight MapFilter": { + "Comparison": "The condition to apply to the key value when filtering Security Hub findings with a map filter.\n\nTo search for values that have the filter value, use one of the following comparison operators:\n\n- To search for values that include the filter value, use `CONTAINS` . For example, for the `ResourceTags` field, the filter `Department CONTAINS Security` matches findings that include the value `Security` for the `Department` tag. In the same example, a finding with a value of `Security team` for the `Department` tag is a match.\n- To search for values that exactly match the filter value, use `EQUALS` . For example, for the `ResourceTags` field, the filter `Department EQUALS Security` matches findings that have the value `Security` for the `Department` tag.\n\n`CONTAINS` and `EQUALS` filters on the same field are joined by `OR` . A finding matches if it matches any one of those filters. For example, the filters `Department CONTAINS Security OR Department CONTAINS Finance` match a finding that includes either `Security` , `Finance` , or both values.\n\nTo search for values that don't have the filter value, use one of the following comparison operators:\n\n- To search for values that exclude the filter value, use `NOT_CONTAINS` . For example, for the `ResourceTags` field, the filter `Department NOT_CONTAINS Finance` matches findings that exclude the value `Finance` for the `Department` tag.\n- To search for values other than the filter value, use `NOT_EQUALS` . For example, for the `ResourceTags` field, the filter `Department NOT_EQUALS Finance` matches findings that don\u2019t have the value `Finance` for the `Department` tag.\n\n`NOT_CONTAINS` and `NOT_EQUALS` filters on the same field are joined by `AND` . A finding matches only if it matches all of those filters. For example, the filters `Department NOT_CONTAINS Security AND Department NOT_CONTAINS Finance` match a finding that excludes both the `Security` and `Finance` values.\n\n`CONTAINS` filters can only be used with other `CONTAINS` filters. `NOT_CONTAINS` filters can only be used with other `NOT_CONTAINS` filters.\n\nYou can\u2019t have both a `CONTAINS` filter and a `NOT_CONTAINS` filter on the same field. Similarly, you can\u2019t have both an `EQUALS` filter and a `NOT_EQUALS` filter on the same field. Combining filters in this way returns an error.\n\n`CONTAINS` and `NOT_CONTAINS` operators can be used only with automation rules. 
For more information, see [Automation rules](https://docs.aws.amazon.com/securityhub/latest/userguide/automation-rules.html) in the *AWS Security Hub User Guide* .", + "Key": "The key of the map filter. For example, for `ResourceTags` , `Key` identifies the name of the tag. For `UserDefinedFields` , `Key` is the name of the field.", + "Value": "The value for the key in the map filter. Filter values are case sensitive. For example, one of the values for a tag called `Department` might be `Security` . If you provide `security` as the filter value, then there's no match." + }, + "AWS::SecurityHub::Insight NumberFilter": { + "Eq": "The equal-to condition to be applied to a single field when querying for findings.", + "Gte": "The greater-than-equal condition to be applied to a single field when querying for findings.", + "Lte": "The less-than-equal condition to be applied to a single field when querying for findings." + }, + "AWS::SecurityHub::Insight StringFilter": { + "Comparison": "The condition to apply to a string value when filtering Security Hub findings.\n\nTo search for values that have the filter value, use one of the following comparison operators:\n\n- To search for values that include the filter value, use `CONTAINS` . For example, the filter `Title CONTAINS CloudFront` matches findings that have a `Title` that includes the string CloudFront.\n- To search for values that exactly match the filter value, use `EQUALS` . For example, the filter `AwsAccountId EQUALS 123456789012` only matches findings that have an account ID of `123456789012` .\n- To search for values that start with the filter value, use `PREFIX` . For example, the filter `ResourceRegion PREFIX us` matches findings that have a `ResourceRegion` that starts with `us` . A `ResourceRegion` that starts with a different value, such as `af` , `ap` , or `ca` , doesn't match.\n\n`CONTAINS` , `EQUALS` , and `PREFIX` filters on the same field are joined by `OR` . A finding matches if it matches any one of those filters. For example, the filters `Title CONTAINS CloudFront OR Title CONTAINS CloudWatch` match a finding that includes either `CloudFront` , `CloudWatch` , or both strings in the title.\n\nTo search for values that don\u2019t have the filter value, use one of the following comparison operators:\n\n- To search for values that exclude the filter value, use `NOT_CONTAINS` . For example, the filter `Title NOT_CONTAINS CloudFront` matches findings that have a `Title` that excludes the string CloudFront.\n- To search for values other than the filter value, use `NOT_EQUALS` . For example, the filter `AwsAccountId NOT_EQUALS 123456789012` only matches findings that have an account ID other than `123456789012` .\n- To search for values that don't start with the filter value, use `PREFIX_NOT_EQUALS` . For example, the filter `ResourceRegion PREFIX_NOT_EQUALS us` matches findings with a `ResourceRegion` that starts with a value other than `us` .\n\n`NOT_CONTAINS` , `NOT_EQUALS` , and `PREFIX_NOT_EQUALS` filters on the same field are joined by `AND` . A finding matches only if it matches all of those filters. For example, the filters `Title NOT_CONTAINS CloudFront AND Title NOT_CONTAINS CloudWatch` match a finding that excludes both `CloudFront` and `CloudWatch` in the title.\n\nYou can\u2019t have both a `CONTAINS` filter and a `NOT_CONTAINS` filter on the same field. Similarly, you can't provide both an `EQUALS` filter and a `NOT_EQUALS` or `PREFIX_NOT_EQUALS` filter on the same field. 
Combining filters in this way returns an error. `CONTAINS` filters can only be used with other `CONTAINS` filters. `NOT_CONTAINS` filters can only be used with other `NOT_CONTAINS` filters.\n\nYou can combine `PREFIX` filters with `NOT_EQUALS` or `PREFIX_NOT_EQUALS` filters for the same field. Security Hub first processes the `PREFIX` filters, and then the `NOT_EQUALS` or `PREFIX_NOT_EQUALS` filters.\n\nFor example, for the following filters, Security Hub first identifies findings that have resource types that start with either `AwsIam` or `AwsEc2` . It then excludes findings that have a resource type of `AwsIamPolicy` and findings that have a resource type of `AwsEc2NetworkInterface` .\n\n- `ResourceType PREFIX AwsIam`\n- `ResourceType PREFIX AwsEc2`\n- `ResourceType NOT_EQUALS AwsIamPolicy`\n- `ResourceType NOT_EQUALS AwsEc2NetworkInterface`\n\n`CONTAINS` and `NOT_CONTAINS` operators can be used only with automation rules. For more information, see [Automation rules](https://docs.aws.amazon.com/securityhub/latest/userguide/automation-rules.html) in the *AWS Security Hub User Guide* .",
+      "Value": "The string filter value. Filter values are case sensitive. For example, the product name for control-based findings is `Security Hub` . If you provide `security hub` as the filter value, there's no match."
+    },
+    "AWS::SecurityHub::ProductSubscription": {
+      "ProductArn": "The ARN of the product to enable the integration for."
+    },
    "AWS::SecurityHub::Standard": {
      "DisabledStandardsControls": "Specifies which controls are to be disabled in a standard.\n\n*Maximum* : `100`",
      "StandardsArn": "The ARN of the standard that you want to enable. To view a list of available Security Hub standards and their ARNs, use the [`DescribeStandards`](https://docs.aws.amazon.com/securityhub/1.0/APIReference/API_DescribeStandards.html) API operation."
@@ -40793,6 +41463,70 @@
      "Reason": "A user-defined reason for changing a control's enablement status in a specified standard. If you are disabling a control, then this property is required.",
      "StandardsControlArn": "The Amazon Resource Name (ARN) of the control."
    },
+    "AWS::SecurityLake::AwsLogSource": {
+      "Accounts": "Specify the AWS account information where you want to enable Security Lake.",
+      "DataLakeArn": "The Amazon Resource Name (ARN) used to create the data lake.",
+      "SourceName": "The name for an AWS source. This must be a Regionally unique value. For the list of sources supported by Amazon Security Lake, see [Collecting data from AWS services](https://docs.aws.amazon.com//security-lake/latest/userguide/internal-sources.html) in the Amazon Security Lake User Guide.",
+      "SourceVersion": "The version for an AWS source. For more details about source versions supported by Amazon Security Lake, see [OCSF source identification](https://docs.aws.amazon.com//security-lake/latest/userguide/open-cybersecurity-schema-framework.html#ocsf-source-identification) in the Amazon Security Lake User Guide. This must be a Regionally unique value."
+    },
+    "AWS::SecurityLake::DataLake": {
+      "EncryptionConfiguration": "Provides encryption details of the Amazon Security Lake object.",
+      "LifecycleConfiguration": "You can customize Security Lake to store data in your preferred AWS Regions for your preferred amount of time. Lifecycle management can help you comply with different compliance requirements. 
For more details, see [Lifecycle management](https://docs.aws.amazon.com//security-lake/latest/userguide/lifecycle-management.html) in the Amazon Security Lake User Guide.",
+      "MetaStoreManagerRoleArn": "The Amazon Resource Name (ARN) used to create and update the AWS Glue table. This table contains partitions generated by the ingestion and normalization of AWS log sources and custom sources.",
+      "ReplicationConfiguration": "Provides replication details of the Amazon Security Lake object.",
+      "Tags": "An array of objects, one for each tag to associate with the data lake configuration. For each tag, you must specify both a tag key and a tag value. A tag value cannot be null, but it can be an empty string."
+    },
+    "AWS::SecurityLake::DataLake EncryptionConfiguration": {
+      "KmsKeyId": "The ID of the KMS encryption key used by Amazon Security Lake to encrypt the Security Lake object."
+    },
+    "AWS::SecurityLake::DataLake Expiration": {
+      "Days": "The number of days before data expires in the Amazon Security Lake object."
+    },
+    "AWS::SecurityLake::DataLake LifecycleConfiguration": {
+      "Expiration": "Provides data expiration details of the Amazon Security Lake object.",
+      "Transitions": "Provides data storage transition details of the Amazon Security Lake object. By configuring these settings, you can specify your preferred Amazon S3 storage class and the time period for S3 objects to stay in that storage class before they transition to a different storage class."
+    },
+    "AWS::SecurityLake::DataLake ReplicationConfiguration": {
+      "Regions": "Specifies one or more centralized rollup Regions. The AWS Region specified in the region parameter of the `CreateDataLake` or `UpdateDataLake` operations contributes data to the rollup Region or Regions specified in this parameter.\n\nReplication enables automatic, asynchronous copying of objects across Amazon S3 buckets. S3 buckets that are configured for object replication can be owned by the same AWS account or by different accounts. You can replicate objects to a single destination bucket or to multiple destination buckets. The destination buckets can be in different Regions or within the same Region as the source bucket.",
+      "RoleArn": "Replication settings for the Amazon S3 buckets. This parameter uses the AWS Identity and Access Management (IAM) role you created that is managed by Security Lake, to ensure the replication setting is correct."
+    },
+    "AWS::SecurityLake::DataLake Tag": {
+      "Key": "The name of the tag. This is a general label that acts as a category for a more specific tag value ( `value` ).",
+      "Value": "The value that\u2019s associated with the specified tag key ( `key` ). This value acts as a descriptor for the tag key. A tag value cannot be null, but it can be an empty string."
+    },
+    "AWS::SecurityLake::DataLake Transitions": {
+      "Days": "The number of days before data transitions to a different S3 Storage Class in the Amazon Security Lake object.",
+      "StorageClass": "The list of storage classes that you can choose from based on the data access, resiliency, and cost requirements of your workloads. The default storage class is S3 Standard."
+    },
+    "AWS::SecurityLake::Subscriber": {
+      "AccessTypes": "You can choose to notify subscribers of new objects with an Amazon Simple Queue Service (Amazon SQS) queue or through messaging to an HTTPS endpoint provided by the subscriber.\n\nSubscribers can consume data by directly querying AWS Lake Formation tables in your Amazon S3 bucket through services like Amazon Athena. 
This subscription type is defined as `LAKEFORMATION` .",
+      "DataLakeArn": "The Amazon Resource Name (ARN) used to create the data lake.",
+      "Sources": "Amazon Security Lake supports log and event collection for natively supported AWS services . For more information, see the [Amazon Security Lake User Guide](https://docs.aws.amazon.com//security-lake/latest/userguide/source-management.html) .",
+      "SubscriberDescription": "The subscriber descriptions for a subscriber account. The description for a subscriber includes `subscriberName` , `accountID` , `externalID` , and `subscriberId` .",
+      "SubscriberIdentity": "The AWS identity used to access your data.",
+      "SubscriberName": "The name of your Amazon Security Lake subscriber account.",
+      "Tags": "An array of objects, one for each tag to associate with the subscriber. For each tag, you must specify both a tag key and a tag value. A tag value cannot be null, but it can be an empty string."
+    },
+    "AWS::SecurityLake::Subscriber AwsLogSource": {
+      "SourceName": "Source name of the natively supported AWS service that is supported as an Amazon Security Lake source. For the list of sources supported by Amazon Security Lake, see [Collecting data from AWS services](https://docs.aws.amazon.com//security-lake/latest/userguide/internal-sources.html) in the Amazon Security Lake User Guide.",
+      "SourceVersion": "Source version of the natively supported AWS service that is supported as an Amazon Security Lake source. For more details about source versions supported by Amazon Security Lake, see [OCSF source identification](https://docs.aws.amazon.com//security-lake/latest/userguide/open-cybersecurity-schema-framework.html#ocsf-source-identification) in the Amazon Security Lake User Guide."
+    },
+    "AWS::SecurityLake::Subscriber CustomLogSource": {
+      "SourceName": "The name of the custom log source.",
+      "SourceVersion": "The source version of the custom log source."
+    },
+    "AWS::SecurityLake::Subscriber Source": {
+      "AwsLogSource": "The natively supported AWS service that is used as an Amazon Security Lake source to collect logs and events.",
+      "CustomLogSource": "The custom log source that is used as an Amazon Security Lake source to collect logs and events."
+    },
+    "AWS::SecurityLake::Subscriber SubscriberIdentity": {
+      "ExternalId": "The external ID is a unique identifier that the subscriber provides to you.",
+      "Principal": "Principals can include accounts, users, roles, federated users, or AWS services."
+    },
+    "AWS::SecurityLake::Subscriber Tag": {
+      "Key": "The name of the tag. This is a general label that acts as a category for a more specific tag value ( `value` ).",
+      "Value": "The value that\u2019s associated with the specified tag key ( `key` ). This value acts as a descriptor for the tag key. A tag value cannot be null, but it can be an empty string."
+    },
    "AWS::ServiceCatalog::AcceptedPortfolioShare": {
      "AcceptLanguage": "The language code.\n\n- `jp` - Japanese\n- `zh` - Chinese",
      "PortfolioId": "The portfolio identifier."
    },
@@ -41315,6 +42049,34 @@
      "Key": "The key of the tag. Tag keys are case sensitive.",
      "Value": "The value of the tag. Tag values are case-sensitive and can be null."
    },
+    "AWS::Timestream::InfluxDBInstance": {
+      "AllocatedStorage": "The amount of storage to allocate for your DB storage type in GiB (gibibytes).",
+      "Bucket": "The name of the initial InfluxDB bucket. All InfluxDB data is stored in a bucket. A bucket combines the concept of a database and a retention period (the duration of time that each data point persists). 
A bucket belongs to an organization.", + "DbInstanceType": "The Timestream for InfluxDB DB instance type to run on.", + "DbParameterGroupIdentifier": "The name or ID of the DB parameter group to assign to your DB instance. DB parameter groups specify how the database is configured. For example, DB parameter groups can specify the limit for query concurrency.", + "DbStorageType": "The Timestream for InfluxDB DB storage type to read and write InfluxDB data.\n\nYou can choose between 3 different types of provisioned Influx IOPS included storage according to your workload's requirements:\n\n- Influx IO Included 3000 IOPS\n- Influx IO Included 12000 IOPS\n- Influx IO Included 16000 IOPS", + "DeploymentType": "Specifies whether the Timestream for InfluxDB instance is deployed as Single-AZ or with a Multi-AZ standby for high availability.", + "LogDeliveryConfiguration": "Configuration for sending InfluxDB engine logs to a specified S3 bucket.", + "Name": "The name that uniquely identifies the DB instance when interacting with the Amazon Timestream for InfluxDB API and CLI commands. This name will also be a prefix included in the endpoint. DB instance names must be unique per customer and per Region.", + "Organization": "The name of the initial organization for the initial admin user in InfluxDB. An InfluxDB organization is a workspace for a group of users.", + "Password": "The password of the initial admin user created in InfluxDB. This password will allow you to access the InfluxDB UI to perform various administrative tasks and also use the InfluxDB CLI to create an operator token. These attributes will be stored in a Secret created in Amazon Secrets Manager in your account.", + "PubliclyAccessible": "Configures the DB instance with a public IP to facilitate access.", + "Tags": "A list of key-value pairs to associate with the DB instance.", + "Username": "The username of the initial admin user created in InfluxDB. Must start with a letter and can't end with a hyphen or contain two consecutive hyphens. For example, `my-user1` . This username will allow you to access the InfluxDB UI to perform various administrative tasks and also use the InfluxDB CLI to create an operator token. These attributes will be stored in a Secret created in Amazon Secrets Manager in your account.", + "VpcSecurityGroupIds": "A list of VPC security group IDs to associate with the DB instance.", + "VpcSubnetIds": "A list of VPC subnet IDs to associate with the DB instance. Provide at least two VPC subnet IDs in different Availability Zones when deploying with a Multi-AZ standby." + }, + "AWS::Timestream::InfluxDBInstance LogDeliveryConfiguration": { + "S3Configuration": "Configuration for S3 bucket log delivery." + }, + "AWS::Timestream::InfluxDBInstance S3Configuration": { + "BucketName": "The bucket name of the customer S3 bucket.", + "Enabled": "Indicates whether log delivery to the S3 bucket is enabled." + }, + "AWS::Timestream::InfluxDBInstance Tag": { + "Key": "The key of the tag. Tag keys are case sensitive.", + "Value": "The value of the tag. Tag values are case-sensitive and can be null." + }, "AWS::Timestream::ScheduledQuery": { "ClientToken": "Using a ClientToken makes the call to CreateScheduledQuery idempotent; in other words, making the same request repeatedly will produce the same result.
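For orientation, a minimal sketch of how the AWS::Timestream::InfluxDBInstance properties documented above might be combined in a CloudFormation template. The instance type, storage tier, deployment type, and all IDs are illustrative assumptions, not values taken from this schema:

Resources:
  ExampleInfluxDb:
    Type: AWS::Timestream::InfluxDBInstance
    Properties:
      Name: example-influxdb              # must be unique per customer and per Region
      DbInstanceType: db.influx.medium    # assumed instance type value
      DbStorageType: InfluxIOIncludedT1   # assumed spelling of the 3000 IOPS tier
      AllocatedStorage: 40                # GiB
      DeploymentType: WITH_MULTIAZ_STANDBY
      Username: exampleadmin              # stored in a Secrets Manager secret on creation
      Password: '{{resolve:secretsmanager:example/influxdb:SecretString:password}}'
      Organization: example-org
      Bucket: example-bucket
      PubliclyAccessible: false
      VpcSecurityGroupIds:
        - sg-0123456789abcdef0
      VpcSubnetIds:                       # two subnets in different Availability Zones for Multi-AZ
        - subnet-0aaaa1111bbbb2222c
        - subnet-0dddd3333eeee4444f
      LogDeliveryConfiguration:
        S3Configuration:
          BucketName: example-influx-logs
          Enabled: true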
Making multiple identical CreateScheduledQuery requests has the same effect as making a single request.\n\n- If CreateScheduledQuery is called without a `ClientToken` , the Query SDK generates a `ClientToken` on your behalf.\n- After 8 hours, any request with the same `ClientToken` is treated as a new request.", "ErrorReportConfiguration": "Configuration for error reporting. Error reports will be generated when a problem is encountered when writing the query results.", @@ -41439,7 +42201,7 @@ "InactiveDate": "An optional date that specifies when the certificate becomes inactive.", "PrivateKey": "The file that contains the private key for the certificate that's being imported.", "Tags": "Key-value pairs that can be used to group and search for certificates.", - "Usage": "Specifies whether this certificate is used for signing or encryption." + "Usage": "Specifies how this certificate is used. It can be used in the following ways:\n\n- `SIGNING` : For signing AS2 messages\n- `ENCRYPTION` : For encrypting AS2 messages\n- `TLS` : For securing AS2 communications sent over HTTPS" }, "AWS::Transfer::Certificate Tag": { "Key": "The name assigned to the tag that you create.", @@ -41484,7 +42246,7 @@ }, "AWS::Transfer::Server": { "Certificate": "The Amazon Resource Name (ARN) of the AWS Certificate Manager (ACM) certificate. Required when `Protocols` is set to `FTPS` .\n\nTo request a new public certificate, see [Request a public certificate](https://docs.aws.amazon.com/acm/latest/userguide/gs-acm-request-public.html) in the *AWS Certificate Manager User Guide* .\n\nTo import an existing certificate into ACM, see [Importing certificates into ACM](https://docs.aws.amazon.com/acm/latest/userguide/import-certificate.html) in the *AWS Certificate Manager User Guide* .\n\nTo request a private certificate to use FTPS through private IP addresses, see [Request a private certificate](https://docs.aws.amazon.com/acm/latest/userguide/gs-acm-request-private.html) in the *AWS Certificate Manager User Guide* .\n\nCertificates with the following cryptographic algorithms and key sizes are supported:\n\n- 2048-bit RSA (RSA_2048)\n- 4096-bit RSA (RSA_4096)\n- Elliptic Prime Curve 256 bit (EC_prime256v1)\n- Elliptic Prime Curve 384 bit (EC_secp384r1)\n- Elliptic Prime Curve 521 bit (EC_secp521r1)\n\n> The certificate must be a valid SSL/TLS X.509 version 3 certificate with FQDN or IP address specified and information about the issuer.", - "Domain": "Specifies the domain of the storage system that is used for file transfers.", + "Domain": "Specifies the domain of the storage system that is used for file transfers. There are two domains available: Amazon Simple Storage Service (Amazon S3) and Amazon Elastic File System (Amazon EFS). The default value is S3.", "EndpointDetails": "The virtual private cloud (VPC) endpoint settings that are configured for your server. When you host your endpoint within your VPC, you can make your endpoint accessible only to resources within your VPC, or you can attach Elastic IP addresses and make your endpoint accessible to clients over the internet. Your VPC's default security groups are automatically assigned to your endpoint.", "EndpointType": "The type of endpoint that you want your server to use. You can choose to make your server's endpoint publicly accessible (PUBLIC) or host it inside your VPC. 
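To make the three Usage values above concrete, a hedged sketch of importing an AS2 encryption certificate with AWS::Transfer::Certificate; the PEM bodies are placeholders and would normally be passed in as parameters rather than inlined:

Resources:
  ExampleAs2Certificate:
    Type: AWS::Transfer::Certificate
    Properties:
      Usage: ENCRYPTION                   # or SIGNING / TLS, per the list above
      Certificate: |                      # placeholder PEM body
        -----BEGIN CERTIFICATE-----
        ...
        -----END CERTIFICATE-----
      PrivateKey: |                       # placeholder; supplied only when importing a key pair
        -----BEGIN PRIVATE KEY-----
        ...
        -----END PRIVATE KEY-----
      Description: Encryption certificate for AS2 messages
      Tags:
        - Key: purpose
          Value: as2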
With an endpoint that is hosted in a VPC, you can restrict access to your server and resources only within your VPC or choose to make it internet-facing by attaching Elastic IP addresses directly to it.\n\n> After May 19, 2021, you won't be able to create a server using `EndpointType=VPC_ENDPOINT` in your AWS account if your account hasn't already done so before May 19, 2021. If you have already created servers with `EndpointType=VPC_ENDPOINT` in your AWS account on or before May 19, 2021, you will not be affected. After this date, use `EndpointType` = `VPC` .\n> \n> For more information, see [Discontinuing the use of VPC_ENDPOINT](https://docs.aws.amazon.com//transfer/latest/userguide/create-server-in-vpc.html#deprecate-vpc-endpoint) .\n> \n> It is recommended that you use `VPC` as the `EndpointType` . With this endpoint type, you have the option to directly associate up to three Elastic IPv4 addresses (BYO IP included) with your server's endpoint and use VPC security groups to restrict traffic by the client's public IP address. This is not possible with `EndpointType` set to `VPC_ENDPOINT` .", "IdentityProviderDetails": "Required when `IdentityProviderType` is set to `AWS_DIRECTORY_SERVICE` , `AWS_LAMBDA` , or `API_GATEWAY` . Accepts an array containing all of the information required to use a directory in `AWS_DIRECTORY_SERVICE` or invoke a customer-supplied authentication API, including the API Gateway URL. Not required when `IdentityProviderType` is set to `SERVICE_MANAGED` .", @@ -41495,7 +42257,7 @@ "ProtocolDetails": "The protocol settings that are configured for your server.\n\n- To indicate passive mode (for FTP and FTPS protocols), use the `PassiveIp` parameter. Enter a single dotted-quad IPv4 address, such as the external IP address of a firewall, router, or load balancer.\n- To ignore the error that is generated when the client attempts to use the `SETSTAT` command on a file that you are uploading to an Amazon S3 bucket, use the `SetStatOption` parameter. To have the AWS Transfer Family server ignore the `SETSTAT` command and upload files without needing to make any changes to your SFTP client, set the value to `ENABLE_NO_OP` . If you set the `SetStatOption` parameter to `ENABLE_NO_OP` , Transfer Family generates a log entry to Amazon CloudWatch Logs, so that you can determine when the client is making a `SETSTAT` call.\n- To determine whether your AWS Transfer Family server resumes recent, negotiated sessions through a unique session ID, use the `TlsSessionResumptionMode` parameter.\n- `As2Transports` indicates the transport method for the AS2 messages. Currently, only HTTP is supported.\n\nThe `Protocols` parameter is an array of strings.\n\n*Allowed values* : One or more of `SFTP` , `FTPS` , `FTP` , `AS2`", "Protocols": "Specifies the file transfer protocol or protocols over which your file transfer protocol client can connect to your server's endpoint.
The available protocols are:\n\n- `SFTP` (Secure Shell (SSH) File Transfer Protocol): File transfer over SSH\n- `FTPS` (File Transfer Protocol Secure): File transfer with TLS encryption\n- `FTP` (File Transfer Protocol): Unencrypted file transfer\n- `AS2` (Applicability Statement 2): Used for transporting structured business-to-business data\n\n> - If you select `FTPS` , you must choose a certificate stored in AWS Certificate Manager (ACM), which is used to identify your server when clients connect to it over FTPS.\n> - If `Protocol` includes either `FTP` or `FTPS` , then the `EndpointType` must be `VPC` and the `IdentityProviderType` must be either `AWS_DIRECTORY_SERVICE` , `AWS_LAMBDA` , or `API_GATEWAY` .\n> - If `Protocol` includes `FTP` , then `AddressAllocationIds` cannot be associated.\n> - If `Protocol` is set only to `SFTP` , the `EndpointType` can be set to `PUBLIC` and the `IdentityProviderType` can be set to any of the supported identity types: `SERVICE_MANAGED` , `AWS_DIRECTORY_SERVICE` , `AWS_LAMBDA` , or `API_GATEWAY` .\n> - If `Protocol` includes `AS2` , then the `EndpointType` must be `VPC` , and the domain must be Amazon S3.\n\nThe `Protocols` parameter is an array of strings.\n\n*Allowed values* : One or more of `SFTP` , `FTPS` , `FTP` , `AS2`", "S3StorageOptions": "Specifies whether or not performance for your Amazon S3 directories is optimized. This is disabled by default.\n\nBy default, home directory mappings have a `TYPE` of `DIRECTORY` . If you enable this option, you would then need to explicitly set the `HomeDirectoryMapEntry` `Type` to `FILE` if you want a mapping to have a file target.", - "SecurityPolicyName": "Specifies the name of the security policy that is attached to the server.", + "SecurityPolicyName": "Specifies the name of the security policy for the server.", "StructuredLogDestinations": "Specifies the log groups to which your server logs are sent.\n\nTo specify a log group, you must provide the ARN for an existing log group. In this case, the format of the log group is as follows:\n\n`arn:aws:logs:region-name:amazon-account-id:log-group:log-group-name:*`\n\nFor example, `arn:aws:logs:us-east-1:111122223333:log-group:mytestgroup:*`\n\nIf you have previously specified a log group for a server, you can clear it, and in effect turn off structured logging, by providing an empty value for this parameter in an `update-server` call. For example:\n\n`update-server --server-id s-1234567890abcdef0 --structured-log-destinations`", "Tags": "Key-value pairs that can be used to group and search for servers.", "WorkflowDetails": "Specifies the workflow ID for the workflow to assign and the execution role that's used for executing the workflow.\n\nIn addition to a workflow to execute when a file is uploaded completely, `WorkflowDetails` can also contain a workflow ID (and execution role) for a workflow to execute on partial upload. A partial upload occurs when the session disconnects while a file is still open." @@ -41627,23 +42389,21 @@ "Type": "Currently, the following step types are supported.\n\n- *`COPY`* - Copy the file to another location.\n- *`CUSTOM`* - Perform a custom step with an AWS Lambda function target.\n- *`DECRYPT`* - Decrypt a file that was encrypted before it was uploaded.\n- *`DELETE`* - Delete the file.\n- *`TAG`* - Add a tag to the file."
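A minimal server sketch consistent with the constraints just listed (FTPS requires an ACM certificate; enabling FTP or FTPS forces a VPC endpoint and a non-service-managed identity provider). The certificate ARN, directory, VPC, and subnet identifiers are placeholders:

Resources:
  ExampleTransferServer:
    Type: AWS::Transfer::Server
    Properties:
      Domain: S3                          # default storage domain
      Protocols:
        - SFTP
        - FTPS
      Certificate: arn:aws:acm:us-east-1:111122223333:certificate/example-cert-id   # required because FTPS is listed
      EndpointType: VPC                   # required because FTPS is listed
      EndpointDetails:
        VpcId: vpc-0123456789abcdef0
        SubnetIds:
          - subnet-0aaaa1111bbbb2222c
      IdentityProviderType: AWS_DIRECTORY_SERVICE
      IdentityProviderDetails:
        DirectoryId: d-1234567890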
}, "AWS::VerifiedPermissions::IdentitySource": { - "Configuration": "Contains configuration information used when creating a new identity source.\n\n> At this time, the only valid member of this structure is a Amazon Cognito user pool configuration.\n> \n> You must specify a `userPoolArn` , and optionally, a `ClientId` . \n\nThis data type is used as a request parameter for the [CreateIdentitySource](https://docs.aws.amazon.com/verifiedpermissions/latest/apireference/API_CreateIdentitySource.html) operation.", + "Configuration": "Contains configuration information about an identity source.", "PolicyStoreId": "Specifies the ID of the policy store in which you want to store this identity source. Only policies and requests made using this policy store can reference identities from the identity provider configured in the new identity source.", "PrincipalEntityType": "Specifies the namespace and data type of the principals generated for identities authenticated by the new identity source." }, + "AWS::VerifiedPermissions::IdentitySource CognitoGroupConfiguration": { + "GroupEntityType": "The name of the schema entity type that's mapped to the user pool group. Defaults to `AWS::CognitoGroup` ." + }, "AWS::VerifiedPermissions::IdentitySource CognitoUserPoolConfiguration": { "ClientIds": "The unique application client IDs that are associated with the specified Amazon Cognito user pool.\n\nExample: `\"ClientIds\": [\"&ExampleCogClientId;\"]`", + "GroupConfiguration": "The type of entity that a policy store maps to groups from an Amazon Cognito user pool identity source.", "UserPoolArn": "The [Amazon Resource Name (ARN)](https://docs.aws.amazon.com//general/latest/gr/aws-arns-and-namespaces.html) of the Amazon Cognito user pool that contains the identities to be authorized." }, "AWS::VerifiedPermissions::IdentitySource IdentitySourceConfiguration": { "CognitoUserPoolConfiguration": "A structure that contains configuration information used when creating or updating an identity source that represents a connection to an Amazon Cognito user pool used as an identity provider for Verified Permissions ." }, - "AWS::VerifiedPermissions::IdentitySource IdentitySourceDetails": { - "ClientIds": "The application client IDs associated with the specified Amazon Cognito user pool that are enabled for this identity source.", - "DiscoveryUrl": "The well-known URL that points to this user pool's OIDC discovery endpoint. This is a URL string in the following format. This URL replaces the placeholders for both the AWS Region and the user pool identifier with those appropriate for this user pool.\n\n`https://cognito-idp. ** .amazonaws.com/ ** /.well-known/openid-configuration`", - "OpenIdIssuer": "A string that identifies the type of OIDC service represented by this identity source.\n\nAt this time, the only valid value is `cognito` .", - "UserPoolArn": "The [Amazon Resource Name (ARN)](https://docs.aws.amazon.com//general/latest/gr/aws-arns-and-namespaces.html) of the Amazon Cognito user pool whose identities are accessible to this Verified Permissions policy store." - }, "AWS::VerifiedPermissions::Policy": { "Definition": "Specifies the policy type and content to use for the new or updated policy. The definition structure must include either a `Static` or a `TemplateLinked` element.", "PolicyStoreId": "Specifies the `PolicyStoreId` of the policy store you want to store the policy in." @@ -42771,8 +43531,8 @@ "KmsKeyId": "The customer managed key used for encryption. 
The customer managed key must have a policy that allows `kms:CreateGrant` and `kms:DescribeKey` permissions to the IAM identity using the key to invoke Wisdom. To use Wisdom with chat, the key policy must also allow `kms:Decrypt` , `kms:GenerateDataKey*` , and `kms:DescribeKey` permissions to the `connect.amazonaws.com` service principal. For more information about setting up a customer managed key for Wisdom, see [Enable Amazon Connect Wisdom for your instance](https://docs.aws.amazon.com/connect/latest/adminguide/enable-wisdom.html) . For information about valid ID values, see [Key identifiers (KeyId)](https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id) in the *AWS Key Management Service Developer Guide* ." }, "AWS::Wisdom::Assistant Tag": { - "Key": "", - "Value": "" + "Key": "The key-value string map. The valid character set is `[a-zA-Z+-=._:/]` . The tag key can be up to 128 characters and must not start with `aws:` .", + "Value": "The tag value can be up to 256 characters." }, "AWS::Wisdom::AssistantAssociation": { "AssistantId": "The identifier of the Wisdom assistant.", @@ -42784,8 +43544,8 @@ "KnowledgeBaseId": "The identifier of the knowledge base." }, "AWS::Wisdom::AssistantAssociation Tag": { - "Key": "", - "Value": "" + "Key": "The key-value string map. The valid character set is `[a-zA-Z+-=._:/]` . The tag key can be up to 128 characters and must not start with `aws:` .", + "Value": "The tag value can be up to 256 characters." }, "AWS::Wisdom::KnowledgeBase": { "Description": "The description.", @@ -42798,7 +43558,7 @@ }, "AWS::Wisdom::KnowledgeBase AppIntegrationsConfiguration": { "AppIntegrationArn": "The Amazon Resource Name (ARN) of the AppIntegrations DataIntegration to use for ingesting content.\n\n- For [Salesforce](https://docs.aws.amazon.com/https://developer.salesforce.com/docs/atlas.en-us.knowledge_dev.meta/knowledge_dev/sforce_api_objects_knowledge__kav.htm) , your AppIntegrations DataIntegration must have an ObjectConfiguration if objectFields is not provided, including at least `Id` , `ArticleNumber` , `VersionNumber` , `Title` , `PublishStatus` , and `IsDeleted` as source fields.\n- For [ServiceNow](https://docs.aws.amazon.com/https://developer.servicenow.com/dev.do#!/reference/api/rome/rest/knowledge-management-api) , your AppIntegrations DataIntegration must have an ObjectConfiguration if objectFields is not provided, including at least `number` , `short_description` , `sys_mod_count` , `workflow_state` , and `active` as source fields.\n- For [Zendesk](https://docs.aws.amazon.com/https://developer.zendesk.com/api-reference/help_center/help-center-api/articles/) , your AppIntegrations DataIntegration must have an ObjectConfiguration if `objectFields` is not provided, including at least `id` , `title` , `updated_at` , and `draft` as source fields.\n- For [SharePoint](https://docs.aws.amazon.com/https://learn.microsoft.com/en-us/sharepoint/dev/sp-add-ins/sharepoint-net-server-csom-jsom-and-rest-api-index) , your AppIntegrations DataIntegration must have a FileConfiguration, including only file extensions that are among `docx` , `pdf` , `html` , `htm` , and `txt` .\n- For [Amazon S3](https://docs.aws.amazon.com/https://aws.amazon.com/s3/) , the ObjectConfiguration and FileConfiguration of your AppIntegrations DataIntegration must be null. 
The `SourceURI` of your DataIntegration must use the following format: `s3://your_s3_bucket_name` .\n\n> The bucket policy of the corresponding S3 bucket must allow the AWS principal `app-integrations.amazonaws.com` to perform `s3:ListBucket` , `s3:GetObject` , and `s3:GetBucketLocation` against the bucket.", - "ObjectFields": "The fields from the source that are made available to your agents in Amazon Q. Optional if ObjectConfiguration is included in the provided DataIntegration.\n\n- For [Salesforce](https://docs.aws.amazon.com/https://developer.salesforce.com/docs/atlas.en-us.knowledge_dev.meta/knowledge_dev/sforce_api_objects_knowledge__kav.htm) , you must include at least `Id` , `ArticleNumber` , `VersionNumber` , `Title` , `PublishStatus` , and `IsDeleted` .\n- For [ServiceNow](https://docs.aws.amazon.com/https://developer.servicenow.com/dev.do#!/reference/api/rome/rest/knowledge-management-api) , you must include at least `number` , `short_description` , `sys_mod_count` , `workflow_state` , and `active` .\n- For [Zendesk](https://docs.aws.amazon.com/https://developer.zendesk.com/api-reference/help_center/help-center-api/articles/) , you must include at least `id` , `title` , `updated_at` , and `draft` .\n\nMake sure to include additional fields. These fields are indexed and used to source recommendations." + "ObjectFields": "The fields from the source that are made available to your agents in Amazon Q in Connect. Optional if ObjectConfiguration is included in the provided DataIntegration.\n\n- For [Salesforce](https://docs.aws.amazon.com/https://developer.salesforce.com/docs/atlas.en-us.knowledge_dev.meta/knowledge_dev/sforce_api_objects_knowledge__kav.htm) , you must include at least `Id` , `ArticleNumber` , `VersionNumber` , `Title` , `PublishStatus` , and `IsDeleted` .\n- For [ServiceNow](https://docs.aws.amazon.com/https://developer.servicenow.com/dev.do#!/reference/api/rome/rest/knowledge-management-api) , you must include at least `number` , `short_description` , `sys_mod_count` , `workflow_state` , and `active` .\n- For [Zendesk](https://docs.aws.amazon.com/https://developer.zendesk.com/api-reference/help_center/help-center-api/articles/) , you must include at least `id` , `title` , `updated_at` , and `draft` .\n\nMake sure to include additional fields. These fields are indexed and used to source recommendations." }, "AWS::Wisdom::KnowledgeBase RenderingConfiguration": { "TemplateUri": "A URI template containing exactly one variable in `${variableName}` format. This can only be set for `EXTERNAL` knowledge bases. For Salesforce, ServiceNow, and Zendesk, the variable must be one of the following:\n\n- Salesforce: `Id` , `ArticleNumber` , `VersionNumber` , `Title` , `PublishStatus` , or `IsDeleted`\n- ServiceNow: `number` , `short_description` , `sys_mod_count` , `workflow_state` , or `active`\n- Zendesk: `id` , `title` , `updated_at` , or `draft`\n\nThe variable is replaced with the actual value for a piece of content when calling [GetContent](https://docs.aws.amazon.com/amazon-q-connect/latest/APIReference/API_GetContent.html) ." @@ -42810,8 +43570,8 @@ "AppIntegrations": "Configuration information for Amazon AppIntegrations to automatically ingest content." }, "AWS::Wisdom::KnowledgeBase Tag": { - "Key": "", - "Value": "" + "Key": "The key-value string map. The valid character set is `[a-zA-Z+-=._:/]` . The tag key can be up to 128 characters and must not start with `aws:` .", + "Value": "The tag value can be up to 256 characters." 
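To illustrate the AppIntegrationArn and ObjectFields pairing described above, a hedged knowledge base sketch; the DataIntegration ARN is a placeholder and the field list is the Salesforce minimum set from this hunk:

Resources:
  ExampleKnowledgeBase:
    Type: AWS::Wisdom::KnowledgeBase
    Properties:
      Name: example-kb
      KnowledgeBaseType: EXTERNAL
      SourceConfiguration:
        AppIntegrations:
          AppIntegrationArn: arn:aws:app-integrations:us-east-1:111122223333:data-integration/EXAMPLE
          ObjectFields:                   # minimum Salesforce field set per the description above
            - Id
            - ArticleNumber
            - VersionNumber
            - Title
            - PublishStatus
            - IsDeleted
      Tags:
        - Key: team                       # tag keys must not start with aws:
          Value: support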
}, "AWS::WorkSpaces::ConnectionAlias": { "ConnectionString": "The connection string specified for the connection alias. The connection string must be in the form of a fully qualified domain name (FQDN), such as `www.example.com` .", @@ -42850,8 +43610,8 @@ }, "AWS::WorkSpacesThinClient::Environment": { "DesiredSoftwareSetId": "The ID of the software set to apply.", - "DesktopArn": "The Amazon Resource Name (ARN) of the desktop to stream from Amazon WorkSpaces , WorkSpaces Web, or AppStream 2.0 .", - "DesktopEndpoint": "The URL for the identity provider login (only for environments that use AppStream 2.0 ).", + "DesktopArn": "The Amazon Resource Name (ARN) of the desktop to stream from Amazon WorkSpaces, WorkSpaces Web, or AppStream 2.0.", + "DesktopEndpoint": "The URL for the identity provider login (only for environments that use AppStream 2.0).", "KmsKeyArn": "The Amazon Resource Name (ARN) of the AWS Key Management Service key used to encrypt the environment.", "MaintenanceWindow": "A specification for a time window to apply software updates.", "Name": "The name of the environment.", diff --git a/schema_source/cloudformation.schema.json b/schema_source/cloudformation.schema.json index 0ae42bb8f..70e3eb752 100644 --- a/schema_source/cloudformation.schema.json +++ b/schema_source/cloudformation.schema.json @@ -2999,11 +2999,6 @@ "title": "AutoSubDomainIAMRole", "type": "string" }, - "Certificate": { - "$ref": "#/definitions/AWS::Amplify::Domain.Certificate", - "markdownDescription": "Describes the SSL/TLS certificate for the domain association. This can be your own custom certificate or the default certificate that Amplify provisions for you.\n\nIf you are updating your domain to use a different certificate, `Certificate` points to the new certificate that is being created instead of the current active certificate. Otherwise, `Certificate` points to the current active certificate.", - "title": "Certificate" - }, "CertificateSettings": { "$ref": "#/definitions/AWS::Amplify::Domain.CertificateSettings", "markdownDescription": "The type of SSL/TLS certificate to use for your custom domain. If you don't specify a certificate type, Amplify uses the default certificate that it provisions and manages for you.", @@ -3026,11 +3021,6 @@ "markdownDescription": "The setting for the subdomain.", "title": "SubDomainSettings", "type": "array" - }, - "UpdateStatus": { - "markdownDescription": "The status of the domain update operation that is currently in progress. The following list describes the valid update states.\n\n- **REQUESTING_CERTIFICATE** - The certificate is in the process of being updated.\n- **PENDING_VERIFICATION** - Indicates that an Amplify managed certificate is in the process of being verified. This occurs during the creation of a custom domain or when a custom domain is updated to use a managed certificate.\n- **IMPORTING_CUSTOM_CERTIFICATE** - Indicates that an Amplify custom certificate is in the process of being imported. This occurs during the creation of a custom domain or when a custom domain is updated to use a custom certificate.\n- **PENDING_DEPLOYMENT** - Indicates that the subdomain or certificate changes are being propagated.\n- **AWAITING_APP_CNAME** - Amplify is waiting for CNAME records corresponding to subdomains to be propagated. If your custom domain is on Route\u00a053, Amplify handles this for you automatically. 
For more information about custom domains, see [Setting up custom domains](https://docs.aws.amazon.com/amplify/latest/userguide/custom-domains.html) in the *Amplify Hosting User Guide* .\n- **UPDATE_COMPLETE** - The certificate has been associated with a domain.\n- **UPDATE_FAILED** - The certificate has failed to be provisioned or associated, and there is no existing active certificate to roll back to.", - "title": "UpdateStatus", - "type": "string" } }, "required": [ @@ -9015,7 +9005,7 @@ "type": "string" }, "KmsKeyIdentifier": { - "markdownDescription": "", + "markdownDescription": "The AWS Key Management Service key identifier (key ID, key alias, or key ARN) provided when the resource was created or updated.", "title": "KmsKeyIdentifier", "type": "string" }, @@ -9180,7 +9170,7 @@ "items": { "$ref": "#/definitions/AWS::AppConfig::Deployment.DynamicExtensionParameters" }, - "markdownDescription": "The parameters accepted by the extension. You specify parameter values when you associate the extension to an AWS AppConfig resource by using the `CreateExtensionAssociation` API action. For AWS Lambda extension actions, these parameters are included in the Lambda request object.", + "markdownDescription": "A map of dynamic extension parameter names to values to pass to associated extensions with `PRE_START_DEPLOYMENT` actions.", "title": "DynamicExtensionParameters", "type": "array" }, @@ -9237,17 +9227,17 @@ "additionalProperties": false, "properties": { "ExtensionReference": { - "markdownDescription": "", + "markdownDescription": "The ARN or ID of the extension for which you are inserting a dynamic parameter.", "title": "ExtensionReference", "type": "string" }, "ParameterName": { - "markdownDescription": "", + "markdownDescription": "The parameter name.", "title": "ParameterName", "type": "string" }, "ParameterValue": { - "markdownDescription": "", + "markdownDescription": "The parameter value.", "title": "ParameterValue", "type": "string" } @@ -12630,6 +12620,14 @@ "title": "Namespace", "type": "string" }, + "Permissions": { + "items": { + "type": "string" + }, + "markdownDescription": "", + "title": "Permissions", + "type": "array" + }, "Tags": { "items": { "$ref": "#/definitions/Tag" @@ -12699,8 +12697,7 @@ } }, "required": [ - "AccessUrl", - "ApprovedOrigins" + "AccessUrl" ], "type": "object" }, @@ -16982,7 +16979,7 @@ "items": { "$ref": "#/definitions/Tag" }, - "markdownDescription": "A list of metadata items that you can associate with your VPC connector resource. A tag is a key-value pair.", + "markdownDescription": "A list of metadata items that you can associate with your VPC connector resource. A tag is a key-value pair.\n\n> A `VpcConnector` is immutable, so you cannot update its tags. To change the tags, replace the resource. To replace a `VpcConnector` , you must provide a new combination of security groups.", "title": "Tags", "type": "array" }, @@ -22697,7 +22694,7 @@ "type": "string" }, "Cooldown": { - "markdownDescription": "*Only needed if you use simple scaling policies.*\n\nThe amount of time, in seconds, between one scaling activity ending and another one starting due to simple scaling policies. 
For more information, see [Scaling cooldowns for Amazon EC2 Auto Scaling](https://docs.aws.amazon.com/autoscaling/ec2/userguide/Cooldown.html) in the *Amazon EC2 Auto Scaling User Guide* .\n\nDefault: `300` seconds", + "markdownDescription": "*Only needed if you use simple scaling policies.*\n\nThe amount of time, in seconds, between one scaling activity ending and another one starting due to simple scaling policies. For more information, see [Scaling cooldowns for Amazon EC2 Auto Scaling](https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-scaling-cooldowns.html) in the *Amazon EC2 Auto Scaling User Guide* .\n\nDefault: `300` seconds", "title": "Cooldown", "type": "string" }, @@ -22712,7 +22709,7 @@ "type": "string" }, "DesiredCapacityType": { - "markdownDescription": "The unit of measurement for the value specified for desired capacity. Amazon EC2 Auto Scaling supports `DesiredCapacityType` for attribute-based instance type selection only. For more information, see [Creating an Auto Scaling group using attribute-based instance type selection](https://docs.aws.amazon.com/autoscaling/ec2/userguide/create-asg-instance-type-requirements.html) in the *Amazon EC2 Auto Scaling User Guide* .\n\nBy default, Amazon EC2 Auto Scaling specifies `units` , which translates into number of instances.\n\nValid values: `units` | `vcpu` | `memory-mib`", + "markdownDescription": "The unit of measurement for the value specified for desired capacity. Amazon EC2 Auto Scaling supports `DesiredCapacityType` for attribute-based instance type selection only. For more information, see [Create a mixed instances group using attribute-based instance type selection](https://docs.aws.amazon.com/autoscaling/ec2/userguide/create-mixed-instances-group-attribute-based-instance-type-selection.html) in the *Amazon EC2 Auto Scaling User Guide* .\n\nBy default, Amazon EC2 Auto Scaling specifies `units` , which translates into number of instances.\n\nValid values: `units` | `vcpu` | `memory-mib`", "title": "DesiredCapacityType", "type": "string" }, @@ -22722,7 +22719,7 @@ "type": "number" }, "HealthCheckType": { - "markdownDescription": "A comma-separated value string of one or more health check types.\n\nThe valid values are `EC2` , `ELB` , and `VPC_LATTICE` . `EC2` is the default health check and cannot be disabled. For more information, see [Health checks for Auto Scaling instances](https://docs.aws.amazon.com/autoscaling/ec2/userguide/healthcheck.html) in the *Amazon EC2 Auto Scaling User Guide* .\n\nOnly specify `EC2` if you must clear a value that was previously set.", + "markdownDescription": "A comma-separated value string of one or more health check types.\n\nThe valid values are `EC2` , `ELB` , and `VPC_LATTICE` . `EC2` is the default health check and cannot be disabled. For more information, see [Health checks for instances in an Auto Scaling group](https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-health-checks.html) in the *Amazon EC2 Auto Scaling User Guide* .\n\nOnly specify `EC2` if you must clear a value that was previously set.", "title": "HealthCheckType", "type": "string" }, @@ -22763,7 +22760,7 @@ "type": "array" }, "MaxInstanceLifetime": { - "markdownDescription": "The maximum amount of time, in seconds, that an instance can be in service. The default is null. If specified, the value must be either 0 or a number equal to or greater than 86,400 seconds (1 day). 
For more information, see [Replacing Auto Scaling instances based on maximum instance lifetime](https://docs.aws.amazon.com/autoscaling/ec2/userguide/asg-max-instance-lifetime.html) in the *Amazon EC2 Auto Scaling User Guide* .", + "markdownDescription": "The maximum amount of time, in seconds, that an instance can be in service. The default is null. If specified, the value must be either 0 or a number equal to or greater than 86,400 seconds (1 day). For more information, see [Replace Auto Scaling instances based on maximum instance lifetime](https://docs.aws.amazon.com/autoscaling/ec2/userguide/asg-max-instance-lifetime.html) in the *Amazon EC2 Auto Scaling User Guide* .", "title": "MaxInstanceLifetime", "type": "number" }, @@ -22791,7 +22788,7 @@ "title": "MixedInstancesPolicy" }, "NewInstancesProtectedFromScaleIn": { - "markdownDescription": "Indicates whether newly launched instances are protected from termination by Amazon EC2 Auto Scaling when scaling in. For more information about preventing instances from terminating on scale in, see [Using instance scale-in protection](https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-instance-protection.html) in the *Amazon EC2 Auto Scaling User Guide* .", + "markdownDescription": "Indicates whether newly launched instances are protected from termination by Amazon EC2 Auto Scaling when scaling in. For more information about preventing instances from terminating on scale in, see [Use instance scale-in protection](https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-instance-protection.html) in the *Amazon EC2 Auto Scaling User Guide* .", "title": "NewInstancesProtectedFromScaleIn", "type": "boolean" }, @@ -22833,7 +22830,7 @@ "items": { "type": "string" }, - "markdownDescription": "A policy or a list of policies that are used to select the instance to terminate. These policies are executed in the order that you list them. For more information, see [Work with Amazon EC2 Auto Scaling termination policies](https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-termination-policies.html) in the *Amazon EC2 Auto Scaling User Guide* .\n\nValid values: `Default` | `AllocationStrategy` | `ClosestToNextInstanceHour` | `NewestInstance` | `OldestInstance` | `OldestLaunchConfiguration` | `OldestLaunchTemplate` | `arn:aws:lambda:region:account-id:function:my-function:my-alias`", + "markdownDescription": "A policy or a list of policies that are used to select the instance to terminate. These policies are executed in the order that you list them. For more information, see [Configure termination policies for Amazon EC2 Auto Scaling](https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-termination-policies.html) in the *Amazon EC2 Auto Scaling User Guide* .\n\nValid values: `Default` | `AllocationStrategy` | `ClosestToNextInstanceHour` | `NewestInstance` | `OldestInstance` | `OldestLaunchConfiguration` | `OldestLaunchTemplate` | `arn:aws:lambda:region:account-id:function:my-function:my-alias`", "title": "TerminationPolicies", "type": "array" }, @@ -23161,7 +23158,7 @@ "title": "InstanceRequirements" }, "InstanceType": { - "markdownDescription": "The instance type, such as `m3.xlarge` . You must specify an instance type that is supported in your requested Region and Availability Zones. 
For more information, see [Instance types](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html) in the *Amazon Elastic Compute Cloud User Guide* .\n\nYou can specify up to 40 instance types per Auto Scaling group.", + "markdownDescription": "The instance type, such as `m3.xlarge` . You must specify an instance type that is supported in your requested Region and Availability Zones. For more information, see [Instance types](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html) in the *Amazon EC2 User Guide for Linux Instances* .\n\nYou can specify up to 40 instance types per Auto Scaling group.", "title": "InstanceType", "type": "string" }, @@ -23236,7 +23233,7 @@ "type": "string" }, "RoleARN": { - "markdownDescription": "The ARN of the IAM role that allows the Auto Scaling group to publish to the specified notification target. For information about creating this role, see [Configure a notification target for a lifecycle hook](https://docs.aws.amazon.com/autoscaling/ec2/userguide/prepare-for-lifecycle-notifications.html#lifecycle-hook-notification-target) in the *Amazon EC2 Auto Scaling User Guide* .\n\nValid only if the notification target is an Amazon SNS topic or an Amazon SQS queue.", + "markdownDescription": "The ARN of the IAM role that allows the Auto Scaling group to publish to the specified notification target. For information about creating this role, see [Prepare to add a lifecycle hook to your Auto Scaling group](https://docs.aws.amazon.com/autoscaling/ec2/userguide/prepare-for-lifecycle-notifications.html) in the *Amazon EC2 Auto Scaling User Guide* .\n\nValid only if the notification target is an Amazon SNS topic or an Amazon SQS queue.", "title": "RoleARN", "type": "string" } @@ -23291,7 +23288,7 @@ "items": { "type": "string" }, - "markdownDescription": "Identifies the metrics to enable.\n\nYou can specify one or more of the following metrics:\n\n- `GroupMinSize`\n- `GroupMaxSize`\n- `GroupDesiredCapacity`\n- `GroupInServiceInstances`\n- `GroupPendingInstances`\n- `GroupStandbyInstances`\n- `GroupTerminatingInstances`\n- `GroupTotalInstances`\n- `GroupInServiceCapacity`\n- `GroupPendingCapacity`\n- `GroupStandbyCapacity`\n- `GroupTerminatingCapacity`\n- `GroupTotalCapacity`\n- `WarmPoolDesiredCapacity`\n- `WarmPoolWarmedCapacity`\n- `WarmPoolPendingCapacity`\n- `WarmPoolTerminatingCapacity`\n- `WarmPoolTotalCapacity`\n- `GroupAndWarmPoolDesiredCapacity`\n- `GroupAndWarmPoolTotalCapacity`\n\nIf you specify `Granularity` and don't specify any metrics, all metrics are enabled.\n\nFor more information, see [Auto Scaling group metrics](https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-cloudwatch-monitoring.html#as-group-metrics) in the *Amazon EC2 Auto Scaling User Guide* .", + "markdownDescription": "Identifies the metrics to enable.\n\nYou can specify one or more of the following metrics:\n\n- `GroupMinSize`\n- `GroupMaxSize`\n- `GroupDesiredCapacity`\n- `GroupInServiceInstances`\n- `GroupPendingInstances`\n- `GroupStandbyInstances`\n- `GroupTerminatingInstances`\n- `GroupTotalInstances`\n- `GroupInServiceCapacity`\n- `GroupPendingCapacity`\n- `GroupStandbyCapacity`\n- `GroupTerminatingCapacity`\n- `GroupTotalCapacity`\n- `WarmPoolDesiredCapacity`\n- `WarmPoolWarmedCapacity`\n- `WarmPoolPendingCapacity`\n- `WarmPoolTerminatingCapacity`\n- `WarmPoolTotalCapacity`\n- `GroupAndWarmPoolDesiredCapacity`\n- `GroupAndWarmPoolTotalCapacity`\n\nIf you specify `Granularity` and don't specify any metrics, all metrics are 
enabled.\n\nFor more information, see [Amazon CloudWatch metrics for Amazon EC2 Auto Scaling](https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-metrics.html) in the *Amazon EC2 Auto Scaling User Guide* .", "title": "Metrics", "type": "array" } @@ -23471,7 +23468,7 @@ "additionalProperties": false, "properties": { "AssociatePublicIpAddress": { - "markdownDescription": "Specifies whether to assign a public IPv4 address to the group's instances. If the instance is launched into a default subnet, the default is to assign a public IPv4 address, unless you disabled the option to assign a public IPv4 address on the subnet. If the instance is launched into a nondefault subnet, the default is not to assign a public IPv4 address, unless you enabled the option to assign a public IPv4 address on the subnet.\n\nIf you specify `true` , each instance in the Auto Scaling group receives a unique public IPv4 address. For more information, see [Launching Auto Scaling instances in a VPC](https://docs.aws.amazon.com/autoscaling/ec2/userguide/asg-in-vpc.html) in the *Amazon EC2 Auto Scaling User Guide* .\n\nIf you specify this property, you must specify at least one subnet for `VPCZoneIdentifier` when you create your group.", + "markdownDescription": "Specifies whether to assign a public IPv4 address to the group's instances. If the instance is launched into a default subnet, the default is to assign a public IPv4 address, unless you disabled the option to assign a public IPv4 address on the subnet. If the instance is launched into a nondefault subnet, the default is not to assign a public IPv4 address, unless you enabled the option to assign a public IPv4 address on the subnet.\n\nIf you specify `true` , each instance in the Auto Scaling group receives a unique public IPv4 address. For more information, see [Provide network connectivity for your Auto Scaling instances using Amazon VPC](https://docs.aws.amazon.com/autoscaling/ec2/userguide/asg-in-vpc.html) in the *Amazon EC2 Auto Scaling User Guide* .\n\nIf you specify this property, you must specify at least one subnet for `VPCZoneIdentifier` when you create your group.", "title": "AssociatePublicIpAddress", "type": "boolean" }, @@ -23497,7 +23494,7 @@ "type": "array" }, "EbsOptimized": { - "markdownDescription": "Specifies whether the launch configuration is optimized for EBS I/O ( `true` ) or not ( `false` ). The optimization provides dedicated throughput to Amazon EBS and an optimized configuration stack to provide optimal I/O performance. This optimization is not available with all instance types. Additional fees are incurred when you enable EBS optimization for an instance type that is not EBS-optimized by default. For more information, see [Amazon EBS-optimized instances](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSOptimized.html) in the *Amazon EC2 User Guide for Linux Instances* .\n\nThe default value is `false` .", + "markdownDescription": "Specifies whether the launch configuration is optimized for EBS I/O ( `true` ) or not ( `false` ). The optimization provides dedicated throughput to Amazon EBS and an optimized configuration stack to provide optimal I/O performance. This optimization is not available with all instance types. Additional fees are incurred when you enable EBS optimization for an instance type that is not EBS-optimized by default. 
For more information, see [Amazon EBS-optimized instances](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-optimized.html) in the *Amazon EC2 User Guide for Linux Instances* .\n\nThe default value is `false` .", "title": "EbsOptimized", "type": "boolean" }, @@ -23507,7 +23504,7 @@ "type": "string" }, "ImageId": { - "markdownDescription": "The ID of the Amazon Machine Image (AMI) that was assigned during registration. For more information, see [Finding a Linux AMI](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/finding-an-ami.html) in the *Amazon EC2 User Guide for Linux Instances* .\n\nIf you specify `InstanceId` , an `ImageId` is not required.", + "markdownDescription": "The ID of the Amazon Machine Image (AMI) that was assigned during registration. For more information, see [Find a Linux AMI](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/finding-an-ami.html) in the *Amazon EC2 User Guide for Linux Instances* .\n\nIf you specify `InstanceId` , an `ImageId` is not required.", "title": "ImageId", "type": "string" }, @@ -23517,7 +23514,7 @@ "type": "string" }, "InstanceMonitoring": { - "markdownDescription": "Controls whether instances in this group are launched with detailed ( `true` ) or basic ( `false` ) monitoring.\n\nThe default value is `true` (enabled).\n\n> When detailed monitoring is enabled, Amazon CloudWatch generates metrics every minute and your account is charged a fee. When you disable detailed monitoring, CloudWatch generates metrics every 5 minutes. For more information, see [Configure Monitoring for Auto Scaling Instances](https://docs.aws.amazon.com/autoscaling/latest/userguide/enable-as-instance-metrics.html) in the *Amazon EC2 Auto Scaling User Guide* .", + "markdownDescription": "Controls whether instances in this group are launched with detailed ( `true` ) or basic ( `false` ) monitoring.\n\nThe default value is `true` (enabled).\n\n> When detailed monitoring is enabled, Amazon CloudWatch generates metrics every minute and your account is charged a fee. When you disable detailed monitoring, CloudWatch generates metrics every 5 minutes. For more information, see [Configure monitoring for Auto Scaling instances](https://docs.aws.amazon.com/autoscaling/latest/userguide/enable-as-instance-metrics.html) in the *Amazon EC2 Auto Scaling User Guide* .", "title": "InstanceMonitoring", "type": "boolean" }, @@ -23532,7 +23529,7 @@ "type": "string" }, "KeyName": { - "markdownDescription": "The name of the key pair. For more information, see [Amazon EC2 key pairs and Linux instances](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html) in the *Amazon EC2 User Guide for Linux Instances* .", + "markdownDescription": "The name of the key pair. For more information, see [Amazon EC2 key pairs and Amazon EC2 instances](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html) in the *Amazon EC2 User Guide for Linux Instances* .", "title": "KeyName", "type": "string" }, @@ -23543,11 +23540,11 @@ }, "MetadataOptions": { "$ref": "#/definitions/AWS::AutoScaling::LaunchConfiguration.MetadataOptions", - "markdownDescription": "The metadata options for the instances. For more information, see [Configuring the Instance Metadata Options](https://docs.aws.amazon.com/autoscaling/ec2/userguide/create-launch-config.html#launch-configurations-imds) in the *Amazon EC2 Auto Scaling User Guide* .", + "markdownDescription": "The metadata options for the instances. 
For more information, see [Configure the instance metadata options](https://docs.aws.amazon.com/autoscaling/ec2/userguide/create-launch-config.html#launch-configurations-imds) in the *Amazon EC2 Auto Scaling User Guide* .", "title": "MetadataOptions" }, "PlacementTenancy": { - "markdownDescription": "The tenancy of the instance, either `default` or `dedicated` . An instance with `dedicated` tenancy runs on isolated, single-tenant hardware and can only be launched into a VPC. To launch dedicated instances into a shared tenancy VPC (a VPC with the instance placement tenancy attribute set to `default` ), you must set the value of this property to `dedicated` . For more information, see [Configuring instance tenancy with Amazon EC2 Auto Scaling](https://docs.aws.amazon.com/autoscaling/ec2/userguide/auto-scaling-dedicated-instances.html) in the *Amazon EC2 Auto Scaling User Guide* .\n\nIf you specify `PlacementTenancy` , you must specify at least one subnet for `VPCZoneIdentifier` when you create your group.\n\nValid values: `default` | `dedicated`", + "markdownDescription": "The tenancy of the instance, either `default` or `dedicated` . An instance with `dedicated` tenancy runs on isolated, single-tenant hardware and can only be launched into a VPC. To launch dedicated instances into a shared tenancy VPC (a VPC with the instance placement tenancy attribute set to `default` ), you must set the value of this property to `dedicated` .\n\nIf you specify `PlacementTenancy` , you must specify at least one subnet for `VPCZoneIdentifier` when you create your group.\n\nValid values: `default` | `dedicated`", "title": "PlacementTenancy", "type": "string" }, @@ -23611,7 +23608,7 @@ "type": "boolean" }, "Encrypted": { - "markdownDescription": "Specifies whether the volume should be encrypted. Encrypted EBS volumes can only be attached to instances that support Amazon EBS encryption. For more information, see [Supported instance types](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html#EBSEncryption_supported_instances) . If your AMI uses encrypted volumes, you can also only launch it on supported instance types.\n\n> If you are creating a volume from a snapshot, you cannot create an unencrypted volume from an encrypted snapshot. Also, you cannot specify a KMS key ID when using a launch configuration.\n> \n> If you enable encryption by default, the EBS volumes that you create are always encrypted, either using the AWS managed KMS key or a customer-managed KMS key, regardless of whether the snapshot was encrypted.\n> \n> For more information, see [Use AWS KMS keys to encrypt Amazon EBS volumes](https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-data-protection.html#encryption) in the *Amazon EC2 Auto Scaling User Guide* .", + "markdownDescription": "Specifies whether the volume should be encrypted. Encrypted EBS volumes can only be attached to instances that support Amazon EBS encryption. For more information, see [Requirements for Amazon EBS encryption](https://docs.aws.amazon.com/ebs/latest/userguide/ebs-encryption-requirements.html) in the *Amazon EBS User Guide* . If your AMI uses encrypted volumes, you can also only launch it on supported instance types.\n\n> If you are creating a volume from a snapshot, you cannot create an unencrypted volume from an encrypted snapshot. 
Also, you cannot specify a KMS key ID when using a launch configuration.\n> \n> If you enable encryption by default, the EBS volumes that you create are always encrypted, either using the AWS managed KMS key or a customer-managed KMS key, regardless of whether the snapshot was encrypted.\n> \n> For more information, see [Use AWS KMS keys to encrypt Amazon EBS volumes](https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-data-protection.html#encryption) in the *Amazon EC2 Auto Scaling User Guide* .", "title": "Encrypted", "type": "boolean" }, @@ -23636,7 +23633,7 @@ "type": "number" }, "VolumeType": { - "markdownDescription": "The volume type. For more information, see [Amazon EBS volume types](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html) in the *Amazon EC2 User Guide for Linux Instances* .\n\nValid values: `standard` | `io1` | `gp2` | `st1` | `sc1` | `gp3`", + "markdownDescription": "The volume type. For more information, see [Amazon EBS volume types](https://docs.aws.amazon.com/ebs/latest/userguide/ebs-volume-types.html) in the *Amazon EBS User Guide* .\n\nValid values: `standard` | `io1` | `gp2` | `st1` | `sc1` | `gp3`", "title": "VolumeType", "type": "string" } @@ -23764,7 +23761,7 @@ "type": "string" }, "RoleARN": { - "markdownDescription": "The ARN of the IAM role that allows the Auto Scaling group to publish to the specified notification target. For information about creating this role, see [Configure a notification target for a lifecycle hook](https://docs.aws.amazon.com/autoscaling/ec2/userguide/prepare-for-lifecycle-notifications.html#lifecycle-hook-notification-target) in the *Amazon EC2 Auto Scaling User Guide* .\n\nValid only if the notification target is an Amazon SNS topic or an Amazon SQS queue.", + "markdownDescription": "The ARN of the IAM role that allows the Auto Scaling group to publish to the specified notification target. For information about creating this role, see [Prepare to add a lifecycle hook to your Auto Scaling group](https://docs.aws.amazon.com/autoscaling/ec2/userguide/prepare-for-lifecycle-notifications.html) in the *Amazon EC2 Auto Scaling User Guide* .\n\nValid only if the notification target is an Amazon SNS topic or an Amazon SQS queue.", "title": "RoleARN", "type": "string" } @@ -23842,7 +23839,7 @@ "type": "string" }, "Cooldown": { - "markdownDescription": "A cooldown period, in seconds, that applies to a specific simple scaling policy. When a cooldown period is specified here, it overrides the default cooldown.\n\nValid only if the policy type is `SimpleScaling` . For more information, see [Scaling cooldowns for Amazon EC2 Auto Scaling](https://docs.aws.amazon.com/autoscaling/ec2/userguide/Cooldown.html) in the *Amazon EC2 Auto Scaling User Guide* .\n\nDefault: None", + "markdownDescription": "A cooldown period, in seconds, that applies to a specific simple scaling policy. When a cooldown period is specified here, it overrides the default cooldown.\n\nValid only if the policy type is `SimpleScaling` . 
For more information, see [Scaling cooldowns for Amazon EC2 Auto Scaling](https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-scaling-cooldowns.html) in the *Amazon EC2 Auto Scaling User Guide* .\n\nDefault: None", "title": "Cooldown", "type": "string" }, @@ -24085,7 +24082,7 @@ "additionalProperties": false, "properties": { "MaxCapacityBreachBehavior": { - "markdownDescription": "Defines the behavior that should be applied if the forecast capacity approaches or exceeds the maximum capacity of the Auto Scaling group. Defaults to `HonorMaxCapacity` if not specified.\n\nThe following are possible values:\n\n- `HonorMaxCapacity` - Amazon EC2 Auto Scaling cannot scale out capacity higher than the maximum capacity. The maximum capacity is enforced as a hard limit.\n- `IncreaseMaxCapacity` - Amazon EC2 Auto Scaling can scale out capacity higher than the maximum capacity when the forecast capacity is close to or exceeds the maximum capacity. The upper limit is determined by the forecasted capacity and the value for `MaxCapacityBuffer` .", + "markdownDescription": "Defines the behavior that should be applied if the forecast capacity approaches or exceeds the maximum capacity of the Auto Scaling group. Defaults to `HonorMaxCapacity` if not specified.\n\nThe following are possible values:\n\n- `HonorMaxCapacity` - Amazon EC2 Auto Scaling can't increase the maximum capacity of the group when the forecast capacity is close to or exceeds the maximum capacity.\n- `IncreaseMaxCapacity` - Amazon EC2 Auto Scaling can increase the maximum capacity of the group when the forecast capacity is close to or exceeds the maximum capacity. The upper limit is determined by the forecasted capacity and the value for `MaxCapacityBuffer` .\n\n> Use caution when allowing the maximum capacity to be automatically increased. This can lead to more instances being launched than intended if the increased maximum capacity is not monitored and managed. The increased maximum capacity then becomes the new normal maximum capacity for the Auto Scaling group until you manually update it. The maximum capacity does not automatically decrease back to the original maximum.", "title": "MaxCapacityBreachBehavior", "type": "string" }, @@ -25716,7 +25713,7 @@ }, "BackupPlanTags": { "additionalProperties": true, - "markdownDescription": "To help organize your resources, you can assign your own metadata to the resources that you create. Each tag is a key-value pair. The specified tags are assigned to all backups created with this plan.", + "markdownDescription": "The tags to assign to the backup plan.", "patternProperties": { "^[a-zA-Z0-9]+$": { "type": "string" @@ -25831,7 +25828,7 @@ }, "RecoveryPointTags": { "additionalProperties": true, - "markdownDescription": "To help organize your resources, you can assign your own metadata to the resources that you create. Each tag is a key-value pair.", + "markdownDescription": "The tags to assign to the resources.", "patternProperties": { "^[a-zA-Z0-9]+$": { "type": "string" @@ -25905,7 +25902,7 @@ "type": "number" }, "OptInToArchiveForSupportedResources": { - "markdownDescription": "Optional Boolean. 
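Because several of the retitled links in this hunk concern simple scaling cooldowns, a short hedged sketch of a simple scaling policy that overrides the default 300-second cooldown; the referenced group is assumed to be defined elsewhere in the template:

Resources:
  ExampleScaleOutPolicy:
    Type: AWS::AutoScaling::ScalingPolicy
    Properties:
      AutoScalingGroupName: !Ref ExampleAsg   # assumed to exist elsewhere
      PolicyType: SimpleScaling
      AdjustmentType: ChangeInCapacity
      ScalingAdjustment: 1
      Cooldown: '120'                         # string-typed; valid only for SimpleScaling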
If this is true, this setting will instruct your backup plan to transition supported resources to archive (cold) storage tier in accordance with your lifecycle settings.", + "markdownDescription": "If the value is true, your backup plan transitions supported resources to archive (cold) storage tier in accordance with your lifecycle settings.", "title": "OptInToArchiveForSupportedResources", "type": "boolean" } @@ -26161,7 +26158,7 @@ }, "BackupVaultTags": { "additionalProperties": true, - "markdownDescription": "Metadata that you can assign to help organize the resources that you create. Each tag is a key-value pair.", + "markdownDescription": "The tags to assign to the backup vault.", "patternProperties": { "^[a-zA-Z0-9]+$": { "type": "string" @@ -26316,7 +26313,7 @@ "items": { "$ref": "#/definitions/Tag" }, - "markdownDescription": "A list of tags with which to tag your framework.", + "markdownDescription": "The tags to assign to your framework.", "title": "FrameworkTags", "type": "array" } @@ -26404,7 +26401,7 @@ "items": { "$ref": "#/definitions/AWS::Backup::Framework.ControlInputParameter" }, - "markdownDescription": "A list of `ParameterName` and `ParameterValue` pairs.", + "markdownDescription": "The name/value pairs.", "title": "ControlInputParameters", "type": "array" }, @@ -26478,7 +26475,7 @@ "items": { "$ref": "#/definitions/Tag" }, - "markdownDescription": "A list of tags to tag your report plan.", + "markdownDescription": "The tags to assign to your report plan.", "title": "ReportPlanTags", "type": "array" }, @@ -26522,7 +26519,7 @@ "items": { "type": "string" }, - "markdownDescription": "A list of the format of your reports: `CSV` , `JSON` , or both. If not specified, the default format is `CSV` .", + "markdownDescription": "The format of your reports: `CSV` , `JSON` , or both. If not specified, the default format is `CSV` .", "title": "Formats", "type": "array" }, @@ -26805,7 +26802,7 @@ "type": "string" }, "RestoreTestingSelectionName": { - "markdownDescription": "This is the unique name of the restore testing selection that belongs to the related restore testing plan.", + "markdownDescription": "The unique name of the restore testing selection that belongs to the related restore testing plan.", "title": "RestoreTestingSelectionName", "type": "string" }, @@ -26848,12 +26845,12 @@ "additionalProperties": false, "properties": { "Key": { - "markdownDescription": "The tag key (String). The key can't start with `aws:` .\n\nLength Constraints: Minimum length of 1. Maximum length of 128.\n\nPattern: `^(?![aA]{1}[wW]{1}[sS]{1}:)([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]+)$`", + "markdownDescription": "The tag key.", "title": "Key", "type": "string" }, "Value": { - "markdownDescription": "The value of the key.\n\nLength Constraints: Maximum length of 256.\n\nPattern: `^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)$`", + "markdownDescription": "The tag value.", "title": "Value", "type": "string" } @@ -27832,6 +27829,11 @@ "AWS::Batch::JobDefinition.EksContainerSecurityContext": { "additionalProperties": false, "properties": { + "AllowPrivilegeEscalation": { + "markdownDescription": "Whether or not a container or a Kubernetes pod is allowed to gain more privileges than its parent process. The default value is `false` .", + "title": "AllowPrivilegeEscalation", + "type": "boolean" + }, "Privileged": { "markdownDescription": "When this parameter is `true` , the container is given elevated permissions on the host container instance. The level of permissions are similar to the `root` user permissions. 
The default value is `false` . This parameter maps to `privileged` policy in the [Privileged pod security policies](https://docs.aws.amazon.com/https://kubernetes.io/docs/concepts/security/pod-security-policy/#privileged) in the *Kubernetes documentation* .", "title": "Privileged", @@ -28037,6 +28039,20 @@ }, "type": "object" }, + "AWS::Batch::JobDefinition.ImagePullSecret": { + "additionalProperties": false, + "properties": { + "Name": { + "markdownDescription": "Provides a unique identifier for the `ImagePullSecret` . This object is required when `EksPodProperties$imagePullSecrets` is used.", + "title": "Name", + "type": "string" + } + }, + "required": [ + "Name" + ], + "type": "object" + }, "AWS::Batch::JobDefinition.LinuxParameters": { "additionalProperties": false, "properties": { @@ -28231,6 +28247,14 @@ "title": "HostNetwork", "type": "boolean" }, + "ImagePullSecrets": { + "items": { + "$ref": "#/definitions/AWS::Batch::JobDefinition.ImagePullSecret" + }, + "markdownDescription": "", + "title": "ImagePullSecrets", + "type": "array" + }, "InitContainers": { "items": { "$ref": "#/definitions/AWS::Batch::JobDefinition.EksContainer" @@ -28858,7 +28882,7 @@ }, "type": "object" }, - "AWS::BillingConductor::BillingGroup": { + "AWS::Bedrock::Agent": { "additionalProperties": false, "properties": { "Condition": { @@ -28893,51 +28917,92 @@ "Properties": { "additionalProperties": false, "properties": { - "AccountGrouping": { - "$ref": "#/definitions/AWS::BillingConductor::BillingGroup.AccountGrouping", - "markdownDescription": "The set of accounts that will be under the billing group. The set of accounts resemble the linked accounts in a consolidated billing family.", - "title": "AccountGrouping" + "ActionGroups": { + "items": { + "$ref": "#/definitions/AWS::Bedrock::Agent.AgentActionGroup" + }, + "markdownDescription": "The action groups that belong to an agent.", + "title": "ActionGroups", + "type": "array" }, - "ComputationPreference": { - "$ref": "#/definitions/AWS::BillingConductor::BillingGroup.ComputationPreference", - "markdownDescription": "The preferences and settings that will be used to compute the AWS charges for a billing group.", - "title": "ComputationPreference" + "AgentName": { + "markdownDescription": "The name of the agent.", + "title": "AgentName", + "type": "string" + }, + "AgentResourceRoleArn": { + "markdownDescription": "The Amazon Resource Name (ARN) of the IAM role with permissions to invoke API operations on the agent.", + "title": "AgentResourceRoleArn", + "type": "string" + }, + "AutoPrepare": { + "markdownDescription": "Specifies whether to automatically update the `DRAFT` version of the agent after making changes to the agent. The `DRAFT` version can be continually iterated upon during internal development. 
By default, this value is `false` .", + "title": "AutoPrepare", + "type": "boolean" + }, + "CustomerEncryptionKeyArn": { + "markdownDescription": "The Amazon Resource Name (ARN) of the AWS KMS key that encrypts the agent.", + "title": "CustomerEncryptionKeyArn", + "type": "string" }, "Description": { - "markdownDescription": "The description of the billing group.", + "markdownDescription": "The description of the agent.", "title": "Description", "type": "string" }, - "Name": { - "markdownDescription": "The billing group's name.", - "title": "Name", + "FoundationModel": { + "markdownDescription": "The foundation model used for orchestration by the agent.", + "title": "FoundationModel", "type": "string" }, - "PrimaryAccountId": { - "markdownDescription": "The account ID that serves as the main account in a billing group.", - "title": "PrimaryAccountId", + "IdleSessionTTLInSeconds": { + "markdownDescription": "The number of seconds for which Amazon Bedrock keeps information about a user's conversation with the agent.\n\nA user interaction remains active for the amount of time specified. If no conversation occurs during this time, the session expires and Amazon Bedrock deletes any data provided before the timeout.", + "title": "IdleSessionTTLInSeconds", + "type": "number" + }, + "Instruction": { + "markdownDescription": "Instructions that tell the agent what it should do and how it should interact with users.", + "title": "Instruction", "type": "string" }, - "Tags": { + "KnowledgeBases": { "items": { - "$ref": "#/definitions/Tag" + "$ref": "#/definitions/AWS::Bedrock::Agent.AgentKnowledgeBase" }, - "markdownDescription": "A map that contains tag keys and tag values that are attached to a billing group.", - "title": "Tags", + "markdownDescription": "The knowledge bases associated with the agent.", + "title": "KnowledgeBases", "type": "array" + }, + "PromptOverrideConfiguration": { + "$ref": "#/definitions/AWS::Bedrock::Agent.PromptOverrideConfiguration", + "markdownDescription": "Contains configurations to override prompt templates in different parts of an agent sequence. For more information, see [Advanced prompts](https://docs.aws.amazon.com/bedrock/latest/userguide/advanced-prompts.html) .", + "title": "PromptOverrideConfiguration" + }, + "SkipResourceInUseCheckOnDelete": { + "markdownDescription": "Specifies whether to delete the resource even if it's in use. By default, this value is `false` .", + "title": "SkipResourceInUseCheckOnDelete", + "type": "boolean" + }, + "Tags": { + "additionalProperties": true, + "markdownDescription": "Metadata that you can assign to a resource as key-value pairs. 
For more information, see the following resources:\n\n- [Tag naming limits and requirements](https://docs.aws.amazon.com/tag-editor/latest/userguide/tagging.html#tag-conventions)\n- [Tagging best practices](https://docs.aws.amazon.com/tag-editor/latest/userguide/tagging.html#tag-best-practices)", + "patternProperties": { + "^[a-zA-Z0-9]+$": { + "type": "string" + } + }, + "title": "Tags", + "type": "object" } }, "required": [ - "AccountGrouping", - "ComputationPreference", - "Name", - "PrimaryAccountId" + "AgentName" ], "type": "object" }, "Type": { "enum": [ - "AWS::BillingConductor::BillingGroup" + "AWS::Bedrock::Agent" ], "type": "string" }, @@ -28956,257 +29021,1231 @@ ], "type": "object" }, - "AWS::BillingConductor::BillingGroup.AccountGrouping": { + "AWS::Bedrock::Agent.APISchema": { "additionalProperties": false, "properties": { - "AutoAssociate": { - "markdownDescription": "Specifies if this billing group will automatically associate newly added AWS accounts that join your consolidated billing family.", - "title": "AutoAssociate", - "type": "boolean" + "Payload": { + "markdownDescription": "The JSON or YAML-formatted payload defining the OpenAPI schema for the action group. For more information, see [Action group OpenAPI schemas](https://docs.aws.amazon.com/bedrock/latest/userguide/agents-api-schema.html) .", + "title": "Payload", + "type": "string" }, - "LinkedAccountIds": { - "items": { - "type": "string" - }, - "markdownDescription": "The account IDs that make up the billing group. Account IDs must be a part of the consolidated billing family, and not associated with another billing group.", - "title": "LinkedAccountIds", - "type": "array" + "S3": { + "$ref": "#/definitions/AWS::Bedrock::Agent.S3Identifier", + "markdownDescription": "Contains details about the S3 object containing the OpenAPI schema for the action group. 
For more information, see [Action group OpenAPI schemas](https://docs.aws.amazon.com/bedrock/latest/userguide/agents-api-schema.html) .", + "title": "S3" } }, - "required": [ - "LinkedAccountIds" - ], "type": "object" }, - "AWS::BillingConductor::BillingGroup.ComputationPreference": { + "AWS::Bedrock::Agent.ActionGroupExecutor": { "additionalProperties": false, "properties": { - "PricingPlanArn": { - "markdownDescription": "The Amazon Resource Name (ARN) of the pricing plan used to compute the AWS charges for a billing group.", - "title": "PricingPlanArn", + "Lambda": { + "markdownDescription": "The Amazon Resource Name (ARN) of the Lambda function containing the business logic that is carried out upon invoking the action.", + "title": "Lambda", "type": "string" } }, "required": [ - "PricingPlanArn" + "Lambda" ], "type": "object" }, - "AWS::BillingConductor::CustomLineItem": { + "AWS::Bedrock::Agent.AgentActionGroup": { "additionalProperties": false, "properties": { - "Condition": { - "type": "string" + "ActionGroupExecutor": { + "$ref": "#/definitions/AWS::Bedrock::Agent.ActionGroupExecutor", + "markdownDescription": "The Amazon Resource Name (ARN) of the Lambda function containing the business logic that is carried out upon invoking the action.", + "title": "ActionGroupExecutor" }, - "DeletionPolicy": { - "enum": [ - "Delete", - "Retain", - "Snapshot" - ], + "ActionGroupName": { + "markdownDescription": "The name of the action group.", + "title": "ActionGroupName", "type": "string" }, - "DependsOn": { - "anyOf": [ - { - "pattern": "^[a-zA-Z0-9]+$", - "type": "string" - }, - { - "items": { - "pattern": "^[a-zA-Z0-9]+$", - "type": "string" - }, - "type": "array" - } - ] + "ActionGroupState": { + "markdownDescription": "Specifies whether the action group is available for the agent to invoke or not when sending an [InvokeAgent](https://docs.aws.amazon.com/bedrock/latest/APIReference/API_agent-runtime_InvokeAgent.html) request.", + "title": "ActionGroupState", + "type": "string" }, - "Metadata": { - "type": "object" + "ApiSchema": { + "$ref": "#/definitions/AWS::Bedrock::Agent.APISchema", + "markdownDescription": "Contains either details about the S3 object containing the OpenAPI schema for the action group or the JSON or YAML-formatted payload defining the schema. For more information, see [Action group OpenAPI schemas](https://docs.aws.amazon.com/bedrock/latest/userguide/agents-api-schema.html) .", + "title": "ApiSchema" }, - "Properties": { - "additionalProperties": false, - "properties": { - "AccountId": { - "markdownDescription": "The AWS account in which this custom line item will be applied to.", - "title": "AccountId", - "type": "string" - }, - "BillingGroupArn": { - "markdownDescription": "The Amazon Resource Name (ARN) that references the billing group where the custom line item applies to.", - "title": "BillingGroupArn", - "type": "string" - }, - "BillingPeriodRange": { - "$ref": "#/definitions/AWS::BillingConductor::CustomLineItem.BillingPeriodRange", - "markdownDescription": "A time range for which the custom line item is effective.", - "title": "BillingPeriodRange" - }, - "CustomLineItemChargeDetails": { - "$ref": "#/definitions/AWS::BillingConductor::CustomLineItem.CustomLineItemChargeDetails", - "markdownDescription": "The charge details of a custom line item. It should contain only one of `Flat` or `Percentage` .", - "title": "CustomLineItemChargeDetails" - }, - "Description": { - "markdownDescription": "The custom line item's description. 
This is shown on the Bills page in association with the charge value.", - "title": "Description", - "type": "string" - }, - "Name": { - "markdownDescription": "The custom line item's name.", - "title": "Name", - "type": "string" - }, - "Tags": { - "items": { - "$ref": "#/definitions/Tag" - }, - "markdownDescription": "A map that contains tag keys and tag values that are attached to a custom line item.", - "title": "Tags", - "type": "array" - } - }, - "required": [ - "BillingGroupArn", - "Name" - ], - "type": "object" - }, - "Type": { - "enum": [ - "AWS::BillingConductor::CustomLineItem" - ], + "Description": { + "markdownDescription": "The description of the action group.", + "title": "Description", "type": "string" }, - "UpdateReplacePolicy": { - "enum": [ - "Delete", - "Retain", - "Snapshot" - ], + "ParentActionGroupSignature": { + "markdownDescription": "If this field is set as `AMAZON.UserInput` , the agent can request the user for additional information when trying to complete a task. The `description` , `apiSchema` , and `actionGroupExecutor` fields must be blank for this action group.\n\nDuring orchestration, if the agent determines that it needs to invoke an API in an action group, but doesn't have enough information to complete the API request, it will invoke this action group instead and return an [Observation](https://docs.aws.amazon.com/bedrock/latest/APIReference/API_agent-runtime_Observation.html) reprompting the user for more information.", + "title": "ParentActionGroupSignature", "type": "string" + }, + "SkipResourceInUseCheckOnDelete": { + "markdownDescription": "Specifies whether to delete the resource even if it's in use. By default, this value is `false` .", + "title": "SkipResourceInUseCheckOnDelete", + "type": "boolean" } }, "required": [ - "Type", - "Properties" + "ActionGroupName" ], "type": "object" }, - "AWS::BillingConductor::CustomLineItem.BillingPeriodRange": { + "AWS::Bedrock::Agent.AgentKnowledgeBase": { "additionalProperties": false, "properties": { - "ExclusiveEndBillingPeriod": { - "markdownDescription": "The exclusive end billing period that defines a billing period range where a custom line is applied.", - "title": "ExclusiveEndBillingPeriod", + "Description": { + "markdownDescription": "The description of the association between the agent and the knowledge base.", + "title": "Description", "type": "string" }, - "InclusiveStartBillingPeriod": { - "markdownDescription": "The inclusive start billing period that defines a billing period range where a custom line is applied.", - "title": "InclusiveStartBillingPeriod", + "KnowledgeBaseId": { + "markdownDescription": "The unique identifier of the association between the agent and the knowledge base.", + "title": "KnowledgeBaseId", + "type": "string" + }, + "KnowledgeBaseState": { + "markdownDescription": "Specifies whether to use the knowledge base or not when sending an [InvokeAgent](https://docs.aws.amazon.com/bedrock/latest/APIReference/API_agent-runtime_InvokeAgent.html) request.", + "title": "KnowledgeBaseState", "type": "string" } }, + "required": [ + "Description", + "KnowledgeBaseId" + ], "type": "object" }, - "AWS::BillingConductor::CustomLineItem.CustomLineItemChargeDetails": { + "AWS::Bedrock::Agent.InferenceConfiguration": { "additionalProperties": false, "properties": { - "Flat": { - "$ref": "#/definitions/AWS::BillingConductor::CustomLineItem.CustomLineItemFlatChargeDetails", - "markdownDescription": "A `CustomLineItemFlatChargeDetails` that describes the charge details of a flat custom line 
item.", - "title": "Flat" + "MaximumLength": { + "markdownDescription": "The maximum number of tokens to allow in the generated response.", + "title": "MaximumLength", + "type": "number" }, - "LineItemFilters": { + "StopSequences": { "items": { - "$ref": "#/definitions/AWS::BillingConductor::CustomLineItem.LineItemFilter" + "type": "string" }, - "markdownDescription": "A representation of the line item filter.", - "title": "LineItemFilters", + "markdownDescription": "A list of stop sequences. A stop sequence is a sequence of characters that causes the model to stop generating the response.", + "title": "StopSequences", "type": "array" }, - "Percentage": { - "$ref": "#/definitions/AWS::BillingConductor::CustomLineItem.CustomLineItemPercentageChargeDetails", - "markdownDescription": "A `CustomLineItemPercentageChargeDetails` that describes the charge details of a percentage custom line item.", - "title": "Percentage" + "Temperature": { + "markdownDescription": "The likelihood of the model selecting higher-probability options while generating a response. A lower value makes the model more likely to choose higher-probability options, while a higher value makes the model more likely to choose lower-probability options.", + "title": "Temperature", + "type": "number" }, - "Type": { - "markdownDescription": "The type of the custom line item that indicates whether the charge is a fee or credit.", - "title": "Type", - "type": "string" + "TopK": { + "markdownDescription": "While generating a response, the model determines the probability of the following token at each point of generation. The value that you set for `topK` is the number of most-likely candidates from which the model chooses the next token in the sequence. For example, if you set `topK` to 50, the model selects the next token from among the top 50 most likely choices.", + "title": "TopK", + "type": "number" + }, + "TopP": { + "markdownDescription": "While generating a response, the model determines the probability of the following token at each point of generation. The value that you set for `Top P` determines the number of most-likely candidates from which the model chooses the next token in the sequence. For example, if you set `topP` to 80, the model only selects the next token from the top 80% of the probability distribution of next tokens.", + "title": "TopP", + "type": "number" } }, - "required": [ - "Type" - ], "type": "object" }, - "AWS::BillingConductor::CustomLineItem.CustomLineItemFlatChargeDetails": { + "AWS::Bedrock::Agent.PromptConfiguration": { "additionalProperties": false, "properties": { - "ChargeValue": { - "markdownDescription": "The custom line item's fixed charge value in USD.", - "title": "ChargeValue", - "type": "number" + "BasePromptTemplate": { + "markdownDescription": "Defines the prompt template with which to replace the default prompt template. You can use placeholder variables in the base prompt template to customize the prompt. For more information, see [Prompt template placeholder variables](https://docs.aws.amazon.com/bedrock/latest/userguide/prompt-placeholders.html) .", + "title": "BasePromptTemplate", + "type": "string" + }, + "InferenceConfiguration": { + "$ref": "#/definitions/AWS::Bedrock::Agent.InferenceConfiguration", + "markdownDescription": "Contains inference parameters to use when the agent invokes a foundation model in the part of the agent sequence defined by the `promptType` . 
For more information, see [Inference parameters for foundation models](https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters.html) .", + "title": "InferenceConfiguration" + }, + "ParserMode": { + "markdownDescription": "Specifies whether to override the default parser Lambda function when parsing the raw foundation model output in the part of the agent sequence defined by the `promptType` . If you set the field as `OVERRIDEN` , the `overrideLambda` field in the [PromptOverrideConfiguration](https://docs.aws.amazon.com/bedrock/latest/APIReference/API_agent_PromptOverrideConfiguration.html) must be specified with the ARN of a Lambda function.", + "title": "ParserMode", + "type": "string" + }, + "PromptCreationMode": { + "markdownDescription": "Specifies whether to override the default prompt template for this `promptType` . Set this value to `OVERRIDDEN` to use the prompt that you provide in the `basePromptTemplate` . If you leave it as `DEFAULT` , the agent uses a default prompt template.", + "title": "PromptCreationMode", + "type": "string" + }, + "PromptState": { + "markdownDescription": "Specifies whether to allow the agent to carry out the step specified in the `promptType` . If you set this value to `DISABLED` , the agent skips that step. The default state for each `promptType` is as follows.\n\n- `PRE_PROCESSING` \u2013 `ENABLED`\n- `ORCHESTRATION` \u2013 `ENABLED`\n- `KNOWLEDGE_BASE_RESPONSE_GENERATION` \u2013 `ENABLED`\n- `POST_PROCESSING` \u2013 `DISABLED`", + "title": "PromptState", + "type": "string" + }, + "PromptType": { + "markdownDescription": "The step in the agent sequence that this prompt configuration applies to.", + "title": "PromptType", + "type": "string" } }, - "required": [ - "ChargeValue" - ], "type": "object" }, - "AWS::BillingConductor::CustomLineItem.CustomLineItemPercentageChargeDetails": { + "AWS::Bedrock::Agent.PromptOverrideConfiguration": { "additionalProperties": false, "properties": { - "ChildAssociatedResources": { + "OverrideLambda": { + "markdownDescription": "The ARN of the Lambda function to use when parsing the raw foundation model output in parts of the agent sequence. If you specify this field, at least one of the `promptConfigurations` must contain a `parserMode` value that is set to `OVERRIDDEN` .", + "title": "OverrideLambda", + "type": "string" + }, + "PromptConfigurations": { "items": { - "type": "string" + "$ref": "#/definitions/AWS::Bedrock::Agent.PromptConfiguration" }, - "markdownDescription": "A list of resource ARNs to associate to the percentage custom line item.", - "title": "ChildAssociatedResources", + "markdownDescription": "Contains configurations to override a prompt template in one part of an agent sequence. For more information, see [Advanced prompts](https://docs.aws.amazon.com/bedrock/latest/userguide/advanced-prompts.html) .", + "title": "PromptConfigurations", "type": "array" - }, - "PercentageValue": { - "markdownDescription": "The custom line item's percentage value. This will be multiplied against the combined value of its associated resources to determine its charge value.", - "title": "PercentageValue", - "type": "number" } }, "required": [ - "PercentageValue" + "PromptConfigurations" ], "type": "object" }, - "AWS::BillingConductor::CustomLineItem.LineItemFilter": { + "AWS::Bedrock::Agent.S3Identifier": { "additionalProperties": false, "properties": { - "Attribute": { - "markdownDescription": "The attribute of the line item filter. 
This specifies what attribute that you can filter on.", - "title": "Attribute", + "S3BucketName": { + "markdownDescription": "The name of the S3 bucket.", + "title": "S3BucketName", "type": "string" }, - "MatchOption": { - "markdownDescription": "The match criteria of the line item filter. This parameter specifies whether not to include the resource value from the billing group total cost.", - "title": "MatchOption", + "S3ObjectKey": { + "markdownDescription": "The S3 object key containing the resource.", + "title": "S3ObjectKey", "type": "string" - }, - "Values": { - "items": { - "type": "string" - }, - "markdownDescription": "The values of the line item filter. This specifies the values to filter on. Currently, you can only exclude Savings Plan discounts.", - "title": "Values", - "type": "array" } }, - "required": [ - "Attribute", - "MatchOption", - "Values" - ], "type": "object" }, - "AWS::BillingConductor::PricingPlan": { + "AWS::Bedrock::AgentAlias": { + "additionalProperties": false, + "properties": { + "Condition": { + "type": "string" + }, + "DeletionPolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + }, + "DependsOn": { + "anyOf": [ + { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + { + "items": { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + "type": "array" + } + ] + }, + "Metadata": { + "type": "object" + }, + "Properties": { + "additionalProperties": false, + "properties": { + "AgentAliasName": { + "markdownDescription": "The name of the alias of the agent.", + "title": "AgentAliasName", + "type": "string" + }, + "AgentId": { + "markdownDescription": "The unique identifier of the agent.", + "title": "AgentId", + "type": "string" + }, + "Description": { + "markdownDescription": "The description of the alias of the agent.", + "title": "Description", + "type": "string" + }, + "RoutingConfiguration": { + "items": { + "$ref": "#/definitions/AWS::Bedrock::AgentAlias.AgentAliasRoutingConfigurationListItem" + }, + "markdownDescription": "Contains details about the routing configuration of the alias.", + "title": "RoutingConfiguration", + "type": "array" + }, + "Tags": { + "additionalProperties": true, + "markdownDescription": "Metadata that you can assign to a resource as key-value pairs. 
For more information, see the following resources:\n\n- [Tag naming limits and requirements](https://docs.aws.amazon.com/tag-editor/latest/userguide/tagging.html#tag-conventions)\n- [Tagging best practices](https://docs.aws.amazon.com/tag-editor/latest/userguide/tagging.html#tag-best-practices)", + "patternProperties": { + "^[a-zA-Z0-9]+$": { + "type": "string" + } + }, + "title": "Tags", + "type": "object" + } + }, + "required": [ + "AgentAliasName", + "AgentId" + ], + "type": "object" + }, + "Type": { + "enum": [ + "AWS::Bedrock::AgentAlias" + ], + "type": "string" + }, + "UpdateReplacePolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + } + }, + "required": [ + "Type", + "Properties" + ], + "type": "object" + }, + "AWS::Bedrock::AgentAlias.AgentAliasHistoryEvent": { + "additionalProperties": false, + "properties": { + "EndDate": { + "markdownDescription": "The date that the alias stopped being associated to the version in the `routingConfiguration` object", + "title": "EndDate", + "type": "string" + }, + "RoutingConfiguration": { + "items": { + "$ref": "#/definitions/AWS::Bedrock::AgentAlias.AgentAliasRoutingConfigurationListItem" + }, + "markdownDescription": "Contains details about the version of the agent with which the alias is associated.", + "title": "RoutingConfiguration", + "type": "array" + }, + "StartDate": { + "markdownDescription": "The date that the alias began being associated to the version in the `routingConfiguration` object.", + "title": "StartDate", + "type": "string" + } + }, + "type": "object" + }, + "AWS::Bedrock::AgentAlias.AgentAliasRoutingConfigurationListItem": { + "additionalProperties": false, + "properties": { + "AgentVersion": { + "markdownDescription": "The version of the agent with which the alias is associated.", + "title": "AgentVersion", + "type": "string" + } + }, + "required": [ + "AgentVersion" + ], + "type": "object" + }, + "AWS::Bedrock::DataSource": { + "additionalProperties": false, + "properties": { + "Condition": { + "type": "string" + }, + "DeletionPolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + }, + "DependsOn": { + "anyOf": [ + { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + { + "items": { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + "type": "array" + } + ] + }, + "Metadata": { + "type": "object" + }, + "Properties": { + "additionalProperties": false, + "properties": { + "DataSourceConfiguration": { + "$ref": "#/definitions/AWS::Bedrock::DataSource.DataSourceConfiguration", + "markdownDescription": "Contains details about how the data source is stored.", + "title": "DataSourceConfiguration" + }, + "Description": { + "markdownDescription": "The description of the data source.", + "title": "Description", + "type": "string" + }, + "KnowledgeBaseId": { + "markdownDescription": "The unique identifier of the knowledge base to which the data source belongs.", + "title": "KnowledgeBaseId", + "type": "string" + }, + "Name": { + "markdownDescription": "The name of the data source.", + "title": "Name", + "type": "string" + }, + "ServerSideEncryptionConfiguration": { + "$ref": "#/definitions/AWS::Bedrock::DataSource.ServerSideEncryptionConfiguration", + "markdownDescription": "Contains details about the configuration of the server-side encryption.", + "title": "ServerSideEncryptionConfiguration" + }, + "VectorIngestionConfiguration": { + "$ref": "#/definitions/AWS::Bedrock::DataSource.VectorIngestionConfiguration", + "markdownDescription": "Contains details 
about how to ingest the documents in the data source.", + "title": "VectorIngestionConfiguration" + } + }, + "required": [ + "DataSourceConfiguration", + "KnowledgeBaseId", + "Name" + ], + "type": "object" + }, + "Type": { + "enum": [ + "AWS::Bedrock::DataSource" + ], + "type": "string" + }, + "UpdateReplacePolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + } + }, + "required": [ + "Type", + "Properties" + ], + "type": "object" + }, + "AWS::Bedrock::DataSource.ChunkingConfiguration": { + "additionalProperties": false, + "properties": { + "ChunkingStrategy": { + "markdownDescription": "A knowledge base can split your source data into chunks. A *chunk* refers to an excerpt from a data source that is returned when the knowledge base that it belongs to is queried. You have the following options for chunking your data. If you opt for `NONE` , then you may want to pre-process your files by splitting them up such that each file corresponds to a chunk.\n\n- `FIXED_SIZE` \u2013 Amazon Bedrock splits your source data into chunks of the approximate size that you set in the `fixedSizeChunkingConfiguration` .\n- `NONE` \u2013 Amazon Bedrock treats each file as one chunk. If you choose this option, you may want to pre-process your documents by splitting them into separate files.", + "title": "ChunkingStrategy", + "type": "string" + }, + "FixedSizeChunkingConfiguration": { + "$ref": "#/definitions/AWS::Bedrock::DataSource.FixedSizeChunkingConfiguration", + "markdownDescription": "Configurations for when you choose fixed-size chunking. If you set the `chunkingStrategy` as `NONE` , exclude this field.", + "title": "FixedSizeChunkingConfiguration" + } + }, + "required": [ + "ChunkingStrategy" + ], + "type": "object" + }, + "AWS::Bedrock::DataSource.DataSourceConfiguration": { + "additionalProperties": false, + "properties": { + "S3Configuration": { + "$ref": "#/definitions/AWS::Bedrock::DataSource.S3DataSourceConfiguration", + "markdownDescription": "Contains details about the configuration of the S3 object containing the data source.", + "title": "S3Configuration" + }, + "Type": { + "markdownDescription": "The type of storage for the data source.", + "title": "Type", + "type": "string" + } + }, + "required": [ + "S3Configuration", + "Type" + ], + "type": "object" + }, + "AWS::Bedrock::DataSource.FixedSizeChunkingConfiguration": { + "additionalProperties": false, + "properties": { + "MaxTokens": { + "markdownDescription": "The maximum number of tokens to include in a chunk.", + "title": "MaxTokens", + "type": "number" + }, + "OverlapPercentage": { + "markdownDescription": "The percentage of overlap between adjacent chunks of a data source.", + "title": "OverlapPercentage", + "type": "number" + } + }, + "required": [ + "MaxTokens", + "OverlapPercentage" + ], + "type": "object" + }, + "AWS::Bedrock::DataSource.S3DataSourceConfiguration": { + "additionalProperties": false, + "properties": { + "BucketArn": { + "markdownDescription": "The Amazon Resource Name (ARN) of the bucket that contains the data source.", + "title": "BucketArn", + "type": "string" + }, + "InclusionPrefixes": { + "items": { + "type": "string" + }, + "markdownDescription": "A list of S3 prefixes that define the object containing the data sources. 
For more information, see [Organizing objects using prefixes](https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-prefixes.html) .", + "title": "InclusionPrefixes", + "type": "array" + } + }, + "required": [ + "BucketArn" + ], + "type": "object" + }, + "AWS::Bedrock::DataSource.ServerSideEncryptionConfiguration": { + "additionalProperties": false, + "properties": { + "KmsKeyArn": { + "markdownDescription": "The Amazon Resource Name (ARN) of the AWS KMS key used to encrypt the resource.", + "title": "KmsKeyArn", + "type": "string" + } + }, + "type": "object" + }, + "AWS::Bedrock::DataSource.VectorIngestionConfiguration": { + "additionalProperties": false, + "properties": { + "ChunkingConfiguration": { + "$ref": "#/definitions/AWS::Bedrock::DataSource.ChunkingConfiguration", + "markdownDescription": "Details about how to chunk the documents in the data source. A *chunk* refers to an excerpt from a data source that is returned when the knowledge base that it belongs to is queried.", + "title": "ChunkingConfiguration" + } + }, + "type": "object" + }, + "AWS::Bedrock::KnowledgeBase": { + "additionalProperties": false, + "properties": { + "Condition": { + "type": "string" + }, + "DeletionPolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + }, + "DependsOn": { + "anyOf": [ + { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + { + "items": { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + "type": "array" + } + ] + }, + "Metadata": { + "type": "object" + }, + "Properties": { + "additionalProperties": false, + "properties": { + "Description": { + "markdownDescription": "The description of the knowledge base.", + "title": "Description", + "type": "string" + }, + "KnowledgeBaseConfiguration": { + "$ref": "#/definitions/AWS::Bedrock::KnowledgeBase.KnowledgeBaseConfiguration", + "markdownDescription": "Contains details about the embeddings configuration of the knowledge base.", + "title": "KnowledgeBaseConfiguration" + }, + "Name": { + "markdownDescription": "The name of the knowledge base.", + "title": "Name", + "type": "string" + }, + "RoleArn": { + "markdownDescription": "The Amazon Resource Name (ARN) of the IAM role with permissions to invoke API operations on the knowledge base.", + "title": "RoleArn", + "type": "string" + }, + "StorageConfiguration": { + "$ref": "#/definitions/AWS::Bedrock::KnowledgeBase.StorageConfiguration", + "markdownDescription": "Contains details about the storage configuration of the knowledge base.", + "title": "StorageConfiguration" + }, + "Tags": { + "additionalProperties": true, + "markdownDescription": "Metadata that you can assign to a resource as key-value pairs. 
For more information, see the following resources:\n\n- [Tag naming limits and requirements](https://docs.aws.amazon.com/tag-editor/latest/userguide/tagging.html#tag-conventions)\n- [Tagging best practices](https://docs.aws.amazon.com/tag-editor/latest/userguide/tagging.html#tag-best-practices)", + "patternProperties": { + "^[a-zA-Z0-9]+$": { + "type": "string" + } + }, + "title": "Tags", + "type": "object" + } + }, + "required": [ + "KnowledgeBaseConfiguration", + "Name", + "RoleArn", + "StorageConfiguration" + ], + "type": "object" + }, + "Type": { + "enum": [ + "AWS::Bedrock::KnowledgeBase" + ], + "type": "string" + }, + "UpdateReplacePolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + } + }, + "required": [ + "Type", + "Properties" + ], + "type": "object" + }, + "AWS::Bedrock::KnowledgeBase.KnowledgeBaseConfiguration": { + "additionalProperties": false, + "properties": { + "Type": { + "markdownDescription": "The type of data that the data source is converted into for the knowledge base.", + "title": "Type", + "type": "string" + }, + "VectorKnowledgeBaseConfiguration": { + "$ref": "#/definitions/AWS::Bedrock::KnowledgeBase.VectorKnowledgeBaseConfiguration", + "markdownDescription": "Contains details about the embeddings model that's used to convert the data source.", + "title": "VectorKnowledgeBaseConfiguration" + } + }, + "required": [ + "Type", + "VectorKnowledgeBaseConfiguration" + ], + "type": "object" + }, + "AWS::Bedrock::KnowledgeBase.OpenSearchServerlessConfiguration": { + "additionalProperties": false, + "properties": { + "CollectionArn": { + "markdownDescription": "The Amazon Resource Name (ARN) of the OpenSearch Service vector store.", + "title": "CollectionArn", + "type": "string" + }, + "FieldMapping": { + "$ref": "#/definitions/AWS::Bedrock::KnowledgeBase.OpenSearchServerlessFieldMapping", + "markdownDescription": "Contains the names of the fields to which to map information about the vector store.", + "title": "FieldMapping" + }, + "VectorIndexName": { + "markdownDescription": "The name of the vector store.", + "title": "VectorIndexName", + "type": "string" + } + }, + "required": [ + "CollectionArn", + "FieldMapping", + "VectorIndexName" + ], + "type": "object" + }, + "AWS::Bedrock::KnowledgeBase.OpenSearchServerlessFieldMapping": { + "additionalProperties": false, + "properties": { + "MetadataField": { + "markdownDescription": "The name of the field in which Amazon Bedrock stores metadata about the vector store.", + "title": "MetadataField", + "type": "string" + }, + "TextField": { + "markdownDescription": "The name of the field in which Amazon Bedrock stores the raw text from your data. 
The text is split according to the chunking strategy you choose.", + "title": "TextField", + "type": "string" + }, + "VectorField": { + "markdownDescription": "The name of the field in which Amazon Bedrock stores the vector embeddings for your data sources.", + "title": "VectorField", + "type": "string" + } + }, + "required": [ + "MetadataField", + "TextField", + "VectorField" + ], + "type": "object" + }, + "AWS::Bedrock::KnowledgeBase.PineconeConfiguration": { + "additionalProperties": false, + "properties": { + "ConnectionString": { + "markdownDescription": "The endpoint URL for your index management page.", + "title": "ConnectionString", + "type": "string" + }, + "CredentialsSecretArn": { + "markdownDescription": "The Amazon Resource Name (ARN) of the secret that you created in AWS Secrets Manager that is linked to your Pinecone API key.", + "title": "CredentialsSecretArn", + "type": "string" + }, + "FieldMapping": { + "$ref": "#/definitions/AWS::Bedrock::KnowledgeBase.PineconeFieldMapping", + "markdownDescription": "Contains the names of the fields to which to map information about the vector store.", + "title": "FieldMapping" + }, + "Namespace": { + "markdownDescription": "The namespace to be used to write new data to your database.", + "title": "Namespace", + "type": "string" + } + }, + "required": [ + "ConnectionString", + "CredentialsSecretArn", + "FieldMapping" + ], + "type": "object" + }, + "AWS::Bedrock::KnowledgeBase.PineconeFieldMapping": { + "additionalProperties": false, + "properties": { + "MetadataField": { + "markdownDescription": "The name of the field in which Amazon Bedrock stores metadata about the vector store.", + "title": "MetadataField", + "type": "string" + }, + "TextField": { + "markdownDescription": "The name of the field in which Amazon Bedrock stores the raw text from your data. 
The text is split according to the chunking strategy you choose.", + "title": "TextField", + "type": "string" + } + }, + "required": [ + "MetadataField", + "TextField" + ], + "type": "object" + }, + "AWS::Bedrock::KnowledgeBase.RdsConfiguration": { + "additionalProperties": false, + "properties": { + "CredentialsSecretArn": { + "markdownDescription": "The Amazon Resource Name (ARN) of the secret that you created in AWS Secrets Manager that is linked to your Amazon RDS database.", + "title": "CredentialsSecretArn", + "type": "string" + }, + "DatabaseName": { + "markdownDescription": "The name of your Amazon RDS database.", + "title": "DatabaseName", + "type": "string" + }, + "FieldMapping": { + "$ref": "#/definitions/AWS::Bedrock::KnowledgeBase.RdsFieldMapping", + "markdownDescription": "Contains the names of the fields to which to map information about the vector store.", + "title": "FieldMapping" + }, + "ResourceArn": { + "markdownDescription": "The Amazon Resource Name (ARN) of the vector store.", + "title": "ResourceArn", + "type": "string" + }, + "TableName": { + "markdownDescription": "The name of the table in the database.", + "title": "TableName", + "type": "string" + } + }, + "required": [ + "CredentialsSecretArn", + "DatabaseName", + "FieldMapping", + "ResourceArn", + "TableName" + ], + "type": "object" + }, + "AWS::Bedrock::KnowledgeBase.RdsFieldMapping": { + "additionalProperties": false, + "properties": { + "MetadataField": { + "markdownDescription": "The name of the field in which Amazon Bedrock stores metadata about the vector store.", + "title": "MetadataField", + "type": "string" + }, + "PrimaryKeyField": { + "markdownDescription": "The name of the field in which Amazon Bedrock stores the ID for each entry.", + "title": "PrimaryKeyField", + "type": "string" + }, + "TextField": { + "markdownDescription": "The name of the field in which Amazon Bedrock stores the raw text from your data. The text is split according to the chunking strategy you choose.", + "title": "TextField", + "type": "string" + }, + "VectorField": { + "markdownDescription": "The name of the field in which Amazon Bedrock stores the vector embeddings for your data sources.", + "title": "VectorField", + "type": "string" + } + }, + "required": [ + "MetadataField", + "PrimaryKeyField", + "TextField", + "VectorField" + ], + "type": "object" + }, + "AWS::Bedrock::KnowledgeBase.StorageConfiguration": { + "additionalProperties": false, + "properties": { + "OpensearchServerlessConfiguration": { + "$ref": "#/definitions/AWS::Bedrock::KnowledgeBase.OpenSearchServerlessConfiguration", + "markdownDescription": "Contains the storage configuration of the knowledge base in Amazon OpenSearch Service.", + "title": "OpensearchServerlessConfiguration" + }, + "PineconeConfiguration": { + "$ref": "#/definitions/AWS::Bedrock::KnowledgeBase.PineconeConfiguration", + "markdownDescription": "Contains the storage configuration of the knowledge base in Pinecone.", + "title": "PineconeConfiguration" + }, + "RdsConfiguration": { + "$ref": "#/definitions/AWS::Bedrock::KnowledgeBase.RdsConfiguration", + "markdownDescription": "Contains details about the storage configuration of the knowledge base in Amazon RDS. 
For more information, see [Create a vector index in Amazon RDS](https://docs.aws.amazon.com/bedrock/latest/userguide/knowledge-base-setup-rds.html) .", + "title": "RdsConfiguration" + }, + "Type": { + "markdownDescription": "The vector store service in which the knowledge base is stored.", + "title": "Type", + "type": "string" + } + }, + "required": [ + "Type" + ], + "type": "object" + }, + "AWS::Bedrock::KnowledgeBase.VectorKnowledgeBaseConfiguration": { + "additionalProperties": false, + "properties": { + "EmbeddingModelArn": { + "markdownDescription": "The Amazon Resource Name (ARN) of the model used to create vector embeddings for the knowledge base.", + "title": "EmbeddingModelArn", + "type": "string" + } + }, + "required": [ + "EmbeddingModelArn" + ], + "type": "object" + }, + "AWS::BillingConductor::BillingGroup": { + "additionalProperties": false, + "properties": { + "Condition": { + "type": "string" + }, + "DeletionPolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + }, + "DependsOn": { + "anyOf": [ + { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + { + "items": { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + "type": "array" + } + ] + }, + "Metadata": { + "type": "object" + }, + "Properties": { + "additionalProperties": false, + "properties": { + "AccountGrouping": { + "$ref": "#/definitions/AWS::BillingConductor::BillingGroup.AccountGrouping", + "markdownDescription": "The set of accounts that will be under the billing group. The set of accounts resemble the linked accounts in a consolidated billing family.", + "title": "AccountGrouping" + }, + "ComputationPreference": { + "$ref": "#/definitions/AWS::BillingConductor::BillingGroup.ComputationPreference", + "markdownDescription": "The preferences and settings that will be used to compute the AWS charges for a billing group.", + "title": "ComputationPreference" + }, + "Description": { + "markdownDescription": "The description of the billing group.", + "title": "Description", + "type": "string" + }, + "Name": { + "markdownDescription": "The billing group's name.", + "title": "Name", + "type": "string" + }, + "PrimaryAccountId": { + "markdownDescription": "The account ID that serves as the main account in a billing group.", + "title": "PrimaryAccountId", + "type": "string" + }, + "Tags": { + "items": { + "$ref": "#/definitions/Tag" + }, + "markdownDescription": "A map that contains tag keys and tag values that are attached to a billing group.", + "title": "Tags", + "type": "array" + } + }, + "required": [ + "AccountGrouping", + "ComputationPreference", + "Name", + "PrimaryAccountId" + ], + "type": "object" + }, + "Type": { + "enum": [ + "AWS::BillingConductor::BillingGroup" + ], + "type": "string" + }, + "UpdateReplacePolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + } + }, + "required": [ + "Type", + "Properties" + ], + "type": "object" + }, + "AWS::BillingConductor::BillingGroup.AccountGrouping": { + "additionalProperties": false, + "properties": { + "AutoAssociate": { + "markdownDescription": "Specifies if this billing group will automatically associate newly added AWS accounts that join your consolidated billing family.", + "title": "AutoAssociate", + "type": "boolean" + }, + "LinkedAccountIds": { + "items": { + "type": "string" + }, + "markdownDescription": "The account IDs that make up the billing group. 
Account IDs must be a part of the consolidated billing family, and not associated with another billing group.", + "title": "LinkedAccountIds", + "type": "array" + } + }, + "required": [ + "LinkedAccountIds" + ], + "type": "object" + }, + "AWS::BillingConductor::BillingGroup.ComputationPreference": { + "additionalProperties": false, + "properties": { + "PricingPlanArn": { + "markdownDescription": "The Amazon Resource Name (ARN) of the pricing plan used to compute the AWS charges for a billing group.", + "title": "PricingPlanArn", + "type": "string" + } + }, + "required": [ + "PricingPlanArn" + ], + "type": "object" + }, + "AWS::BillingConductor::CustomLineItem": { + "additionalProperties": false, + "properties": { + "Condition": { + "type": "string" + }, + "DeletionPolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + }, + "DependsOn": { + "anyOf": [ + { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + { + "items": { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + "type": "array" + } + ] + }, + "Metadata": { + "type": "object" + }, + "Properties": { + "additionalProperties": false, + "properties": { + "AccountId": { + "markdownDescription": "The AWS account to which this custom line item will be applied.", + "title": "AccountId", + "type": "string" + }, + "BillingGroupArn": { + "markdownDescription": "The Amazon Resource Name (ARN) that references the billing group to which the custom line item applies.", + "title": "BillingGroupArn", + "type": "string" + }, + "BillingPeriodRange": { + "$ref": "#/definitions/AWS::BillingConductor::CustomLineItem.BillingPeriodRange", + "markdownDescription": "A time range for which the custom line item is effective.", + "title": "BillingPeriodRange" + }, + "CustomLineItemChargeDetails": { + "$ref": "#/definitions/AWS::BillingConductor::CustomLineItem.CustomLineItemChargeDetails", + "markdownDescription": "The charge details of a custom line item. It should contain only one of `Flat` or `Percentage` .", + "title": "CustomLineItemChargeDetails" + }, + "Description": { + "markdownDescription": "The custom line item's description. 
This is shown on the Bills page in association with the charge value.", + "title": "Description", + "type": "string" + }, + "Name": { + "markdownDescription": "The custom line item's name.", + "title": "Name", + "type": "string" + }, + "Tags": { + "items": { + "$ref": "#/definitions/Tag" + }, + "markdownDescription": "A map that contains tag keys and tag values that are attached to a custom line item.", + "title": "Tags", + "type": "array" + } + }, + "required": [ + "BillingGroupArn", + "Name" + ], + "type": "object" + }, + "Type": { + "enum": [ + "AWS::BillingConductor::CustomLineItem" + ], + "type": "string" + }, + "UpdateReplacePolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + } + }, + "required": [ + "Type", + "Properties" + ], + "type": "object" + }, + "AWS::BillingConductor::CustomLineItem.BillingPeriodRange": { + "additionalProperties": false, + "properties": { + "ExclusiveEndBillingPeriod": { + "markdownDescription": "The exclusive end billing period that defines a billing period range where a custom line is applied.", + "title": "ExclusiveEndBillingPeriod", + "type": "string" + }, + "InclusiveStartBillingPeriod": { + "markdownDescription": "The inclusive start billing period that defines a billing period range where a custom line is applied.", + "title": "InclusiveStartBillingPeriod", + "type": "string" + } + }, + "type": "object" + }, + "AWS::BillingConductor::CustomLineItem.CustomLineItemChargeDetails": { + "additionalProperties": false, + "properties": { + "Flat": { + "$ref": "#/definitions/AWS::BillingConductor::CustomLineItem.CustomLineItemFlatChargeDetails", + "markdownDescription": "A `CustomLineItemFlatChargeDetails` that describes the charge details of a flat custom line item.", + "title": "Flat" + }, + "LineItemFilters": { + "items": { + "$ref": "#/definitions/AWS::BillingConductor::CustomLineItem.LineItemFilter" + }, + "markdownDescription": "A representation of the line item filter.", + "title": "LineItemFilters", + "type": "array" + }, + "Percentage": { + "$ref": "#/definitions/AWS::BillingConductor::CustomLineItem.CustomLineItemPercentageChargeDetails", + "markdownDescription": "A `CustomLineItemPercentageChargeDetails` that describes the charge details of a percentage custom line item.", + "title": "Percentage" + }, + "Type": { + "markdownDescription": "The type of the custom line item that indicates whether the charge is a fee or credit.", + "title": "Type", + "type": "string" + } + }, + "required": [ + "Type" + ], + "type": "object" + }, + "AWS::BillingConductor::CustomLineItem.CustomLineItemFlatChargeDetails": { + "additionalProperties": false, + "properties": { + "ChargeValue": { + "markdownDescription": "The custom line item's fixed charge value in USD.", + "title": "ChargeValue", + "type": "number" + } + }, + "required": [ + "ChargeValue" + ], + "type": "object" + }, + "AWS::BillingConductor::CustomLineItem.CustomLineItemPercentageChargeDetails": { + "additionalProperties": false, + "properties": { + "ChildAssociatedResources": { + "items": { + "type": "string" + }, + "markdownDescription": "A list of resource ARNs to associate to the percentage custom line item.", + "title": "ChildAssociatedResources", + "type": "array" + }, + "PercentageValue": { + "markdownDescription": "The custom line item's percentage value. 
This will be multiplied against the combined value of its associated resources to determine its charge value.", + "title": "PercentageValue", + "type": "number" + } + }, + "required": [ + "PercentageValue" + ], + "type": "object" + }, + "AWS::BillingConductor::CustomLineItem.LineItemFilter": { + "additionalProperties": false, + "properties": { + "Attribute": { + "markdownDescription": "The attribute of the line item filter. This specifies the attribute that you can filter on.", + "title": "Attribute", + "type": "string" + }, + "MatchOption": { + "markdownDescription": "The match criteria of the line item filter. This parameter specifies whether or not to include the resource value from the billing group total cost.", + "title": "MatchOption", + "type": "string" + }, + "Values": { + "items": { + "type": "string" + }, + "markdownDescription": "The values of the line item filter. This specifies the values to filter on. Currently, you can only exclude Savings Plan discounts.", + "title": "Values", + "type": "array" + } + }, + "required": [ + "Attribute", + "MatchOption", + "Values" + ], + "type": "object" + }, "AWS::BillingConductor::PricingPlan": { "additionalProperties": false, "properties": { "Condition": { @@ -31253,7 +32292,7 @@ "type": "string" }, "TeamsChannelId": { - "markdownDescription": "", + "markdownDescription": "The ID of the Microsoft Teams channel.\n\nTo get the channel ID, open Microsoft Teams, right click on the channel name in the left pane, then choose Copy. An example of the channel ID syntax is: `19%3ab6ef35dc342d56ba5654e6fc6d25a071%40thread.tacv2` .", "title": "TeamsChannelId", "type": "string" }, @@ -32136,7 +33175,7 @@ "items": { "$ref": "#/definitions/AWS::CleanRooms::ConfiguredTable.DifferentialPrivacyColumn" }, - "markdownDescription": "", + "markdownDescription": "The name of the column, such as user_id, that contains the unique identifier of your users, whose privacy you want to protect. If you want to turn on differential privacy for two or more tables in a collaboration, you must configure the same column as the user identifier column in both analysis rules.", "title": "Columns", "type": "array" } @@ -32469,6 +33508,319 @@ ], "type": "object" }, + "AWS::CleanRooms::PrivacyBudgetTemplate": { + "additionalProperties": false, + "properties": { + "Condition": { + "type": "string" + }, + "DeletionPolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + }, + "DependsOn": { + "anyOf": [ + { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + { + "items": { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + "type": "array" + } + ] + }, + "Metadata": { + "type": "object" + }, + "Properties": { + "additionalProperties": false, + "properties": { + "AutoRefresh": { + "markdownDescription": "How often the privacy budget refreshes.\n\n> If you plan to regularly bring new data into the collaboration, use `CALENDAR_MONTH` to automatically get a new privacy budget for the collaboration every calendar month. Choosing this option allows arbitrary amounts of information to be revealed about rows of the data when repeatedly queried across refreshes. 
Avoid choosing this if the same rows will be repeatedly queried between privacy budget refreshes.", + "title": "AutoRefresh", + "type": "string" + }, + "MembershipIdentifier": { + "markdownDescription": "The identifier for a membership resource.", + "title": "MembershipIdentifier", + "type": "string" + }, + "Parameters": { + "$ref": "#/definitions/AWS::CleanRooms::PrivacyBudgetTemplate.Parameters", + "markdownDescription": "Specifies the epsilon and noise parameters for the privacy budget template.", + "title": "Parameters" + }, + "PrivacyBudgetType": { + "markdownDescription": "Specifies the type of the privacy budget template.", + "title": "PrivacyBudgetType", + "type": "string" + }, + "Tags": { + "items": { + "$ref": "#/definitions/Tag" + }, + "markdownDescription": "", + "title": "Tags", + "type": "array" + } + }, + "required": [ + "AutoRefresh", + "MembershipIdentifier", + "Parameters", + "PrivacyBudgetType" + ], + "type": "object" + }, + "Type": { + "enum": [ + "AWS::CleanRooms::PrivacyBudgetTemplate" + ], + "type": "string" + }, + "UpdateReplacePolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + } + }, + "required": [ + "Type", + "Properties" + ], + "type": "object" + }, + "AWS::CleanRooms::PrivacyBudgetTemplate.Parameters": { + "additionalProperties": false, + "properties": { + "Epsilon": { + "markdownDescription": "The epsilon value that you want to use.", + "title": "Epsilon", + "type": "number" + }, + "UsersNoisePerQuery": { + "markdownDescription": "Noise added per query is measured in terms of the number of users whose contributions you want to obscure. This value governs the rate at which the privacy budget is depleted.", + "title": "UsersNoisePerQuery", + "type": "number" + } + }, + "required": [ + "Epsilon", + "UsersNoisePerQuery" + ], + "type": "object" + }, + "AWS::CleanRoomsML::TrainingDataset": { + "additionalProperties": false, + "properties": { + "Condition": { + "type": "string" + }, + "DeletionPolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + }, + "DependsOn": { + "anyOf": [ + { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + { + "items": { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + "type": "array" + } + ] + }, + "Metadata": { + "type": "object" + }, + "Properties": { + "additionalProperties": false, + "properties": { + "Description": { + "markdownDescription": "The description of the training dataset.", + "title": "Description", + "type": "string" + }, + "Name": { + "markdownDescription": "The name of the training dataset.", + "title": "Name", + "type": "string" + }, + "RoleArn": { + "markdownDescription": "The ARN of the IAM role that Clean Rooms ML can assume to read the data referred to in the `dataSource` field of each dataset.\n\nPassing a role across accounts is not allowed. If you pass a role that isn't in your account, you get an `AccessDeniedException` error.", + "title": "RoleArn", + "type": "string" + }, + "Tags": { + "items": { + "$ref": "#/definitions/Tag" + }, + "markdownDescription": "The optional metadata that you apply to the resource to help you categorize and organize it. 
Each tag consists of a key and an optional value, both of which you define.\n\nThe following basic restrictions apply to tags:\n\n- Maximum number of tags per resource - 50.\n- For each resource, each tag key must be unique, and each tag key can have only one value.\n- Maximum key length - 128 Unicode characters in UTF-8.\n- Maximum value length - 256 Unicode characters in UTF-8.\n- If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . _ : / @.\n- Tag keys and values are case sensitive.\n- Do not use `aws:` , `AWS:` , or any upper or lowercase combination of such as a prefix for keys as it is reserved. You cannot edit or delete tag keys with this prefix. Values can have this prefix. If a tag value has `aws` as its prefix but the key does not, then Clean Rooms ML considers it to be a user tag and will count against the limit of 50 tags. Tags with only the key prefix of `aws` do not count against your tags per resource limit.", + "title": "Tags", + "type": "array" + }, + "TrainingData": { + "items": { + "$ref": "#/definitions/AWS::CleanRoomsML::TrainingDataset.Dataset" + }, + "markdownDescription": "An array of information that lists the Dataset objects, which specifies the dataset type and details on its location and schema. You must provide a role that has read access to these tables.", + "title": "TrainingData", + "type": "array" + } + }, + "required": [ + "Name", + "RoleArn", + "TrainingData" + ], + "type": "object" + }, + "Type": { + "enum": [ + "AWS::CleanRoomsML::TrainingDataset" + ], + "type": "string" + }, + "UpdateReplacePolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + } + }, + "required": [ + "Type", + "Properties" + ], + "type": "object" + }, + "AWS::CleanRoomsML::TrainingDataset.ColumnSchema": { + "additionalProperties": false, + "properties": { + "ColumnName": { + "markdownDescription": "The name of a column.", + "title": "ColumnName", + "type": "string" + }, + "ColumnTypes": { + "items": { + "type": "string" + }, + "markdownDescription": "The data type of column.", + "title": "ColumnTypes", + "type": "array" + } + }, + "required": [ + "ColumnName", + "ColumnTypes" + ], + "type": "object" + }, + "AWS::CleanRoomsML::TrainingDataset.DataSource": { + "additionalProperties": false, + "properties": { + "GlueDataSource": { + "$ref": "#/definitions/AWS::CleanRoomsML::TrainingDataset.GlueDataSource", + "markdownDescription": "A GlueDataSource object that defines the catalog ID, database name, and table name for the training data.", + "title": "GlueDataSource" + } + }, + "required": [ + "GlueDataSource" + ], + "type": "object" + }, + "AWS::CleanRoomsML::TrainingDataset.Dataset": { + "additionalProperties": false, + "properties": { + "InputConfig": { + "$ref": "#/definitions/AWS::CleanRoomsML::TrainingDataset.DatasetInputConfig", + "markdownDescription": "A DatasetInputConfig object that defines the data source and schema mapping.", + "title": "InputConfig" + }, + "Type": { + "markdownDescription": "What type of information is found in the dataset.", + "title": "Type", + "type": "string" + } + }, + "required": [ + "InputConfig", + "Type" + ], + "type": "object" + }, + "AWS::CleanRoomsML::TrainingDataset.DatasetInputConfig": { + "additionalProperties": false, + "properties": { + "DataSource": { + "$ref": 
"#/definitions/AWS::CleanRoomsML::TrainingDataset.DataSource", + "markdownDescription": "A DataSource object that specifies the Glue data source for the training data.", + "title": "DataSource" + }, + "Schema": { + "items": { + "$ref": "#/definitions/AWS::CleanRoomsML::TrainingDataset.ColumnSchema" + }, + "markdownDescription": "The schema information for the training data.", + "title": "Schema", + "type": "array" + } + }, + "required": [ + "DataSource", + "Schema" + ], + "type": "object" + }, + "AWS::CleanRoomsML::TrainingDataset.GlueDataSource": { + "additionalProperties": false, + "properties": { + "CatalogId": { + "markdownDescription": "The Glue catalog that contains the training data.", + "title": "CatalogId", + "type": "string" + }, + "DatabaseName": { + "markdownDescription": "The Glue database that contains the training data.", + "title": "DatabaseName", + "type": "string" + }, + "TableName": { + "markdownDescription": "The Glue table that contains the training data.", + "title": "TableName", + "type": "string" + } + }, + "required": [ + "DatabaseName", + "TableName" + ], + "type": "object" + }, "AWS::Cloud9::EnvironmentEC2": { "additionalProperties": false, "properties": { @@ -37436,7 +38788,7 @@ "items": { "$ref": "#/definitions/AWS::CloudTrail::EventDataStore.AdvancedEventSelector" }, - "markdownDescription": "The advanced event selectors to use to select the events for the data store. You can configure up to five advanced event selectors for each event data store.\n\nFor more information about how to use advanced event selectors to log CloudTrail events, see [Log events by using advanced event selectors](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/logging-data-events-with-cloudtrail.html#creating-data-event-selectors-advanced) in the CloudTrail User Guide.\n\nFor more information about how to use advanced event selectors to include AWS Config configuration items in your event data store, see [Create an event data store for AWS Config configuration items](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/query-lake-cli.html#lake-cli-create-eds-config) in the CloudTrail User Guide.\n\nFor more information about how to use advanced event selectors to include non- AWS events in your event data store, see [Create an integration to log events from outside AWS](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/query-lake-cli.html#lake-cli-create-integration) in the CloudTrail User Guide.", + "markdownDescription": "The advanced event selectors to use to select the events for the data store. 
You can configure up to five advanced event selectors for each event data store.\n\nFor more information about how to use advanced event selectors to log CloudTrail events, see [Log events by using advanced event selectors](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/logging-data-events-with-cloudtrail.html#creating-data-event-selectors-advanced) in the CloudTrail User Guide.\n\nFor more information about how to use advanced event selectors to include AWS Config configuration items in your event data store, see [Create an event data store for AWS Config configuration items](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/lake-eds-cli.html#lake-cli-create-eds-config) in the CloudTrail User Guide.\n\nFor more information about how to use advanced event selectors to include events from outside AWS in your event data store, see [Create an integration to log events from outside AWS](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/lake-integrations-cli.html#lake-cli-create-integration) in the CloudTrail User Guide.", + "title": "AdvancedEventSelectors", + "type": "array" + }, @@ -37802,12 +39154,12 @@ "type": "string" }, "S3BucketName": { - "markdownDescription": "Specifies the name of the Amazon S3 bucket designated for publishing log files. See [Amazon S3 Bucket Naming Requirements](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/create_trail_naming_policy.html) .", + "markdownDescription": "Specifies the name of the Amazon S3 bucket designated for publishing log files. See [Amazon S3 Bucket naming rules](https://docs.aws.amazon.com/AmazonS3/latest/userguide/bucketnamingrules.html) .", "title": "S3BucketName", "type": "string" }, "S3KeyPrefix": { - "markdownDescription": "Specifies the Amazon S3 key prefix that comes after the name of the bucket you have designated for log file delivery. For more information, see [Finding Your CloudTrail Log Files](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-find-log-files.html) . The maximum length is 200 characters.", + "markdownDescription": "Specifies the Amazon S3 key prefix that comes after the name of the bucket you have designated for log file delivery. For more information, see [Finding Your CloudTrail Log Files](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/get-and-view-cloudtrail-log-files.html#cloudtrail-find-log-files) . The maximum length is 200 characters.", "title": "S3KeyPrefix", "type": "string" }, @@ -38141,6 +39493,14 @@ "title": "Statistic", "type": "string" }, + "Tags": { + "items": { + "$ref": "#/definitions/Tag" + }, + "markdownDescription": "A list of key-value pairs to associate with the alarm. You can associate as many as 50 tags with an alarm. To be able to associate tags with the alarm when you create the alarm, you must have the `cloudwatch:TagResource` permission.\n\nTags can help you organize and categorize your resources. 
You can also use them to scope user permissions by granting a user permission to access or change only resources with certain tag values.", + "title": "Tags", + "type": "array" + }, "Threshold": { "markdownDescription": "The value to compare with the specified statistic.", "title": "Threshold", @@ -38356,6 +39716,11 @@ "title": "Dimensions", "type": "array" }, + "MetricCharacteristics": { + "$ref": "#/definitions/AWS::CloudWatch::AnomalyDetector.MetricCharacteristics", + "markdownDescription": "Use this object to include parameters to provide information about your metric to CloudWatch to help it build more accurate anomaly detection models. Currently, it includes the `PeriodicSpikes` parameter.", + "title": "MetricCharacteristics" + }, "MetricMathAnomalyDetector": { "$ref": "#/definitions/AWS::CloudWatch::AnomalyDetector.MetricMathAnomalyDetector", "markdownDescription": "The CloudWatch metric math expression for this anomaly detector.", @@ -38471,6 +39836,17 @@ ], "type": "object" }, + "AWS::CloudWatch::AnomalyDetector.MetricCharacteristics": { + "additionalProperties": false, + "properties": { + "PeriodicSpikes": { + "markdownDescription": "Set this parameter to true if values for this metric consistently include spikes that should not be considered to be anomalies. With this set to true, CloudWatch will expect to see spikes that occurred consistently during the model training period, and won't flag future similar spikes as anomalies.", + "title": "PeriodicSpikes", + "type": "boolean" + } + }, + "type": "object" + }, "AWS::CloudWatch::AnomalyDetector.MetricDataQueries": { "additionalProperties": false, "properties": {}, @@ -38712,6 +40088,14 @@ "markdownDescription": "The actions to execute when this alarm transitions to the OK state from any other state. Each action is specified as an Amazon Resource Name (ARN). For more information about creating alarms and the actions that you can specify, see [PutCompositeAlarm](https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_PutCompositeAlarm.html) in the *Amazon CloudWatch API Reference* .", "title": "OKActions", "type": "array" + }, + "Tags": { + "items": { + "$ref": "#/definitions/Tag" + }, + "markdownDescription": "A list of key-value pairs to associate with the alarm. You can associate as many as 50 tags with an alarm. To be able to associate tags with the alarm when you create the alarm, you must have the `cloudwatch:TagResource` permission.\n\nTags can help you organize and categorize your resources. You can also use them to scope user permissions by granting a user permission to access or change only resources with certain tag values.", + "title": "Tags", + "type": "array" } }, "required": [ @@ -39849,7 +41233,7 @@ "type": "string" }, "ComputeType": { - "markdownDescription": "The type of compute environment. This determines the number of CPU cores and memory the build environment uses. Available values include:\n\n- `BUILD_GENERAL1_SMALL` : Use up to 3 GB memory and 2 vCPUs for builds.\n- `BUILD_GENERAL1_MEDIUM` : Use up to 7 GB memory and 4 vCPUs for builds.\n- `BUILD_GENERAL1_LARGE` : Use up to 15 GB memory and 8 vCPUs for builds.\n\nFor more information, see [Build Environment Compute Types](https://docs.aws.amazon.com/codebuild/latest/userguide/build-env-ref-compute-types.html) in the *AWS CodeBuild User Guide.*", + "markdownDescription": "The type of compute environment. This determines the number of CPU cores and memory the build environment uses. 
Available values include:\n\n- `BUILD_GENERAL1_SMALL` : Use up to 3 GB memory and 2 vCPUs for builds.\n- `BUILD_GENERAL1_MEDIUM` : Use up to 7 GB memory and 4 vCPUs for builds.\n- `BUILD_GENERAL1_LARGE` : Use up to 16 GB memory and 8 vCPUs for builds, depending on your environment type.\n- `BUILD_GENERAL1_XLARGE` : Use up to 70 GB memory and 36 vCPUs for builds, depending on your environment type.\n- `BUILD_GENERAL1_2XLARGE` : Use up to 145 GB memory, 72 vCPUs, and 824 GB of SSD storage for builds. This compute type supports Docker images up to 100 GB uncompressed.\n- `BUILD_LAMBDA_1GB` : Use up to 1 GB memory for builds. Only available for environment type `LINUX_LAMBDA_CONTAINER` and `ARM_LAMBDA_CONTAINER` .\n- `BUILD_LAMBDA_2GB` : Use up to 2 GB memory for builds. Only available for environment type `LINUX_LAMBDA_CONTAINER` and `ARM_LAMBDA_CONTAINER` .\n- `BUILD_LAMBDA_4GB` : Use up to 4 GB memory for builds. Only available for environment type `LINUX_LAMBDA_CONTAINER` and `ARM_LAMBDA_CONTAINER` .\n- `BUILD_LAMBDA_8GB` : Use up to 8 GB memory for builds. Only available for environment type `LINUX_LAMBDA_CONTAINER` and `ARM_LAMBDA_CONTAINER` .\n- `BUILD_LAMBDA_10GB` : Use up to 10 GB memory for builds. Only available for environment type `LINUX_LAMBDA_CONTAINER` and `ARM_LAMBDA_CONTAINER` .\n\nFor more information, see [Build Environment Compute Types](https://docs.aws.amazon.com/codebuild/latest/userguide/build-env-ref-compute-types.html) in the *AWS CodeBuild User Guide.*", "title": "ComputeType", "type": "string" }, @@ -40184,7 +41568,7 @@ "type": "boolean" }, "Location": { - "markdownDescription": "Information about the location of the source code to be built. Valid values include:\n\n- For source code settings that are specified in the source action of a pipeline in CodePipeline, `location` should not be specified. If it is specified, CodePipeline ignores it. This is because CodePipeline uses the settings in a pipeline's source action instead of this value.\n- For source code in an CodeCommit repository, the HTTPS clone URL to the repository that contains the source code and the buildspec file (for example, `https://git-codecommit..amazonaws.com/v1/repos/` ).\n- For source code in an Amazon S3 input bucket, one of the following.\n\n- The path to the ZIP file that contains the source code (for example, `//.zip` ).\n- The path to the folder that contains the source code (for example, `///` ).\n- For source code in a GitHub repository, the HTTPS clone URL to the repository that contains the source and the buildspec file. You must connect your AWS account to your GitHub account. Use the AWS CodeBuild console to start creating a build project. When you use the console to connect (or reconnect) with GitHub, on the GitHub *Authorize application* page, for *Organization access* , choose *Request access* next to each repository you want to allow AWS CodeBuild to have access to, and then choose *Authorize application* . (After you have connected to your GitHub account, you do not need to finish creating the build project. You can leave the AWS CodeBuild console.) To instruct AWS CodeBuild to use this connection, in the `source` object, set the `auth` object's `type` value to `OAUTH` .\n- For source code in an GitLab or self-managed GitLab repository, the HTTPS clone URL to the repository that contains the source and the buildspec file. You must connect your AWS account to your GitLab account. Use the AWS CodeBuild console to start creating a build project. 
When you use the console to connect (or reconnect) with GitLab, on the Connections *Authorize application* page, choose *Authorize* . Then on the AWS CodeStar Connections *Create GitLab connection* page, choose *Connect to GitLab* . (After you have connected to your GitLab account, you do not need to finish creating the build project. You can leave the AWS CodeBuild console.) To instruct AWS CodeBuild to override the default connection and use this connection instead, set the `auth` object's `type` value to `CODECONNECTIONS` in the `source` object.\n- For source code in a Bitbucket repository, the HTTPS clone URL to the repository that contains the source and the buildspec file. You must connect your AWS account to your Bitbucket account. Use the AWS CodeBuild console to start creating a build project. When you use the console to connect (or reconnect) with Bitbucket, on the Bitbucket *Confirm access to your account* page, choose *Grant access* . (After you have connected to your Bitbucket account, you do not need to finish creating the build project. You can leave the AWS CodeBuild console.) To instruct AWS CodeBuild to use this connection, in the `source` object, set the `auth` object's `type` value to `OAUTH` .\n\nIf you specify `CODEPIPELINE` for the `Type` property, don't specify this property. For all of the other types, you must specify `Location` .", + "markdownDescription": "Information about the location of the source code to be built. Valid values include:\n\n- For source code settings that are specified in the source action of a pipeline in CodePipeline, `location` should not be specified. If it is specified, CodePipeline ignores it. This is because CodePipeline uses the settings in a pipeline's source action instead of this value.\n- For source code in an CodeCommit repository, the HTTPS clone URL to the repository that contains the source code and the buildspec file (for example, `https://git-codecommit..amazonaws.com/v1/repos/` ).\n- For source code in an Amazon S3 input bucket, one of the following.\n\n- The path to the ZIP file that contains the source code (for example, `//.zip` ).\n- The path to the folder that contains the source code (for example, `///` ).\n- For source code in a GitHub repository, the HTTPS clone URL to the repository that contains the source and the buildspec file. You must connect your AWS account to your GitHub account. Use the AWS CodeBuild console to start creating a build project. When you use the console to connect (or reconnect) with GitHub, on the GitHub *Authorize application* page, for *Organization access* , choose *Request access* next to each repository you want to allow AWS CodeBuild to have access to, and then choose *Authorize application* . (After you have connected to your GitHub account, you do not need to finish creating the build project. You can leave the AWS CodeBuild console.) To instruct AWS CodeBuild to use this connection, in the `source` object, set the `auth` object's `type` value to `OAUTH` .\n- For source code in an GitLab or self-managed GitLab repository, the HTTPS clone URL to the repository that contains the source and the buildspec file. You must connect your AWS account to your GitLab account. Use the AWS CodeBuild console to start creating a build project. When you use the console to connect (or reconnect) with GitLab, on the Connections *Authorize application* page, choose *Authorize* . Then on the AWS CodeConnections *Create GitLab connection* page, choose *Connect to GitLab* . 
(After you have connected to your GitLab account, you do not need to finish creating the build project. You can leave the AWS CodeBuild console.) To instruct AWS CodeBuild to override the default connection and use this connection instead, set the `auth` object's `type` value to `CODECONNECTIONS` in the `source` object.\n- For source code in a Bitbucket repository, the HTTPS clone URL to the repository that contains the source and the buildspec file. You must connect your AWS account to your Bitbucket account. Use the AWS CodeBuild console to start creating a build project. When you use the console to connect (or reconnect) with Bitbucket, on the Bitbucket *Confirm access to your account* page, choose *Grant access* . (After you have connected to your Bitbucket account, you do not need to finish creating the build project. You can leave the AWS CodeBuild console.) To instruct AWS CodeBuild to use this connection, in the `source` object, set the `auth` object's `type` value to `OAUTH` .\n\nIf you specify `CODEPIPELINE` for the `Type` property, don't specify this property. For all of the other types, you must specify `Location` .", "title": "Location", "type": "string" }, @@ -40269,7 +41653,7 @@ "type": "string" }, "Type": { - "markdownDescription": "The type of webhook filter. There are eight webhook filter types: `EVENT` , `ACTOR_ACCOUNT_ID` , `HEAD_REF` , `BASE_REF` , `FILE_PATH` , `COMMIT_MESSAGE` , `TAG_NAME` , and `RELEASE_NAME` .\n\n- EVENT\n\n- A webhook event triggers a build when the provided `pattern` matches one of eight event types: `PUSH` , `PULL_REQUEST_CREATED` , `PULL_REQUEST_UPDATED` , `PULL_REQUEST_CLOSED` , `PULL_REQUEST_REOPENED` , `PULL_REQUEST_MERGED` , `RELEASED` , and `PRERELEASED` . The `EVENT` patterns are specified as a comma-separated string. For example, `PUSH, PULL_REQUEST_CREATED, PULL_REQUEST_UPDATED` filters all push, pull request created, and pull request updated events.\n\n> The `PULL_REQUEST_REOPENED` works with GitHub and GitHub Enterprise only. The `RELEASED` and `PRERELEASED` work with GitHub only.\n- ACTOR_ACCOUNT_ID\n\n- A webhook event triggers a build when a GitHub, GitHub Enterprise, or Bitbucket account ID matches the regular expression `pattern` .\n- HEAD_REF\n\n- A webhook event triggers a build when the head reference matches the regular expression `pattern` . For example, `refs/heads/branch-name` and `refs/tags/tag-name` .\n\n> Works with GitHub and GitHub Enterprise push, GitHub and GitHub Enterprise pull request, Bitbucket push, and Bitbucket pull request events.\n- BASE_REF\n\n- A webhook event triggers a build when the base reference matches the regular expression `pattern` . For example, `refs/heads/branch-name` .\n\n> Works with pull request events only.\n- FILE_PATH\n\n- A webhook triggers a build when the path of a changed file matches the regular expression `pattern` .\n\n> Works with GitHub and Bitbucket events push and pull requests events. Also works with GitHub Enterprise push events, but does not work with GitHub Enterprise pull request events.\n- COMMIT_MESSAGE\n\n- A webhook triggers a build when the head commit message matches the regular expression `pattern` .\n\n> Works with GitHub and Bitbucket events push and pull requests events. 
Also works with GitHub Enterprise push events, but does not work with GitHub Enterprise pull request events.\n- TAG_NAME\n\n- A webhook triggers a build when the tag name of the release matches the regular expression `pattern` .\n\n> Works with `RELEASED` and `PRERELEASED` events only.\n- RELEASE_NAME\n\n- A webhook triggers a build when the release name matches the regular expression `pattern` .\n\n> Works with `RELEASED` and `PRERELEASED` events only.", + "markdownDescription": "The type of webhook filter. There are nine webhook filter types: `EVENT` , `ACTOR_ACCOUNT_ID` , `HEAD_REF` , `BASE_REF` , `FILE_PATH` , `COMMIT_MESSAGE` , `TAG_NAME` , `RELEASE_NAME` , and `WORKFLOW_NAME` .\n\n- EVENT\n\n- A webhook event triggers a build when the provided `pattern` matches one of nine event types: `PUSH` , `PULL_REQUEST_CREATED` , `PULL_REQUEST_UPDATED` , `PULL_REQUEST_CLOSED` , `PULL_REQUEST_REOPENED` , `PULL_REQUEST_MERGED` , `RELEASED` , `PRERELEASED` , and `WORKFLOW_JOB_QUEUED` . The `EVENT` patterns are specified as a comma-separated string. For example, `PUSH, PULL_REQUEST_CREATED, PULL_REQUEST_UPDATED` filters all push, pull request created, and pull request updated events.\n\n> The `PULL_REQUEST_REOPENED` works with GitHub and GitHub Enterprise only. The `RELEASED` , `PRERELEASED` , and `WORKFLOW_JOB_QUEUED` work with GitHub only.\n- ACTOR_ACCOUNT_ID\n\n- A webhook event triggers a build when a GitHub, GitHub Enterprise, or Bitbucket account ID matches the regular expression `pattern` .\n- HEAD_REF\n\n- A webhook event triggers a build when the head reference matches the regular expression `pattern` . For example, `refs/heads/branch-name` and `refs/tags/tag-name` .\n\n> Works with GitHub and GitHub Enterprise push, GitHub and GitHub Enterprise pull request, Bitbucket push, and Bitbucket pull request events.\n- BASE_REF\n\n- A webhook event triggers a build when the base reference matches the regular expression `pattern` . For example, `refs/heads/branch-name` .\n\n> Works with pull request events only.\n- FILE_PATH\n\n- A webhook triggers a build when the path of a changed file matches the regular expression `pattern` .\n\n> Works with GitHub and Bitbucket events push and pull requests events. Also works with GitHub Enterprise push events, but does not work with GitHub Enterprise pull request events.\n- COMMIT_MESSAGE\n\n- A webhook triggers a build when the head commit message matches the regular expression `pattern` .\n\n> Works with GitHub and Bitbucket events push and pull requests events. Also works with GitHub Enterprise push events, but does not work with GitHub Enterprise pull request events.\n- TAG_NAME\n\n- A webhook triggers a build when the tag name of the release matches the regular expression `pattern` .\n\n> Works with `RELEASED` and `PRERELEASED` events only.\n- RELEASE_NAME\n\n- A webhook triggers a build when the release name matches the regular expression `pattern` .\n\n> Works with `RELEASED` and `PRERELEASED` events only.\n- WORKFLOW_NAME\n\n- A webhook triggers a build when the workflow name matches the regular expression `pattern` .\n\n> Works with `WORKFLOW_JOB_QUEUED` events only.", "title": "Type", "type": "string" } @@ -40475,7 +41859,7 @@ "type": "string" }, "Token": { - "markdownDescription": "For GitHub or GitHub Enterprise, this is the personal access token. For Bitbucket, this is the app password.", + "markdownDescription": "For GitHub or GitHub Enterprise, this is the personal access token. 
For Bitbucket, this is either the access token or the app password.", "title": "Token", "type": "string" }, @@ -40697,6 +42081,91 @@ ], "type": "object" }, + "AWS::CodeConnections::Connection": { + "additionalProperties": false, + "properties": { + "Condition": { + "type": "string" + }, + "DeletionPolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + }, + "DependsOn": { + "anyOf": [ + { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + { + "items": { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + "type": "array" + } + ] + }, + "Metadata": { + "type": "object" + }, + "Properties": { + "additionalProperties": false, + "properties": { + "ConnectionName": { + "markdownDescription": "The name of the connection. Connection names must be unique in an AWS account .", + "title": "ConnectionName", + "type": "string" + }, + "HostArn": { + "markdownDescription": "The Amazon Resource Name (ARN) of the host associated with the connection.", + "title": "HostArn", + "type": "string" + }, + "ProviderType": { + "markdownDescription": "The name of the external provider where your third-party code repository is configured.", + "title": "ProviderType", + "type": "string" + }, + "Tags": { + "items": { + "$ref": "#/definitions/Tag" + }, + "markdownDescription": "", + "title": "Tags", + "type": "array" + } + }, + "required": [ + "ConnectionName" + ], + "type": "object" + }, + "Type": { + "enum": [ + "AWS::CodeConnections::Connection" + ], + "type": "string" + }, + "UpdateReplacePolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + } + }, + "required": [ + "Type", + "Properties" + ], + "type": "object" + }, "AWS::CodeDeploy::Application": { "additionalProperties": false, "properties": { @@ -55688,7 +57157,7 @@ "type": "string" }, "CaptureDdls": { - "markdownDescription": "To capture DDL events, AWS DMS creates various artifacts in the PostgreSQL database when the task starts. You can later remove these artifacts.\n\nThe default value is `true` .\n\nIf this value is set to `N` , you don't have to create tables or triggers on the source database.", + "markdownDescription": "To capture DDL events, AWS DMS creates various artifacts in the PostgreSQL database when the task starts. You can later remove these artifacts.\n\nIf this value is set to `True` , you don't have to create tables or triggers on the source database.", "title": "CaptureDdls", "type": "boolean" }, @@ -60746,7 +62215,7 @@ "type": "string" }, "ServerCertificate": { - "markdownDescription": "Specifies a file with the certificates that are used to sign the object storage server's certificate (for example, `file:///home/user/.ssh/storage_sys_certificate.pem` ). The file you specify must include the following:\n\n- The certificate of the signing certificate authority (CA)\n- Any intermediate certificates\n- base64 encoding\n- A `.pem` extension\n\nThe file can be up to 32768 bytes (before base64 encoding).\n\nTo use this parameter, configure `ServerProtocol` to `HTTPS` .", + "markdownDescription": "Specifies a certificate chain for DataSync to authenticate with your object storage system if the system uses a private or self-signed certificate authority (CA). 
You must specify a single `.pem` file with a full certificate chain (for example, `file:///home/user/.ssh/object_storage_certificates.pem` ).\n\nThe certificate chain might include:\n\n- The object storage system's certificate\n- All intermediate certificates (if there are any)\n- The root certificate of the signing CA\n\nYou can concatenate your certificates into a `.pem` file (which can be up to 32768 bytes before base64 encoding). The following example `cat` command creates an `object_storage_certificates.pem` file that includes three certificates:\n\n`cat object_server_certificate.pem intermediate_certificate.pem ca_root_certificate.pem > object_storage_certificates.pem`\n\nTo use this parameter, configure `ServerProtocol` to `HTTPS` .", "title": "ServerCertificate", "type": "string" }, @@ -61806,6 +63275,11 @@ "AWS::DataZone::DataSource.GlueRunConfigurationInput": { "additionalProperties": false, "properties": { + "AutoImportDataQualityResult": { + "markdownDescription": "", + "title": "AutoImportDataQualityResult", + "type": "boolean" + }, "DataAccessRole": { "markdownDescription": "The data access role included in the configuration details of the AWS Glue data source.", "title": "DataAccessRole", @@ -62682,7 +64156,7 @@ ], "type": "object" }, - "AWS::Detective::Graph": { + "AWS::Deadline::Farm": { "additionalProperties": false, "properties": { "Condition": { @@ -62717,25 +64191,30 @@ "Properties": { "additionalProperties": false, "properties": { - "AutoEnableMembers": { - "markdownDescription": "Indicates whether to automatically enable new organization accounts as member accounts in the organization behavior graph.\n\nBy default, this property is set to `false` . If you want to change the value of this property, you must be the Detective administrator for the organization. For more information on setting a Detective administrator account, see [AWS::Detective::OrganizationAdmin](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-detective-organizationadmin.html)", - "title": "AutoEnableMembers", - "type": "boolean" + "Description": { + "markdownDescription": "A description of the farm that helps identify what the farm is used for.", + "title": "Description", + "type": "string" }, - "Tags": { - "items": { - "$ref": "#/definitions/Tag" - }, - "markdownDescription": "The tag values to assign to the new behavior graph.", - "title": "Tags", - "type": "array" + "DisplayName": { + "markdownDescription": "The display name of the farm.", + "title": "DisplayName", + "type": "string" + }, + "KmsKeyArn": { + "markdownDescription": "The ARN for the KMS key.", + "title": "KmsKeyArn", + "type": "string" } }, + "required": [ + "DisplayName" + ], "type": "object" }, "Type": { "enum": [ - "AWS::Detective::Graph" + "AWS::Deadline::Farm" ], "type": "string" }, @@ -62749,11 +64228,12 @@ } }, "required": [ - "Type" + "Type", + "Properties" ], "type": "object" }, - "AWS::Detective::MemberInvitation": { + "AWS::Deadline::Fleet": { "additionalProperties": false, "properties": { "Condition": { @@ -62788,42 +64268,53 @@ "Properties": { "additionalProperties": false, "properties": { - "DisableEmailNotification": { - "markdownDescription": "Whether to send an invitation email to the member account. 
If set to true, the member account does not receive an invitation email.", - "title": "DisableEmailNotification", - "type": "boolean" + "Configuration": { + "$ref": "#/definitions/AWS::Deadline::Fleet.FleetConfiguration", + "markdownDescription": "The configuration details for the fleet.", + "title": "Configuration" }, - "GraphArn": { - "markdownDescription": "The ARN of the behavior graph to invite the account to contribute data to.", - "title": "GraphArn", + "Description": { + "markdownDescription": "A description that helps identify what the fleet is used for.", + "title": "Description", "type": "string" }, - "MemberEmailAddress": { - "markdownDescription": "The root user email address of the invited account. If the email address provided is not the root user email address for the provided account, the invitation creation fails.", - "title": "MemberEmailAddress", + "DisplayName": { + "markdownDescription": "The display name of the fleet summary to update.", + "title": "DisplayName", "type": "string" }, - "MemberId": { - "markdownDescription": "The AWS account identifier of the invited account", - "title": "MemberId", + "FarmId": { + "markdownDescription": "The farm ID.", + "title": "FarmId", "type": "string" }, - "Message": { - "markdownDescription": "Customized text to include in the invitation email message.", - "title": "Message", + "MaxWorkerCount": { + "markdownDescription": "The maximum number of workers specified in the fleet.", + "title": "MaxWorkerCount", + "type": "number" + }, + "MinWorkerCount": { + "markdownDescription": "The minimum number of workers in the fleet.", + "title": "MinWorkerCount", + "type": "number" + }, + "RoleArn": { + "markdownDescription": "The IAM role that workers in the fleet use when processing jobs.", + "title": "RoleArn", "type": "string" } }, "required": [ - "GraphArn", - "MemberEmailAddress", - "MemberId" + "Configuration", + "DisplayName", + "MaxWorkerCount", + "RoleArn" ], "type": "object" }, "Type": { "enum": [ - "AWS::Detective::MemberInvitation" + "AWS::Deadline::Fleet" ], "type": "string" }, @@ -62842,7 +64333,1191 @@ ], "type": "object" }, - "AWS::Detective::OrganizationAdmin": { + "AWS::Deadline::Fleet.AcceleratorCountRange": { + "additionalProperties": false, + "properties": { + "Max": { + "markdownDescription": "The maximum GPU for the accelerator.", + "title": "Max", + "type": "number" + }, + "Min": { + "markdownDescription": "The minimum GPU for the accelerator.", + "title": "Min", + "type": "number" + } + }, + "required": [ + "Min" + ], + "type": "object" + }, + "AWS::Deadline::Fleet.AcceleratorTotalMemoryMiBRange": { + "additionalProperties": false, + "properties": { + "Max": { + "markdownDescription": "The maximum amount of memory to use for the accelerator, measured in MiB.", + "title": "Max", + "type": "number" + }, + "Min": { + "markdownDescription": "The minimum amount of memory to use for the accelerator, measured in MiB.", + "title": "Min", + "type": "number" + } + }, + "required": [ + "Min" + ], + "type": "object" + }, + "AWS::Deadline::Fleet.CustomerManagedFleetConfiguration": { + "additionalProperties": false, + "properties": { + "Mode": { + "markdownDescription": "The AWS Auto Scaling mode for the customer managed fleet configuration.", + "title": "Mode", + "type": "string" + }, + "StorageProfileId": { + "markdownDescription": "The storage profile ID.", + "title": "StorageProfileId", + "type": "string" + }, + "WorkerCapabilities": { + "$ref": "#/definitions/AWS::Deadline::Fleet.CustomerManagedWorkerCapabilities", + 
"markdownDescription": "The worker capabilities for a customer managed fleet configuration.", + "title": "WorkerCapabilities" + } + }, + "required": [ + "Mode", + "WorkerCapabilities" + ], + "type": "object" + }, + "AWS::Deadline::Fleet.CustomerManagedWorkerCapabilities": { + "additionalProperties": false, + "properties": { + "AcceleratorCount": { + "$ref": "#/definitions/AWS::Deadline::Fleet.AcceleratorCountRange", + "markdownDescription": "The range of the accelerator.", + "title": "AcceleratorCount" + }, + "AcceleratorTotalMemoryMiB": { + "$ref": "#/definitions/AWS::Deadline::Fleet.AcceleratorTotalMemoryMiBRange", + "markdownDescription": "The total memory (MiB) for the customer managed worker capabilities.", + "title": "AcceleratorTotalMemoryMiB" + }, + "AcceleratorTypes": { + "items": { + "type": "string" + }, + "markdownDescription": "The accelerator types for the customer managed worker capabilities.", + "title": "AcceleratorTypes", + "type": "array" + }, + "CpuArchitectureType": { + "markdownDescription": "The CPU architecture type for the customer managed worker capabilities.", + "title": "CpuArchitectureType", + "type": "string" + }, + "CustomAmounts": { + "items": { + "$ref": "#/definitions/AWS::Deadline::Fleet.FleetAmountCapability" + }, + "markdownDescription": "Custom requirement ranges for customer managed worker capabilities.", + "title": "CustomAmounts", + "type": "array" + }, + "CustomAttributes": { + "items": { + "$ref": "#/definitions/AWS::Deadline::Fleet.FleetAttributeCapability" + }, + "markdownDescription": "Custom attributes for the customer manged worker capabilities.", + "title": "CustomAttributes", + "type": "array" + }, + "MemoryMiB": { + "$ref": "#/definitions/AWS::Deadline::Fleet.MemoryMiBRange", + "markdownDescription": "The memory (MiB).", + "title": "MemoryMiB" + }, + "OsFamily": { + "markdownDescription": "The operating system (OS) family.", + "title": "OsFamily", + "type": "string" + }, + "VCpuCount": { + "$ref": "#/definitions/AWS::Deadline::Fleet.VCpuCountRange", + "markdownDescription": "The vCPU count for the customer manged worker capabilities.", + "title": "VCpuCount" + } + }, + "required": [ + "CpuArchitectureType", + "MemoryMiB", + "OsFamily", + "VCpuCount" + ], + "type": "object" + }, + "AWS::Deadline::Fleet.Ec2EbsVolume": { + "additionalProperties": false, + "properties": { + "Iops": { + "markdownDescription": "The IOPS per volume.", + "title": "Iops", + "type": "number" + }, + "SizeGiB": { + "markdownDescription": "The EBS volume size in GiB.", + "title": "SizeGiB", + "type": "number" + }, + "ThroughputMiB": { + "markdownDescription": "The throughput per volume in MiB.", + "title": "ThroughputMiB", + "type": "number" + } + }, + "type": "object" + }, + "AWS::Deadline::Fleet.FleetAmountCapability": { + "additionalProperties": false, + "properties": { + "Max": { + "markdownDescription": "The maximum amount of the fleet worker capability.", + "title": "Max", + "type": "number" + }, + "Min": { + "markdownDescription": "The minimum amount of fleet worker capability.", + "title": "Min", + "type": "number" + }, + "Name": { + "markdownDescription": "The name of the fleet capability.", + "title": "Name", + "type": "string" + } + }, + "required": [ + "Min", + "Name" + ], + "type": "object" + }, + "AWS::Deadline::Fleet.FleetAttributeCapability": { + "additionalProperties": false, + "properties": { + "Name": { + "markdownDescription": "The name of the fleet attribute capability for the worker.", + "title": "Name", + "type": "string" + }, + "Values": { + 
"items": { + "type": "string" + }, + "markdownDescription": "The number of fleet attribute capabilities.", + "title": "Values", + "type": "array" + } + }, + "required": [ + "Name", + "Values" + ], + "type": "object" + }, + "AWS::Deadline::Fleet.FleetCapabilities": { + "additionalProperties": false, + "properties": { + "Amounts": { + "items": { + "$ref": "#/definitions/AWS::Deadline::Fleet.FleetAmountCapability" + }, + "markdownDescription": "Amount capabilities of the fleet.", + "title": "Amounts", + "type": "array" + }, + "Attributes": { + "items": { + "$ref": "#/definitions/AWS::Deadline::Fleet.FleetAttributeCapability" + }, + "markdownDescription": "Attribute capabilities of the fleet.", + "title": "Attributes", + "type": "array" + } + }, + "type": "object" + }, + "AWS::Deadline::Fleet.FleetConfiguration": { + "additionalProperties": false, + "properties": { + "CustomerManaged": { + "$ref": "#/definitions/AWS::Deadline::Fleet.CustomerManagedFleetConfiguration", + "markdownDescription": "The customer managed fleets within a fleet configuration.", + "title": "CustomerManaged" + }, + "ServiceManagedEc2": { + "$ref": "#/definitions/AWS::Deadline::Fleet.ServiceManagedEc2FleetConfiguration", + "markdownDescription": "The service managed Amazon EC2 instances for a fleet configuration.", + "title": "ServiceManagedEc2" + } + }, + "type": "object" + }, + "AWS::Deadline::Fleet.MemoryMiBRange": { + "additionalProperties": false, + "properties": { + "Max": { + "markdownDescription": "The maximum amount of memory (in MiB).", + "title": "Max", + "type": "number" + }, + "Min": { + "markdownDescription": "The minimum amount of memory (in MiB).", + "title": "Min", + "type": "number" + } + }, + "required": [ + "Min" + ], + "type": "object" + }, + "AWS::Deadline::Fleet.ServiceManagedEc2FleetConfiguration": { + "additionalProperties": false, + "properties": { + "InstanceCapabilities": { + "$ref": "#/definitions/AWS::Deadline::Fleet.ServiceManagedEc2InstanceCapabilities", + "markdownDescription": "The Amazon EC2 instance capabilities.", + "title": "InstanceCapabilities" + }, + "InstanceMarketOptions": { + "$ref": "#/definitions/AWS::Deadline::Fleet.ServiceManagedEc2InstanceMarketOptions", + "markdownDescription": "The Amazon EC2 market type.", + "title": "InstanceMarketOptions" + } + }, + "required": [ + "InstanceCapabilities", + "InstanceMarketOptions" + ], + "type": "object" + }, + "AWS::Deadline::Fleet.ServiceManagedEc2InstanceCapabilities": { + "additionalProperties": false, + "properties": { + "AllowedInstanceTypes": { + "items": { + "type": "string" + }, + "markdownDescription": "The allowable Amazon EC2 instance types.", + "title": "AllowedInstanceTypes", + "type": "array" + }, + "CpuArchitectureType": { + "markdownDescription": "The CPU architecture type.", + "title": "CpuArchitectureType", + "type": "string" + }, + "CustomAmounts": { + "items": { + "$ref": "#/definitions/AWS::Deadline::Fleet.FleetAmountCapability" + }, + "markdownDescription": "The custom capability amounts to require for instances in this fleet.", + "title": "CustomAmounts", + "type": "array" + }, + "CustomAttributes": { + "items": { + "$ref": "#/definitions/AWS::Deadline::Fleet.FleetAttributeCapability" + }, + "markdownDescription": "The custom capability attributes to require for instances in this fleet.", + "title": "CustomAttributes", + "type": "array" + }, + "ExcludedInstanceTypes": { + "items": { + "type": "string" + }, + "markdownDescription": "The instance types to exclude from the fleet.", + "title": 
"ExcludedInstanceTypes", + "type": "array" + }, + "MemoryMiB": { + "$ref": "#/definitions/AWS::Deadline::Fleet.MemoryMiBRange", + "markdownDescription": "The memory, as MiB, for the Amazon EC2 instance type.", + "title": "MemoryMiB" + }, + "OsFamily": { + "markdownDescription": "The operating system (OS) family.", + "title": "OsFamily", + "type": "string" + }, + "RootEbsVolume": { + "$ref": "#/definitions/AWS::Deadline::Fleet.Ec2EbsVolume", + "markdownDescription": "The root EBS volume.", + "title": "RootEbsVolume" + }, + "VCpuCount": { + "$ref": "#/definitions/AWS::Deadline::Fleet.VCpuCountRange", + "markdownDescription": "The amount of vCPU to require for instances in this fleet.", + "title": "VCpuCount" + } + }, + "required": [ + "CpuArchitectureType", + "MemoryMiB", + "OsFamily", + "VCpuCount" + ], + "type": "object" + }, + "AWS::Deadline::Fleet.ServiceManagedEc2InstanceMarketOptions": { + "additionalProperties": false, + "properties": { + "Type": { + "markdownDescription": "The Amazon EC2 instance type.", + "title": "Type", + "type": "string" + } + }, + "required": [ + "Type" + ], + "type": "object" + }, + "AWS::Deadline::Fleet.VCpuCountRange": { + "additionalProperties": false, + "properties": { + "Max": { + "markdownDescription": "The maximum amount of vCPU.", + "title": "Max", + "type": "number" + }, + "Min": { + "markdownDescription": "The minimum amount of vCPU.", + "title": "Min", + "type": "number" + } + }, + "required": [ + "Min" + ], + "type": "object" + }, + "AWS::Deadline::LicenseEndpoint": { + "additionalProperties": false, + "properties": { + "Condition": { + "type": "string" + }, + "DeletionPolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + }, + "DependsOn": { + "anyOf": [ + { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + { + "items": { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + "type": "array" + } + ] + }, + "Metadata": { + "type": "object" + }, + "Properties": { + "additionalProperties": false, + "properties": { + "SecurityGroupIds": { + "items": { + "type": "string" + }, + "markdownDescription": "The identifier of the Amazon EC2 security group that controls access to the license endpoint.", + "title": "SecurityGroupIds", + "type": "array" + }, + "SubnetIds": { + "items": { + "type": "string" + }, + "markdownDescription": "Identifies the VPC subnets that can connect to a license endpoint.", + "title": "SubnetIds", + "type": "array" + }, + "VpcId": { + "markdownDescription": "The VCP(virtual private cloud) ID associated with the license endpoint.", + "title": "VpcId", + "type": "string" + } + }, + "required": [ + "SecurityGroupIds", + "SubnetIds", + "VpcId" + ], + "type": "object" + }, + "Type": { + "enum": [ + "AWS::Deadline::LicenseEndpoint" + ], + "type": "string" + }, + "UpdateReplacePolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + } + }, + "required": [ + "Type", + "Properties" + ], + "type": "object" + }, + "AWS::Deadline::MeteredProduct": { + "additionalProperties": false, + "properties": { + "Condition": { + "type": "string" + }, + "DeletionPolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + }, + "DependsOn": { + "anyOf": [ + { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + { + "items": { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + "type": "array" + } + ] + }, + "Metadata": { + "type": "object" + }, + "Properties": { + "additionalProperties": false, + "properties": { + "Family": { + "markdownDescription": "The 
family to which the metered product belongs.", + "title": "Family", + "type": "string" + }, + "LicenseEndpointId": { + "markdownDescription": "The Amazon EC2 identifier of the license endpoint.", + "title": "LicenseEndpointId", + "type": "string" + }, + "Port": { + "markdownDescription": "The port on which the metered product should run.", + "title": "Port", + "type": "number" + }, + "ProductId": { + "markdownDescription": "The product ID.", + "title": "ProductId", + "type": "string" + }, + "Vendor": { + "markdownDescription": "The vendor.", + "title": "Vendor", + "type": "string" + } + }, + "type": "object" + }, + "Type": { + "enum": [ + "AWS::Deadline::MeteredProduct" + ], + "type": "string" + }, + "UpdateReplacePolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + } + }, + "required": [ + "Type" + ], + "type": "object" + }, + "AWS::Deadline::Queue": { + "additionalProperties": false, + "properties": { + "Condition": { + "type": "string" + }, + "DeletionPolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + }, + "DependsOn": { + "anyOf": [ + { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + { + "items": { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + "type": "array" + } + ] + }, + "Metadata": { + "type": "object" + }, + "Properties": { + "additionalProperties": false, + "properties": { + "AllowedStorageProfileIds": { + "items": { + "type": "string" + }, + "markdownDescription": "The identifiers of the storage profiles that this queue can use to share assets between workers using different operating systems.", + "title": "AllowedStorageProfileIds", + "type": "array" + }, + "DefaultBudgetAction": { + "markdownDescription": "The default action taken on a queue summary if a budget wasn't configured.", + "title": "DefaultBudgetAction", + "type": "string" + }, + "Description": { + "markdownDescription": "A description of the queue that helps identify what the queue is used for.", + "title": "Description", + "type": "string" + }, + "DisplayName": { + "markdownDescription": "The display name of the queue summary to update.", + "title": "DisplayName", + "type": "string" + }, + "FarmId": { + "markdownDescription": "The farm ID.", + "title": "FarmId", + "type": "string" + }, + "JobAttachmentSettings": { + "$ref": "#/definitions/AWS::Deadline::Queue.JobAttachmentSettings", + "markdownDescription": "The job attachment settings. 
These are the Amazon S3 bucket name and the Amazon S3 prefix.", + "title": "JobAttachmentSettings" + }, + "JobRunAsUser": { + "$ref": "#/definitions/AWS::Deadline::Queue.JobRunAsUser", + "markdownDescription": "Identifies the user for a job.", + "title": "JobRunAsUser" + }, + "RequiredFileSystemLocationNames": { + "items": { + "type": "string" + }, + "markdownDescription": "The file system location that the queue uses.", + "title": "RequiredFileSystemLocationNames", + "type": "array" + }, + "RoleArn": { + "markdownDescription": "The Amazon Resource Name (ARN) of the IAM role that workers use when running jobs in this queue.", + "title": "RoleArn", + "type": "string" + } + }, + "required": [ + "DisplayName" + ], + "type": "object" + }, + "Type": { + "enum": [ + "AWS::Deadline::Queue" + ], + "type": "string" + }, + "UpdateReplacePolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + } + }, + "required": [ + "Type", + "Properties" + ], + "type": "object" + }, + "AWS::Deadline::Queue.JobAttachmentSettings": { + "additionalProperties": false, + "properties": { + "RootPrefix": { + "markdownDescription": "The root prefix.", + "title": "RootPrefix", + "type": "string" + }, + "S3BucketName": { + "markdownDescription": "The Amazon S3 bucket name.", + "title": "S3BucketName", + "type": "string" + } + }, + "required": [ + "RootPrefix", + "S3BucketName" + ], + "type": "object" + }, + "AWS::Deadline::Queue.JobRunAsUser": { + "additionalProperties": false, + "properties": { + "Posix": { + "$ref": "#/definitions/AWS::Deadline::Queue.PosixUser", + "markdownDescription": "The user and group that the jobs in the queue run as.", + "title": "Posix" + }, + "RunAs": { + "markdownDescription": "Specifies whether the job should run using the queue's system user or if the job should run using the worker agent system user.", + "title": "RunAs", + "type": "string" + }, + "Windows": { + "$ref": "#/definitions/AWS::Deadline::Queue.WindowsUser", + "markdownDescription": "Identifies a Microsoft Windows user.", + "title": "Windows" + } + }, + "required": [ + "RunAs" + ], + "type": "object" + }, + "AWS::Deadline::Queue.PosixUser": { + "additionalProperties": false, + "properties": { + "Group": { + "markdownDescription": "The name of the POSIX user's group.", + "title": "Group", + "type": "string" + }, + "User": { + "markdownDescription": "The name of the POSIX user.", + "title": "User", + "type": "string" + } + }, + "required": [ + "Group", + "User" + ], + "type": "object" + }, + "AWS::Deadline::Queue.WindowsUser": { + "additionalProperties": false, + "properties": { + "PasswordArn": { + "markdownDescription": "The password ARN for the Windows user.", + "title": "PasswordArn", + "type": "string" + }, + "User": { + "markdownDescription": "The user.", + "title": "User", + "type": "string" + } + }, + "required": [ + "PasswordArn", + "User" + ], + "type": "object" + }, + "AWS::Deadline::QueueEnvironment": { + "additionalProperties": false, + "properties": { + "Condition": { + "type": "string" + }, + "DeletionPolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + }, + "DependsOn": { + "anyOf": [ + { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + { + "items": { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + "type": "array" + } + ] + }, + "Metadata": { + "type": "object" + }, + "Properties": { + "additionalProperties": false, + "properties": { + "FarmId": { + "markdownDescription": "The identifier assigned to the farm that contains the queue.", + 
"title": "FarmId", + "type": "string" + }, + "Priority": { + "markdownDescription": "The queue environment's priority.", + "title": "Priority", + "type": "number" + }, + "QueueId": { + "markdownDescription": "The unique identifier of the queue that contains the environment.", + "title": "QueueId", + "type": "string" + }, + "Template": { + "markdownDescription": "A JSON or YAML template the describes the processing environment for the queue.", + "title": "Template", + "type": "string" + }, + "TemplateType": { + "markdownDescription": "Specifies whether the template for the queue environment is JSON or YAML.", + "title": "TemplateType", + "type": "string" + } + }, + "required": [ + "FarmId", + "Priority", + "QueueId", + "Template", + "TemplateType" + ], + "type": "object" + }, + "Type": { + "enum": [ + "AWS::Deadline::QueueEnvironment" + ], + "type": "string" + }, + "UpdateReplacePolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + } + }, + "required": [ + "Type", + "Properties" + ], + "type": "object" + }, + "AWS::Deadline::QueueFleetAssociation": { + "additionalProperties": false, + "properties": { + "Condition": { + "type": "string" + }, + "DeletionPolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + }, + "DependsOn": { + "anyOf": [ + { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + { + "items": { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + "type": "array" + } + ] + }, + "Metadata": { + "type": "object" + }, + "Properties": { + "additionalProperties": false, + "properties": { + "FarmId": { + "markdownDescription": "The identifier of the farm that contains the queue and the fleet.", + "title": "FarmId", + "type": "string" + }, + "FleetId": { + "markdownDescription": "The fleet ID.", + "title": "FleetId", + "type": "string" + }, + "QueueId": { + "markdownDescription": "The queue ID.", + "title": "QueueId", + "type": "string" + } + }, + "required": [ + "FarmId", + "FleetId", + "QueueId" + ], + "type": "object" + }, + "Type": { + "enum": [ + "AWS::Deadline::QueueFleetAssociation" + ], + "type": "string" + }, + "UpdateReplacePolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + } + }, + "required": [ + "Type", + "Properties" + ], + "type": "object" + }, + "AWS::Deadline::StorageProfile": { + "additionalProperties": false, + "properties": { + "Condition": { + "type": "string" + }, + "DeletionPolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + }, + "DependsOn": { + "anyOf": [ + { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + { + "items": { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + "type": "array" + } + ] + }, + "Metadata": { + "type": "object" + }, + "Properties": { + "additionalProperties": false, + "properties": { + "DisplayName": { + "markdownDescription": "The display name of the storage profile summary to update.", + "title": "DisplayName", + "type": "string" + }, + "FarmId": { + "markdownDescription": "The unique identifier of the farm that contains the storage profile.", + "title": "FarmId", + "type": "string" + }, + "FileSystemLocations": { + "items": { + "$ref": "#/definitions/AWS::Deadline::StorageProfile.FileSystemLocation" + }, + "markdownDescription": "Operating system specific file system path to the storage location.", + "title": "FileSystemLocations", + "type": "array" + }, + "OsFamily": { + "markdownDescription": "The operating system (OS) family.", + "title": "OsFamily", + "type": "string" + } + }, + 
"required": [ + "DisplayName", + "OsFamily" + ], + "type": "object" + }, + "Type": { + "enum": [ + "AWS::Deadline::StorageProfile" + ], + "type": "string" + }, + "UpdateReplacePolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + } + }, + "required": [ + "Type", + "Properties" + ], + "type": "object" + }, + "AWS::Deadline::StorageProfile.FileSystemLocation": { + "additionalProperties": false, + "properties": { + "Name": { + "markdownDescription": "The location name.", + "title": "Name", + "type": "string" + }, + "Path": { + "markdownDescription": "The file path.", + "title": "Path", + "type": "string" + }, + "Type": { + "markdownDescription": "The type of file.", + "title": "Type", + "type": "string" + } + }, + "required": [ + "Name", + "Path", + "Type" + ], + "type": "object" + }, + "AWS::Detective::Graph": { + "additionalProperties": false, + "properties": { + "Condition": { + "type": "string" + }, + "DeletionPolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + }, + "DependsOn": { + "anyOf": [ + { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + { + "items": { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + "type": "array" + } + ] + }, + "Metadata": { + "type": "object" + }, + "Properties": { + "additionalProperties": false, + "properties": { + "AutoEnableMembers": { + "markdownDescription": "Indicates whether to automatically enable new organization accounts as member accounts in the organization behavior graph.\n\nBy default, this property is set to `false` . If you want to change the value of this property, you must be the Detective administrator for the organization. For more information on setting a Detective administrator account, see [AWS::Detective::OrganizationAdmin](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-detective-organizationadmin.html)", + "title": "AutoEnableMembers", + "type": "boolean" + }, + "Tags": { + "items": { + "$ref": "#/definitions/Tag" + }, + "markdownDescription": "The tag values to assign to the new behavior graph.", + "title": "Tags", + "type": "array" + } + }, + "type": "object" + }, + "Type": { + "enum": [ + "AWS::Detective::Graph" + ], + "type": "string" + }, + "UpdateReplacePolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + } + }, + "required": [ + "Type" + ], + "type": "object" + }, + "AWS::Detective::MemberInvitation": { + "additionalProperties": false, + "properties": { + "Condition": { + "type": "string" + }, + "DeletionPolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + }, + "DependsOn": { + "anyOf": [ + { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + { + "items": { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + "type": "array" + } + ] + }, + "Metadata": { + "type": "object" + }, + "Properties": { + "additionalProperties": false, + "properties": { + "DisableEmailNotification": { + "markdownDescription": "Whether to send an invitation email to the member account. If set to true, the member account does not receive an invitation email.", + "title": "DisableEmailNotification", + "type": "boolean" + }, + "GraphArn": { + "markdownDescription": "The ARN of the behavior graph to invite the account to contribute data to.", + "title": "GraphArn", + "type": "string" + }, + "MemberEmailAddress": { + "markdownDescription": "The root user email address of the invited account. 
If the email address provided is not the root user email address for the provided account, the invitation creation fails.", + "title": "MemberEmailAddress", + "type": "string" + }, + "MemberId": { + "markdownDescription": "The AWS account identifier of the invited account", + "title": "MemberId", + "type": "string" + }, + "Message": { + "markdownDescription": "Customized text to include in the invitation email message.", + "title": "Message", + "type": "string" + } + }, + "required": [ + "GraphArn", + "MemberEmailAddress", + "MemberId" + ], + "type": "object" + }, + "Type": { + "enum": [ + "AWS::Detective::MemberInvitation" + ], + "type": "string" + }, + "UpdateReplacePolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + } + }, + "required": [ + "Type", + "Properties" + ], + "type": "object" + }, + "AWS::Detective::OrganizationAdmin": { "additionalProperties": false, "properties": { "Condition": { @@ -66277,6 +68952,16 @@ "title": "BgpAsn", "type": "number" }, + "BgpAsnExtended": { + "markdownDescription": "", + "title": "BgpAsnExtended", + "type": "number" + }, + "CertificateArn": { + "markdownDescription": "The Amazon Resource Name (ARN) for the customer gateway certificate.", + "title": "CertificateArn", + "type": "string" + }, "DeviceName": { "markdownDescription": "The name of customer gateway device.", "title": "DeviceName", @@ -66302,7 +68987,6 @@ } }, "required": [ - "BgpAsn", "IpAddress", "Type" ], @@ -68595,7 +71279,7 @@ "items": { "$ref": "#/definitions/AWS::EC2::Instance.ElasticGpuSpecification" }, - "markdownDescription": "Deprecated.\n\n> Amazon Elastic Graphics reached end of life on January 8, 2024. For workloads that require graphics acceleration, we recommend that you use Amazon EC2 G4ad, G4dn, or G5 instances.", + "markdownDescription": "An elastic GPU to associate with the instance.\n\n> Amazon Elastic Graphics reached end of life on January 8, 2024.", "title": "ElasticGpuSpecifications", "type": "array" }, @@ -68603,7 +71287,7 @@ "items": { "$ref": "#/definitions/AWS::EC2::Instance.ElasticInferenceAccelerator" }, - "markdownDescription": "An elastic inference accelerator to associate with the instance. Elastic inference accelerators are a resource you can attach to your Amazon EC2 instances to accelerate your Deep Learning (DL) inference workloads.\n\nYou cannot specify accelerators from different generations in the same request.\n\n> Starting April 15, 2023, AWS will not onboard new customers to Amazon Elastic Inference (EI), and will help current customers migrate their workloads to options that offer better price and performance. After April 15, 2023, new customers will not be able to launch instances with Amazon EI accelerators in Amazon SageMaker, Amazon ECS, or Amazon EC2. However, customers who have used Amazon EI at least once during the past 30-day period are considered current customers and will be able to continue using the service.", + "markdownDescription": "An elastic inference accelerator to associate with the instance.\n\n> Amazon Elastic Inference (EI) is no longer available to new customers. For more information, see [Amazon Elastic Inference FAQs](https://docs.aws.amazon.com/machine-learning/elastic-inference/faqs/) .", "title": "ElasticInferenceAccelerators", "type": "array" }, @@ -68672,7 +71356,7 @@ }, "LaunchTemplate": { "$ref": "#/definitions/AWS::EC2::Instance.LaunchTemplateSpecification", - "markdownDescription": "The launch template to use to launch the instances. 
Any parameters that you specify in the AWS CloudFormation template override the same parameters in the launch template. You can specify either the name or ID of a launch template, but not both.", + "markdownDescription": "The launch template. Any additional parameters that you specify for the new instance overwrite the corresponding parameters included in the launch template.", "title": "LaunchTemplate" }, "LicenseSpecifications": { @@ -68997,17 +71681,17 @@ "additionalProperties": false, "properties": { "LaunchTemplateId": { - "markdownDescription": "The ID of the launch template.\n\nYou must specify the `LaunchTemplateId` or the `LaunchTemplateName` , but not both.", + "markdownDescription": "The ID of the launch template.\n\nYou must specify either the launch template ID or the launch template name, but not both.", "title": "LaunchTemplateId", "type": "string" }, "LaunchTemplateName": { - "markdownDescription": "The name of the launch template.\n\nYou must specify the `LaunchTemplateName` or the `LaunchTemplateId` , but not both.", + "markdownDescription": "The name of the launch template.\n\nYou must specify either the launch template ID or the launch template name, but not both.", "title": "LaunchTemplateName", "type": "string" }, "Version": { - "markdownDescription": "The version number of the launch template.\n\nSpecifying `$Latest` or `$Default` for the template version number is not supported. However, you can specify `LatestVersionNumber` or `DefaultVersionNumber` using the `Fn::GetAtt` intrinsic function. For more information, see [Fn::GetAtt](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ec2-launchtemplate.html#aws-resource-ec2-launchtemplate-return-values-fn--getatt) .", + "markdownDescription": "The version number of the launch template. You must specify this property.\n\nTo specify the default version of the template, use the `Fn::GetAtt` intrinsic function to retrieve the `DefaultVersionNumber` attribute of the launch template. To specify the latest version of the template, use `Fn::GetAtt` to retrieve the `LatestVersionNumber` attribute. For more information, see [AWS::EC2::LaunchTemplate return values for Fn::GetAtt](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ec2-launchtemplate.html#aws-resource-ec2-launchtemplate-return-values-fn--getatt) .", "title": "Version", "type": "string" } @@ -73637,11 +76321,6 @@ "title": "IpProtocol", "type": "string" }, - "SourceSecurityGroupId": { - "markdownDescription": "", - "title": "SourceSecurityGroupId", - "type": "string" - }, "ToPort": { "markdownDescription": "If the protocol is TCP or UDP, this is the end of the port range. If the protocol is ICMP or ICMPv6, this is the ICMP code or -1 (all ICMP codes). 
If the start port is -1 (all ICMP types), then the end port must be -1 (all ICMP codes).", "title": "ToPort", @@ -79727,7 +82406,7 @@ }, "type": "object" }, - "AWS::ECS::CapacityProvider": { + "AWS::ECR::RepositoryCreationTemplate": { "additionalProperties": false, "properties": { "Condition": { @@ -79762,33 +82441,62 @@ "Properties": { "additionalProperties": false, "properties": { - "AutoScalingGroupProvider": { - "$ref": "#/definitions/AWS::ECS::CapacityProvider.AutoScalingGroupProvider", - "markdownDescription": "The Auto Scaling group settings for the capacity provider.", - "title": "AutoScalingGroupProvider" + "AppliedFor": { + "items": { + "type": "string" + }, + "markdownDescription": "", + "title": "AppliedFor", + "type": "array" }, - "Name": { - "markdownDescription": "The name of the capacity provider. If a name is specified, it cannot start with `aws` , `ecs` , or `fargate` . If no name is specified, a default name in the `CFNStackName-CFNResourceName-RandomString` format is used.", - "title": "Name", + "Description": { + "markdownDescription": "", + "title": "Description", "type": "string" }, - "Tags": { + "EncryptionConfiguration": { + "$ref": "#/definitions/AWS::ECR::RepositoryCreationTemplate.EncryptionConfiguration", + "markdownDescription": "The encryption configuration for the repository. This determines how the contents of your repository are encrypted at rest.\n\nBy default, when no encryption configuration is set or the `AES256` encryption type is used, Amazon ECR uses server-side encryption with Amazon S3-managed encryption keys which encrypts your data at rest using an AES-256 encryption algorithm. This does not require any action on your part.\n\nFor more control over the encryption of the contents of your repository, you can use server-side encryption with AWS Key Management Service key stored in AWS Key Management Service ( AWS KMS ) to encrypt your images. For more information, see [Amazon ECR encryption at rest](https://docs.aws.amazon.com/AmazonECR/latest/userguide/encryption-at-rest.html) in the *Amazon Elastic Container Registry User Guide* .", + "title": "EncryptionConfiguration" + }, + "ImageTagMutability": { + "markdownDescription": "", + "title": "ImageTagMutability", + "type": "string" + }, + "LifecyclePolicy": { + "markdownDescription": "", + "title": "LifecyclePolicy", + "type": "string" + }, + "Prefix": { + "markdownDescription": "", + "title": "Prefix", + "type": "string" + }, + "RepositoryPolicy": { + "markdownDescription": "", + "title": "RepositoryPolicy", + "type": "string" + }, + "ResourceTags": { "items": { "$ref": "#/definitions/Tag" }, - "markdownDescription": "The metadata that you apply to the capacity provider to help you categorize and organize it. Each tag consists of a key and an optional value. You define both.\n\nThe following basic restrictions apply to tags:\n\n- Maximum number of tags per resource - 50\n- For each resource, each tag key must be unique, and each tag key can have only one value.\n- Maximum key length - 128 Unicode characters in UTF-8\n- Maximum value length - 256 Unicode characters in UTF-8\n- If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . 
_ : / @.\n- Tag keys and values are case-sensitive.\n- Do not use `aws:` , `AWS:` , or any upper or lowercase combination of such as a prefix for either keys or values as it is reserved for AWS use. You cannot edit or delete tag keys or values with this prefix. Tags with this prefix do not count against your tags per resource limit.", - "title": "Tags", + "markdownDescription": "The tags attached to the resource.", + "title": "ResourceTags", "type": "array" } }, "required": [ - "AutoScalingGroupProvider" + "AppliedFor", + "Prefix" ], "type": "object" }, "Type": { "enum": [ - "AWS::ECS::CapacityProvider" + "AWS::ECR::RepositoryCreationTemplate" ], "type": "string" }, @@ -79807,67 +82515,26 @@ ], "type": "object" }, - "AWS::ECS::CapacityProvider.AutoScalingGroupProvider": { + "AWS::ECR::RepositoryCreationTemplate.EncryptionConfiguration": { "additionalProperties": false, "properties": { - "AutoScalingGroupArn": { - "markdownDescription": "The Amazon Resource Name (ARN) that identifies the Auto Scaling group, or the Auto Scaling group name.", - "title": "AutoScalingGroupArn", - "type": "string" - }, - "ManagedDraining": { - "markdownDescription": "The managed draining option for the Auto Scaling group capacity provider. When you enable this, Amazon ECS manages and gracefully drains the EC2 container instances that are in the Auto Scaling group capacity provider.", - "title": "ManagedDraining", + "EncryptionType": { + "markdownDescription": "The encryption type to use.\n\nIf you use the `KMS` encryption type, the contents of the repository will be encrypted using server-side encryption with AWS Key Management Service key stored in AWS KMS . When you use AWS KMS to encrypt your data, you can either use the default AWS managed AWS KMS key for Amazon ECR, or specify your own AWS KMS key, which you already created. For more information, see [Protecting data using server-side encryption with an AWS KMS key stored in AWS Key Management Service (SSE-KMS)](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html) in the *Amazon Simple Storage Service Console Developer Guide* .\n\nIf you use the `AES256` encryption type, Amazon ECR uses server-side encryption with Amazon S3-managed encryption keys which encrypts the images in the repository using an AES-256 encryption algorithm. For more information, see [Protecting data using server-side encryption with Amazon S3-managed encryption keys (SSE-S3)](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html) in the *Amazon Simple Storage Service Console Developer Guide* .", + "title": "EncryptionType", "type": "string" }, - "ManagedScaling": { - "$ref": "#/definitions/AWS::ECS::CapacityProvider.ManagedScaling", - "markdownDescription": "The managed scaling settings for the Auto Scaling group capacity provider.", - "title": "ManagedScaling" - }, - "ManagedTerminationProtection": { - "markdownDescription": "The managed termination protection setting to use for the Auto Scaling group capacity provider. This determines whether the Auto Scaling group has managed termination protection. The default is off.\n\n> When using managed termination protection, managed scaling must also be used otherwise managed termination protection doesn't work. \n\nWhen managed termination protection is on, Amazon ECS prevents the Amazon EC2 instances in an Auto Scaling group that contain tasks from being terminated during a scale-in action. 
The Auto Scaling group and each instance in the Auto Scaling group must have instance protection from scale-in actions on as well. For more information, see [Instance Protection](https://docs.aws.amazon.com/autoscaling/ec2/userguide/as-instance-termination.html#instance-protection) in the *AWS Auto Scaling User Guide* .\n\nWhen managed termination protection is off, your Amazon EC2 instances aren't protected from termination when the Auto Scaling group scales in.", - "title": "ManagedTerminationProtection", + "KmsKey": { + "markdownDescription": "If you use the `KMS` encryption type, specify the AWS KMS key to use for encryption. The alias, key ID, or full ARN of the AWS KMS key can be specified. The key must exist in the same Region as the repository. If no key is specified, the default AWS managed AWS KMS key for Amazon ECR will be used.", + "title": "KmsKey", "type": "string" } }, "required": [ - "AutoScalingGroupArn" + "EncryptionType" ], "type": "object" }, - "AWS::ECS::CapacityProvider.ManagedScaling": { - "additionalProperties": false, - "properties": { - "InstanceWarmupPeriod": { - "markdownDescription": "The period of time, in seconds, after a newly launched Amazon EC2 instance can contribute to CloudWatch metrics for Auto Scaling group. If this parameter is omitted, the default value of `300` seconds is used.", - "title": "InstanceWarmupPeriod", - "type": "number" - }, - "MaximumScalingStepSize": { - "markdownDescription": "The maximum number of Amazon EC2 instances that Amazon ECS will scale out at one time. The scale in process is not affected by this parameter. If this parameter is omitted, the default value of `10000` is used.", - "title": "MaximumScalingStepSize", - "type": "number" - }, - "MinimumScalingStepSize": { - "markdownDescription": "The minimum number of Amazon EC2 instances that Amazon ECS will scale out at one time. The scale in process is not affected by this parameter If this parameter is omitted, the default value of `1` is used.\n\nWhen additional capacity is required, Amazon ECS will scale up the minimum scaling step size even if the actual demand is less than the minimum scaling step size.\n\nIf you use a capacity provider with an Auto Scaling group configured with more than one Amazon EC2 instance type or Availability Zone, Amazon ECS will scale up by the exact minimum scaling step size value and will ignore both the maximum scaling step size as well as the capacity demand.", - "title": "MinimumScalingStepSize", - "type": "number" - }, - "Status": { - "markdownDescription": "Determines whether to use managed scaling for the capacity provider.", - "title": "Status", - "type": "string" - }, - "TargetCapacity": { - "markdownDescription": "The target capacity utilization as a percentage for the capacity provider. The specified value must be greater than `0` and less than or equal to `100` . For example, if you want the capacity provider to maintain 10% spare capacity, then that means the utilization is 90%, so use a `targetCapacity` of `90` . 
The default value of `100` percent results in the Amazon EC2 instances in your Auto Scaling group being completely used.", - "title": "TargetCapacity", - "type": "number" - } - }, - "type": "object" - }, - "AWS::ECS::Cluster": { + "AWS::ECS::CapacityProvider": { "additionalProperties": false, "properties": { "Condition": { @@ -79902,59 +82569,33 @@ "Properties": { "additionalProperties": false, "properties": { - "CapacityProviders": { - "items": { - "type": "string" - }, - "markdownDescription": "The short name of one or more capacity providers to associate with the cluster. A capacity provider must be associated with a cluster before it can be included as part of the default capacity provider strategy of the cluster or used in a capacity provider strategy when calling the [CreateService](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_CreateService.html) or [RunTask](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_RunTask.html) actions.\n\nIf specifying a capacity provider that uses an Auto Scaling group, the capacity provider must be created but not associated with another cluster. New Auto Scaling group capacity providers can be created with the [CreateCapacityProvider](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_CreateCapacityProvider.html) API operation.\n\nTo use a AWS Fargate capacity provider, specify either the `FARGATE` or `FARGATE_SPOT` capacity providers. The AWS Fargate capacity providers are available to all accounts and only need to be associated with a cluster to be used.\n\nThe [PutCapacityProvider](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_PutCapacityProvider.html) API operation is used to update the list of available capacity providers for a cluster after the cluster is created.", - "title": "CapacityProviders", - "type": "array" + "AutoScalingGroupProvider": { + "$ref": "#/definitions/AWS::ECS::CapacityProvider.AutoScalingGroupProvider", + "markdownDescription": "The Auto Scaling group settings for the capacity provider.", + "title": "AutoScalingGroupProvider" }, - "ClusterName": { - "markdownDescription": "A user-generated string that you use to identify your cluster. If you don't specify a name, AWS CloudFormation generates a unique physical ID for the name.", - "title": "ClusterName", + "Name": { + "markdownDescription": "The name of the capacity provider. If a name is specified, it cannot start with `aws` , `ecs` , or `fargate` . If no name is specified, a default name in the `CFNStackName-CFNResourceName-RandomString` format is used.", + "title": "Name", "type": "string" }, - "ClusterSettings": { - "items": { - "$ref": "#/definitions/AWS::ECS::Cluster.ClusterSettings" - }, - "markdownDescription": "The settings to use when creating a cluster. This parameter is used to turn on CloudWatch Container Insights for a cluster.", - "title": "ClusterSettings", - "type": "array" - }, - "Configuration": { - "$ref": "#/definitions/AWS::ECS::Cluster.ClusterConfiguration", - "markdownDescription": "The execute command configuration for the cluster.", - "title": "Configuration" - }, - "DefaultCapacityProviderStrategy": { - "items": { - "$ref": "#/definitions/AWS::ECS::Cluster.CapacityProviderStrategyItem" - }, - "markdownDescription": "The default capacity provider strategy for the cluster. 
When services or tasks are run in the cluster with no launch type or capacity provider strategy specified, the default capacity provider strategy is used.", - "title": "DefaultCapacityProviderStrategy", - "type": "array" - }, - "ServiceConnectDefaults": { - "$ref": "#/definitions/AWS::ECS::Cluster.ServiceConnectDefaults", - "markdownDescription": "Use this parameter to set a default Service Connect namespace. After you set a default Service Connect namespace, any new services with Service Connect turned on that are created in the cluster are added as client services in the namespace. This setting only applies to new services that set the `enabled` parameter to `true` in the `ServiceConnectConfiguration` . You can set the namespace of each service individually in the `ServiceConnectConfiguration` to override this default parameter.\n\nTasks that run in a namespace can use short names to connect to services in the namespace. Tasks can connect to services across all of the clusters in the namespace. Tasks connect through a managed proxy container that collects logs and metrics for increased visibility. Only the tasks that Amazon ECS services create are supported with Service Connect. For more information, see [Service Connect](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/service-connect.html) in the *Amazon Elastic Container Service Developer Guide* .", - "title": "ServiceConnectDefaults" - }, "Tags": { "items": { "$ref": "#/definitions/Tag" }, - "markdownDescription": "The metadata that you apply to the cluster to help you categorize and organize them. Each tag consists of a key and an optional value. You define both.\n\nThe following basic restrictions apply to tags:\n\n- Maximum number of tags per resource - 50\n- For each resource, each tag key must be unique, and each tag key can have only one value.\n- Maximum key length - 128 Unicode characters in UTF-8\n- Maximum value length - 256 Unicode characters in UTF-8\n- If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . _ : / @.\n- Tag keys and values are case-sensitive.\n- Do not use `aws:` , `AWS:` , or any upper or lowercase combination of such as a prefix for either keys or values as it is reserved for AWS use. You cannot edit or delete tag keys or values with this prefix. Tags with this prefix do not count against your tags per resource limit.", + "markdownDescription": "The metadata that you apply to the capacity provider to help you categorize and organize it. Each tag consists of a key and an optional value. You define both.\n\nThe following basic restrictions apply to tags:\n\n- Maximum number of tags per resource - 50\n- For each resource, each tag key must be unique, and each tag key can have only one value.\n- Maximum key length - 128 Unicode characters in UTF-8\n- Maximum value length - 256 Unicode characters in UTF-8\n- If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . _ : / @.\n- Tag keys and values are case-sensitive.\n- Do not use `aws:` , `AWS:` , or any upper or lowercase combination of such as a prefix for either keys or values as it is reserved for AWS use. 
You cannot edit or delete tag keys or values with this prefix. Tags with this prefix do not count against your tags per resource limit.", "title": "Tags", "type": "array" } }, + "required": [ + "AutoScalingGroupProvider" + ], "type": "object" }, "Type": { "enum": [ - "AWS::ECS::Cluster" + "AWS::ECS::CapacityProvider" ], "type": "string" }, @@ -79968,122 +82609,288 @@ } }, "required": [ - "Type" + "Type", + "Properties" ], "type": "object" }, - "AWS::ECS::Cluster.CapacityProviderStrategyItem": { - "additionalProperties": false, - "properties": { - "Base": { - "markdownDescription": "The *base* value designates how many tasks, at a minimum, to run on the specified capacity provider. Only one capacity provider in a capacity provider strategy can have a *base* defined. If no value is specified, the default value of `0` is used.", - "title": "Base", - "type": "number" - }, - "CapacityProvider": { - "markdownDescription": "The short name of the capacity provider.", - "title": "CapacityProvider", - "type": "string" - }, - "Weight": { - "markdownDescription": "The *weight* value designates the relative percentage of the total number of tasks launched that should use the specified capacity provider. The `weight` value is taken into consideration after the `base` value, if defined, is satisfied.\n\nIf no `weight` value is specified, the default value of `0` is used. When multiple capacity providers are specified within a capacity provider strategy, at least one of the capacity providers must have a weight value greater than zero and any capacity providers with a weight of `0` can't be used to place tasks. If you specify multiple capacity providers in a strategy that all have a weight of `0` , any `RunTask` or `CreateService` actions using the capacity provider strategy will fail.\n\nAn example scenario for using weights is defining a strategy that contains two capacity providers and both have a weight of `1` , then when the `base` is satisfied, the tasks will be split evenly across the two capacity providers. Using that same logic, if you specify a weight of `1` for *capacityProviderA* and a weight of `4` for *capacityProviderB* , then for every one task that's run using *capacityProviderA* , four tasks would use *capacityProviderB* .", - "title": "Weight", - "type": "number" - } - }, - "type": "object" - }, - "AWS::ECS::Cluster.ClusterConfiguration": { - "additionalProperties": false, - "properties": { - "ExecuteCommandConfiguration": { - "$ref": "#/definitions/AWS::ECS::Cluster.ExecuteCommandConfiguration", - "markdownDescription": "The details of the execute command configuration.", - "title": "ExecuteCommandConfiguration" - } - }, - "type": "object" - }, - "AWS::ECS::Cluster.ClusterSettings": { + "AWS::ECS::CapacityProvider.AutoScalingGroupProvider": { "additionalProperties": false, "properties": { - "Name": { - "markdownDescription": "The name of the cluster setting. The value is `containerInsights` .", - "title": "Name", + "AutoScalingGroupArn": { + "markdownDescription": "The Amazon Resource Name (ARN) that identifies the Auto Scaling group, or the Auto Scaling group name.", + "title": "AutoScalingGroupArn", "type": "string" }, - "Value": { - "markdownDescription": "The value to set for the cluster setting. The supported values are `enabled` and `disabled` .\n\nIf you set `name` to `containerInsights` and `value` to `enabled` , CloudWatch Container Insights will be on for the cluster, otherwise it will be off unless the `containerInsights` account setting is turned on. 
If a cluster value is specified, it will override the `containerInsights` value set with [PutAccountSetting](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_PutAccountSetting.html) or [PutAccountSettingDefault](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_PutAccountSettingDefault.html) .", - "title": "Value", - "type": "string" - } - }, - "type": "object" - }, - "AWS::ECS::Cluster.ExecuteCommandConfiguration": { - "additionalProperties": false, - "properties": { - "KmsKeyId": { - "markdownDescription": "Specify an AWS Key Management Service key ID to encrypt the data between the local client and the container.", - "title": "KmsKeyId", + "ManagedDraining": { + "markdownDescription": "The managed draining option for the Auto Scaling group capacity provider. When you enable this, Amazon ECS manages and gracefully drains the EC2 container instances that are in the Auto Scaling group capacity provider.", + "title": "ManagedDraining", "type": "string" }, - "LogConfiguration": { - "$ref": "#/definitions/AWS::ECS::Cluster.ExecuteCommandLogConfiguration", - "markdownDescription": "The log configuration for the results of the execute command actions. The logs can be sent to CloudWatch Logs or an Amazon S3 bucket. When `logging=OVERRIDE` is specified, a `logConfiguration` must be provided.", - "title": "LogConfiguration" + "ManagedScaling": { + "$ref": "#/definitions/AWS::ECS::CapacityProvider.ManagedScaling", + "markdownDescription": "The managed scaling settings for the Auto Scaling group capacity provider.", + "title": "ManagedScaling" }, - "Logging": { - "markdownDescription": "The log setting to use for redirecting logs for your execute command results. The following log settings are available.\n\n- `NONE` : The execute command session is not logged.\n- `DEFAULT` : The `awslogs` configuration in the task definition is used. If no logging parameter is specified, it defaults to this value. If no `awslogs` log driver is configured in the task definition, the output won't be logged.\n- `OVERRIDE` : Specify the logging details as a part of `logConfiguration` . If the `OVERRIDE` logging option is specified, the `logConfiguration` is required.", - "title": "Logging", + "ManagedTerminationProtection": { + "markdownDescription": "The managed termination protection setting to use for the Auto Scaling group capacity provider. This determines whether the Auto Scaling group has managed termination protection. The default is off.\n\n> When using managed termination protection, managed scaling must also be used otherwise managed termination protection doesn't work. \n\nWhen managed termination protection is on, Amazon ECS prevents the Amazon EC2 instances in an Auto Scaling group that contain tasks from being terminated during a scale-in action. The Auto Scaling group and each instance in the Auto Scaling group must have instance protection from scale-in actions on as well. 
For more information, see [Instance Protection](https://docs.aws.amazon.com/autoscaling/ec2/userguide/as-instance-termination.html#instance-protection) in the *AWS Auto Scaling User Guide* .\n\nWhen managed termination protection is off, your Amazon EC2 instances aren't protected from termination when the Auto Scaling group scales in.", + "title": "ManagedTerminationProtection", "type": "string" } }, + "required": [ + "AutoScalingGroupArn" + ], "type": "object" }, - "AWS::ECS::Cluster.ExecuteCommandLogConfiguration": { + "AWS::ECS::CapacityProvider.ManagedScaling": { "additionalProperties": false, "properties": { - "CloudWatchEncryptionEnabled": { - "markdownDescription": "Determines whether to use encryption on the CloudWatch logs. If not specified, encryption will be off.", - "title": "CloudWatchEncryptionEnabled", - "type": "boolean" - }, - "CloudWatchLogGroupName": { - "markdownDescription": "The name of the CloudWatch log group to send logs to.\n\n> The CloudWatch log group must already be created.", - "title": "CloudWatchLogGroupName", - "type": "string" + "InstanceWarmupPeriod": { + "markdownDescription": "The period of time, in seconds, after a newly launched Amazon EC2 instance can contribute to CloudWatch metrics for Auto Scaling group. If this parameter is omitted, the default value of `300` seconds is used.", + "title": "InstanceWarmupPeriod", + "type": "number" }, - "S3BucketName": { - "markdownDescription": "The name of the S3 bucket to send logs to.\n\n> The S3 bucket must already be created.", - "title": "S3BucketName", - "type": "string" + "MaximumScalingStepSize": { + "markdownDescription": "The maximum number of Amazon EC2 instances that Amazon ECS will scale out at one time. The scale in process is not affected by this parameter. If this parameter is omitted, the default value of `10000` is used.", + "title": "MaximumScalingStepSize", + "type": "number" }, - "S3EncryptionEnabled": { - "markdownDescription": "Determines whether to use encryption on the S3 logs. If not specified, encryption is not used.", - "title": "S3EncryptionEnabled", - "type": "boolean" + "MinimumScalingStepSize": { + "markdownDescription": "The minimum number of Amazon EC2 instances that Amazon ECS will scale out at one time. The scale in process is not affected by this parameter If this parameter is omitted, the default value of `1` is used.\n\nWhen additional capacity is required, Amazon ECS will scale up the minimum scaling step size even if the actual demand is less than the minimum scaling step size.\n\nIf you use a capacity provider with an Auto Scaling group configured with more than one Amazon EC2 instance type or Availability Zone, Amazon ECS will scale up by the exact minimum scaling step size value and will ignore both the maximum scaling step size as well as the capacity demand.", + "title": "MinimumScalingStepSize", + "type": "number" }, - "S3KeyPrefix": { - "markdownDescription": "An optional folder in the S3 bucket to place logs in.", - "title": "S3KeyPrefix", - "type": "string" - } - }, - "type": "object" - }, - "AWS::ECS::Cluster.ServiceConnectDefaults": { - "additionalProperties": false, - "properties": { - "Namespace": { - "markdownDescription": "The namespace name or full Amazon Resource Name (ARN) of the AWS Cloud Map namespace that's used when you create a service and don't specify a Service Connect configuration. The namespace name can include up to 1024 characters. The name is case-sensitive. 
The name can't include hyphens (-), tilde (~), greater than (>), less than (<), or slash (/).\n\nIf you enter an existing namespace name or ARN, then that namespace will be used. Any namespace type is supported. The namespace must be in this account and this AWS Region.\n\nIf you enter a new name, a AWS Cloud Map namespace will be created. Amazon ECS creates a AWS Cloud Map namespace with the \"API calls\" method of instance discovery only. This instance discovery method is the \"HTTP\" namespace type in the AWS Command Line Interface . Other types of instance discovery aren't used by Service Connect.\n\nIf you update the cluster with an empty string `\"\"` for the namespace name, the cluster configuration for Service Connect is removed. Note that the namespace will remain in AWS Cloud Map and must be deleted separately.\n\nFor more information about AWS Cloud Map , see [Working with Services](https://docs.aws.amazon.com/cloud-map/latest/dg/working-with-services.html) in the *AWS Cloud Map Developer Guide* .", - "title": "Namespace", + "Status": { + "markdownDescription": "Determines whether to use managed scaling for the capacity provider.", + "title": "Status", "type": "string" + }, + "TargetCapacity": { + "markdownDescription": "The target capacity utilization as a percentage for the capacity provider. The specified value must be greater than `0` and less than or equal to `100` . For example, if you want the capacity provider to maintain 10% spare capacity, then that means the utilization is 90%, so use a `targetCapacity` of `90` . The default value of `100` percent results in the Amazon EC2 instances in your Auto Scaling group being completely used.", + "title": "TargetCapacity", + "type": "number" } }, "type": "object" }, - "AWS::ECS::ClusterCapacityProviderAssociations": { + "AWS::ECS::Cluster": { + "additionalProperties": false, + "properties": { + "Condition": { + "type": "string" + }, + "DeletionPolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + }, + "DependsOn": { + "anyOf": [ + { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + { + "items": { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + "type": "array" + } + ] + }, + "Metadata": { + "type": "object" + }, + "Properties": { + "additionalProperties": false, + "properties": { + "CapacityProviders": { + "items": { + "type": "string" + }, + "markdownDescription": "The short name of one or more capacity providers to associate with the cluster. A capacity provider must be associated with a cluster before it can be included as part of the default capacity provider strategy of the cluster or used in a capacity provider strategy when calling the [CreateService](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_CreateService.html) or [RunTask](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_RunTask.html) actions.\n\nIf specifying a capacity provider that uses an Auto Scaling group, the capacity provider must be created but not associated with another cluster. New Auto Scaling group capacity providers can be created with the [CreateCapacityProvider](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_CreateCapacityProvider.html) API operation.\n\nTo use a AWS Fargate capacity provider, specify either the `FARGATE` or `FARGATE_SPOT` capacity providers. 
The AWS Fargate capacity providers are available to all accounts and only need to be associated with a cluster to be used.\n\nThe [PutCapacityProvider](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_PutCapacityProvider.html) API operation is used to update the list of available capacity providers for a cluster after the cluster is created.", + "title": "CapacityProviders", + "type": "array" + }, + "ClusterName": { + "markdownDescription": "A user-generated string that you use to identify your cluster. If you don't specify a name, AWS CloudFormation generates a unique physical ID for the name.", + "title": "ClusterName", + "type": "string" + }, + "ClusterSettings": { + "items": { + "$ref": "#/definitions/AWS::ECS::Cluster.ClusterSettings" + }, + "markdownDescription": "The settings to use when creating a cluster. This parameter is used to turn on CloudWatch Container Insights for a cluster.", + "title": "ClusterSettings", + "type": "array" + }, + "Configuration": { + "$ref": "#/definitions/AWS::ECS::Cluster.ClusterConfiguration", + "markdownDescription": "The execute command configuration for the cluster.", + "title": "Configuration" + }, + "DefaultCapacityProviderStrategy": { + "items": { + "$ref": "#/definitions/AWS::ECS::Cluster.CapacityProviderStrategyItem" + }, + "markdownDescription": "The default capacity provider strategy for the cluster. When services or tasks are run in the cluster with no launch type or capacity provider strategy specified, the default capacity provider strategy is used.", + "title": "DefaultCapacityProviderStrategy", + "type": "array" + }, + "ServiceConnectDefaults": { + "$ref": "#/definitions/AWS::ECS::Cluster.ServiceConnectDefaults", + "markdownDescription": "Use this parameter to set a default Service Connect namespace. After you set a default Service Connect namespace, any new services with Service Connect turned on that are created in the cluster are added as client services in the namespace. This setting only applies to new services that set the `enabled` parameter to `true` in the `ServiceConnectConfiguration` . You can set the namespace of each service individually in the `ServiceConnectConfiguration` to override this default parameter.\n\nTasks that run in a namespace can use short names to connect to services in the namespace. Tasks can connect to services across all of the clusters in the namespace. Tasks connect through a managed proxy container that collects logs and metrics for increased visibility. Only the tasks that Amazon ECS services create are supported with Service Connect. For more information, see [Service Connect](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/service-connect.html) in the *Amazon Elastic Container Service Developer Guide* .", + "title": "ServiceConnectDefaults" + }, + "Tags": { + "items": { + "$ref": "#/definitions/Tag" + }, + "markdownDescription": "The metadata that you apply to the cluster to help you categorize and organize them. Each tag consists of a key and an optional value. You define both.\n\nThe following basic restrictions apply to tags:\n\n- Maximum number of tags per resource - 50\n- For each resource, each tag key must be unique, and each tag key can have only one value.\n- Maximum key length - 128 Unicode characters in UTF-8\n- Maximum value length - 256 Unicode characters in UTF-8\n- If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. 
Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . _ : / @.\n- Tag keys and values are case-sensitive.\n- Do not use `aws:` , `AWS:` , or any upper or lowercase combination of such as a prefix for either keys or values as it is reserved for AWS use. You cannot edit or delete tag keys or values with this prefix. Tags with this prefix do not count against your tags per resource limit.", + "title": "Tags", + "type": "array" + } + }, + "type": "object" + }, + "Type": { + "enum": [ + "AWS::ECS::Cluster" + ], + "type": "string" + }, + "UpdateReplacePolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + } + }, + "required": [ + "Type" + ], + "type": "object" + }, + "AWS::ECS::Cluster.CapacityProviderStrategyItem": { + "additionalProperties": false, + "properties": { + "Base": { + "markdownDescription": "The *base* value designates how many tasks, at a minimum, to run on the specified capacity provider. Only one capacity provider in a capacity provider strategy can have a *base* defined. If no value is specified, the default value of `0` is used.", + "title": "Base", + "type": "number" + }, + "CapacityProvider": { + "markdownDescription": "The short name of the capacity provider.", + "title": "CapacityProvider", + "type": "string" + }, + "Weight": { + "markdownDescription": "The *weight* value designates the relative percentage of the total number of tasks launched that should use the specified capacity provider. The `weight` value is taken into consideration after the `base` value, if defined, is satisfied.\n\nIf no `weight` value is specified, the default value of `0` is used. When multiple capacity providers are specified within a capacity provider strategy, at least one of the capacity providers must have a weight value greater than zero and any capacity providers with a weight of `0` can't be used to place tasks. If you specify multiple capacity providers in a strategy that all have a weight of `0` , any `RunTask` or `CreateService` actions using the capacity provider strategy will fail.\n\nAn example scenario for using weights is defining a strategy that contains two capacity providers and both have a weight of `1` , then when the `base` is satisfied, the tasks will be split evenly across the two capacity providers. Using that same logic, if you specify a weight of `1` for *capacityProviderA* and a weight of `4` for *capacityProviderB* , then for every one task that's run using *capacityProviderA* , four tasks would use *capacityProviderB* .", + "title": "Weight", + "type": "number" + } + }, + "type": "object" + }, + "AWS::ECS::Cluster.ClusterConfiguration": { + "additionalProperties": false, + "properties": { + "ExecuteCommandConfiguration": { + "$ref": "#/definitions/AWS::ECS::Cluster.ExecuteCommandConfiguration", + "markdownDescription": "The details of the execute command configuration.", + "title": "ExecuteCommandConfiguration" + } + }, + "type": "object" + }, + "AWS::ECS::Cluster.ClusterSettings": { + "additionalProperties": false, + "properties": { + "Name": { + "markdownDescription": "The name of the cluster setting. The value is `containerInsights` .", + "title": "Name", + "type": "string" + }, + "Value": { + "markdownDescription": "The value to set for the cluster setting. 
The supported values are `enabled` and `disabled` .\n\nIf you set `name` to `containerInsights` and `value` to `enabled` , CloudWatch Container Insights will be on for the cluster, otherwise it will be off unless the `containerInsights` account setting is turned on. If a cluster value is specified, it will override the `containerInsights` value set with [PutAccountSetting](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_PutAccountSetting.html) or [PutAccountSettingDefault](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_PutAccountSettingDefault.html) .", + "title": "Value", + "type": "string" + } + }, + "type": "object" + }, + "AWS::ECS::Cluster.ExecuteCommandConfiguration": { + "additionalProperties": false, + "properties": { + "KmsKeyId": { + "markdownDescription": "Specify an AWS Key Management Service key ID to encrypt the data between the local client and the container.", + "title": "KmsKeyId", + "type": "string" + }, + "LogConfiguration": { + "$ref": "#/definitions/AWS::ECS::Cluster.ExecuteCommandLogConfiguration", + "markdownDescription": "The log configuration for the results of the execute command actions. The logs can be sent to CloudWatch Logs or an Amazon S3 bucket. When `logging=OVERRIDE` is specified, a `logConfiguration` must be provided.", + "title": "LogConfiguration" + }, + "Logging": { + "markdownDescription": "The log setting to use for redirecting logs for your execute command results. The following log settings are available.\n\n- `NONE` : The execute command session is not logged.\n- `DEFAULT` : The `awslogs` configuration in the task definition is used. If no logging parameter is specified, it defaults to this value. If no `awslogs` log driver is configured in the task definition, the output won't be logged.\n- `OVERRIDE` : Specify the logging details as a part of `logConfiguration` . If the `OVERRIDE` logging option is specified, the `logConfiguration` is required.", + "title": "Logging", + "type": "string" + } + }, + "type": "object" + }, + "AWS::ECS::Cluster.ExecuteCommandLogConfiguration": { + "additionalProperties": false, + "properties": { + "CloudWatchEncryptionEnabled": { + "markdownDescription": "Determines whether to use encryption on the CloudWatch logs. If not specified, encryption will be off.", + "title": "CloudWatchEncryptionEnabled", + "type": "boolean" + }, + "CloudWatchLogGroupName": { + "markdownDescription": "The name of the CloudWatch log group to send logs to.\n\n> The CloudWatch log group must already be created.", + "title": "CloudWatchLogGroupName", + "type": "string" + }, + "S3BucketName": { + "markdownDescription": "The name of the S3 bucket to send logs to.\n\n> The S3 bucket must already be created.", + "title": "S3BucketName", + "type": "string" + }, + "S3EncryptionEnabled": { + "markdownDescription": "Determines whether to use encryption on the S3 logs. If not specified, encryption is not used.", + "title": "S3EncryptionEnabled", + "type": "boolean" + }, + "S3KeyPrefix": { + "markdownDescription": "An optional folder in the S3 bucket to place logs in.", + "title": "S3KeyPrefix", + "type": "string" + } + }, + "type": "object" + }, + "AWS::ECS::Cluster.ServiceConnectDefaults": { + "additionalProperties": false, + "properties": { + "Namespace": { + "markdownDescription": "The namespace name or full Amazon Resource Name (ARN) of the AWS Cloud Map namespace that's used when you create a service and don't specify a Service Connect configuration. The namespace name can include up to 1024 characters. 
The name is case-sensitive. The name can't include hyphens (-), tilde (~), greater than (>), less than (<), or slash (/).\n\nIf you enter an existing namespace name or ARN, then that namespace will be used. Any namespace type is supported. The namespace must be in this account and this AWS Region.\n\nIf you enter a new name, an AWS Cloud Map namespace will be created. Amazon ECS creates an AWS Cloud Map namespace with the \"API calls\" method of instance discovery only. This instance discovery method is the \"HTTP\" namespace type in the AWS Command Line Interface . Other types of instance discovery aren't used by Service Connect.\n\nIf you update the cluster with an empty string `\"\"` for the namespace name, the cluster configuration for Service Connect is removed. Note that the namespace will remain in AWS Cloud Map and must be deleted separately.\n\nFor more information about AWS Cloud Map , see [Working with Services](https://docs.aws.amazon.com/cloud-map/latest/dg/working-with-services.html) in the *AWS Cloud Map Developer Guide* .", "title": "Namespace", "type": "string" } }, "type": "object" }, "AWS::ECS::ClusterCapacityProviderAssociations": { "additionalProperties": false, "properties": { "Condition": { @@ -81116,7 +83923,7 @@ }, "RuntimePlatform": { "$ref": "#/definitions/AWS::ECS::TaskDefinition.RuntimePlatform", - "markdownDescription": "The operating system that your tasks definitions run on. A platform family is specified only for tasks using the Fargate launch type.\n\nWhen you specify a task definition in a service, this value must match the `runtimePlatform` value of the service.", + "markdownDescription": "The operating system that your tasks definitions run on. A platform family is specified only for tasks using the Fargate launch type.", "title": "RuntimePlatform" }, "Tags": { @@ -81593,6 +84400,51 @@ }, "type": "object" }, + "AWS::ECS::TaskDefinition.FSxAuthorizationConfig": { + "additionalProperties": false, + "properties": { + "CredentialsParameter": { + "markdownDescription": "", + "title": "CredentialsParameter", + "type": "string" + }, + "Domain": { + "markdownDescription": "", + "title": "Domain", + "type": "string" + } + }, + "required": [ + "CredentialsParameter", + "Domain" + ], + "type": "object" + }, + "AWS::ECS::TaskDefinition.FSxWindowsFileServerVolumeConfiguration": { + "additionalProperties": false, + "properties": { + "AuthorizationConfig": { + "$ref": "#/definitions/AWS::ECS::TaskDefinition.FSxAuthorizationConfig", + "markdownDescription": "The authorization configuration details for the Amazon FSx for Windows File Server file system.", + "title": "AuthorizationConfig" + }, + "FileSystemId": { + "markdownDescription": "The Amazon FSx for Windows File Server file system ID to use.", + "title": "FileSystemId", + "type": "string" + }, + "RootDirectory": { + "markdownDescription": "The directory within the Amazon FSx for Windows File Server file system to mount as the root directory inside the host.", + "title": "RootDirectory", + "type": "string" + } + }, + "required": [ + "FileSystemId", + "RootDirectory" + ], + "type": "object" + }, "AWS::ECS::TaskDefinition.FirelensConfiguration": { "additionalProperties": false, "properties": { @@ -82067,6 +84919,11 @@ "markdownDescription": "This parameter is specified when you use an Amazon Elastic File System file system for task storage.", "title": "EFSVolumeConfiguration" }, + "FSxWindowsFileServerVolumeConfiguration": { + "$ref": 
"#/definitions/AWS::ECS::TaskDefinition.FSxWindowsFileServerVolumeConfiguration", + "markdownDescription": "This parameter is specified when you use Amazon FSx for Windows File Server file system for task storage.", + "title": "FSxWindowsFileServerVolumeConfiguration" + }, "Host": { "$ref": "#/definitions/AWS::ECS::TaskDefinition.HostVolumeProperties", "markdownDescription": "This parameter is specified when you use bind mount host volumes. The contents of the `host` parameter determine whether your bind mount host volume persists on the host container instance and where it's stored. If the `host` parameter is empty, then the Docker daemon assigns a host path for your data volume. However, the data isn't guaranteed to persist after the containers that are associated with it stop running.\n\nWindows containers can mount whole directories on the same drive as `$env:ProgramData` . Windows containers can't mount directories on a different drive, and mount point can't be across drives. For example, you can mount `C:\\my\\path:C:\\my\\path` and `D:\\:D:\\` , but not `D:\\my\\path:C:\\my\\path` or `D:\\:C:\\my\\path` .", @@ -82717,7 +85574,7 @@ "additionalProperties": false, "properties": { "AvailabilityZoneName": { - "markdownDescription": "The AWS For One Zone file systems, the replication configuration must specify the Availability Zone in which the destination file system is located.\n\nUse the format `us-east-1a` to specify the Availability Zone. For more information about One Zone file systems, see [EFS file system types](https://docs.aws.amazon.com/efs/latest/ug/storage-classes.html) in the *Amazon EFS User Guide* .\n\n> One Zone file system type is not available in all Availability Zones in AWS Regions where Amazon EFS is available.", + "markdownDescription": "For One Zone file systems, the replication configuration must specify the Availability Zone in which the destination file system is located.\n\nUse the format `us-east-1a` to specify the Availability Zone. For more information about One Zone file systems, see [EFS file system types](https://docs.aws.amazon.com/efs/latest/ug/storage-classes.html) in the *Amazon EFS User Guide* .\n\n> One Zone file system type is not available in all Availability Zones in AWS Regions where Amazon EFS is available.", "title": "AvailabilityZoneName", "type": "string" }, @@ -84782,7 +87639,7 @@ "type": "string" }, "KeepJobFlowAliveWhenNoSteps": { - "markdownDescription": "Specifies whether the cluster should remain available after completing all steps. Defaults to `true` . For more information about configuring cluster termination, see [Control Cluster Termination](https://docs.aws.amazon.com/emr/latest/ManagementGuide/emr-plan-termination.html) in the *EMR Management Guide* .", + "markdownDescription": "Specifies whether the cluster should remain available after completing all steps. Defaults to `false` . 
For more information about configuring cluster termination, see [Control Cluster Termination](https://docs.aws.amazon.com/emr/latest/ManagementGuide/emr-plan-termination.html) in the *EMR Management Guide* .", "title": "KeepJobFlowAliveWhenNoSteps", "type": "boolean" }, @@ -88301,6 +91158,11 @@ "title": "Maximum", "type": "number" }, + "Minimum": { + "markdownDescription": "The lower limit for data storage the cache is set to use.", + "title": "Minimum", + "type": "number" + }, "Unit": { "markdownDescription": "The unit that the storage is measured in, in GB.", "title": "Unit", @@ -88308,7 +91170,6 @@ } }, "required": [ - "Maximum", "Unit" ], "type": "object" }, @@ -88320,11 +91181,13 @@ "markdownDescription": "The configuration for the maximum number of ECPUs the cache can consume per second.", "title": "Maximum", "type": "number" + }, + "Minimum": { + "markdownDescription": "The configuration for the minimum number of ECPUs the cache should be able to consume per second.", + "title": "Minimum", + "type": "number" } }, - "required": [ - "Maximum" - ], "type": "object" }, "AWS::ElastiCache::ServerlessCache.Endpoint": { @@ -91867,7 +94730,7 @@ "type": "string" }, "Type": { - "markdownDescription": "", + "markdownDescription": "The type of ID namespace. There are two types: `SOURCE` and `TARGET` .\n\nThe `SOURCE` contains configurations for `sourceId` data that will be processed in an ID mapping workflow.\n\nThe `TARGET` contains a configuration of `targetId` to which all `sourceIds` will resolve.", "title": "Type", "type": "string" } @@ -91940,6 +94803,176 @@ ], "type": "object" }, + "AWS::EntityResolution::IdNamespace": { + "additionalProperties": false, + "properties": { + "Condition": { + "type": "string" + }, + "DeletionPolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + }, + "DependsOn": { + "anyOf": [ + { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + { + "items": { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + "type": "array" + } + ] + }, + "Metadata": { + "type": "object" + }, + "Properties": { + "additionalProperties": false, + "properties": { + "Description": { + "markdownDescription": "The description of the ID namespace.", + "title": "Description", + "type": "string" + }, + "IdMappingWorkflowProperties": { + "items": { + "$ref": "#/definitions/AWS::EntityResolution::IdNamespace.IdNamespaceIdMappingWorkflowProperties" + }, + "markdownDescription": "Determines the properties of `IdMappingWorkflow` where this `IdNamespace` can be used as a `Source` or a `Target` .", + "title": "IdMappingWorkflowProperties", + "type": "array" + }, + "IdNamespaceName": { + "markdownDescription": "The name of the ID namespace.", + "title": "IdNamespaceName", + "type": "string" + }, + "InputSourceConfig": { + "items": { + "$ref": "#/definitions/AWS::EntityResolution::IdNamespace.IdNamespaceInputSource" + }, + "markdownDescription": "A list of `InputSource` objects, which have the fields `InputSourceARN` and `SchemaName` .", + "title": "InputSourceConfig", + "type": "array" + }, + "RoleArn": { + "markdownDescription": "The Amazon Resource Name (ARN) of the IAM role. 
AWS Entity Resolution assumes this role to access the resources defined in this `IdNamespace` on your behalf as part of the workflow run.", + "title": "RoleArn", + "type": "string" + }, + "Tags": { + "items": { + "$ref": "#/definitions/Tag" + }, + "markdownDescription": "The tags used to organize, track, or control access for this resource.", + "title": "Tags", + "type": "array" + }, + "Type": { + "markdownDescription": "The type of ID namespace. There are two types: `SOURCE` and `TARGET` .\n\nThe `SOURCE` contains configurations for `sourceId` data that will be processed in an ID mapping workflow.\n\nThe `TARGET` contains a configuration of `targetId` to which all `sourceIds` will resolve.", + "title": "Type", + "type": "string" + } + }, + "required": [ + "IdNamespaceName", + "Type" + ], + "type": "object" + }, + "Type": { + "enum": [ + "AWS::EntityResolution::IdNamespace" + ], + "type": "string" + }, + "UpdateReplacePolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + } + }, + "required": [ + "Type", + "Properties" + ], + "type": "object" + }, + "AWS::EntityResolution::IdNamespace.IdNamespaceIdMappingWorkflowProperties": { + "additionalProperties": false, + "properties": { + "IdMappingType": { + "markdownDescription": "The type of ID mapping.", + "title": "IdMappingType", + "type": "string" + }, + "ProviderProperties": { + "$ref": "#/definitions/AWS::EntityResolution::IdNamespace.NamespaceProviderProperties", + "markdownDescription": "An object which defines any additional configurations required by the provider service.", + "title": "ProviderProperties" + } + }, + "required": [ + "IdMappingType" + ], + "type": "object" + }, + "AWS::EntityResolution::IdNamespace.IdNamespaceInputSource": { + "additionalProperties": false, + "properties": { + "InputSourceARN": { + "markdownDescription": "An AWS Glue table ARN for the input source table.", + "title": "InputSourceARN", + "type": "string" + }, + "SchemaName": { + "markdownDescription": "The name of the schema.", + "title": "SchemaName", + "type": "string" + } + }, + "required": [ + "InputSourceARN" + ], + "type": "object" + }, + "AWS::EntityResolution::IdNamespace.NamespaceProviderProperties": { + "additionalProperties": false, + "properties": { + "ProviderConfiguration": { + "additionalProperties": true, + "markdownDescription": "An object which defines any additional configurations required by the provider service.", + "patternProperties": { + "^[a-zA-Z0-9]+$": { + "type": "string" + } + }, + "title": "ProviderConfiguration", + "type": "object" + }, + "ProviderServiceArn": { + "markdownDescription": "The Amazon Resource Name (ARN) of the provider service.", + "title": "ProviderServiceArn", + "type": "string" + } + }, + "required": [ + "ProviderServiceArn" + ], + "type": "object" + }, "AWS::EntityResolution::MatchingWorkflow": { "additionalProperties": false, "properties": { @@ -92238,7 +95271,7 @@ ], "type": "object" }, - "AWS::EntityResolution::SchemaMapping": { + "AWS::EntityResolution::PolicyStatement": { "additionalProperties": false, "properties": { "Condition": { @@ -92273,42 +95306,52 @@ "Properties": { "additionalProperties": false, "properties": { - "Description": { - "markdownDescription": "A description of the schema.", - "title": "Description", - "type": "string" - }, - "MappedInputFields": { + "Action": { "items": { - "$ref": "#/definitions/AWS::EntityResolution::SchemaMapping.SchemaInputAttribute" + "type": "string" }, - "markdownDescription": "A list of `MappedInputFields` . 
Each `MappedInputField` corresponds to a column the source data table, and contains column name plus additional information that AWS Entity Resolution uses for matching.", - "title": "MappedInputFields", + "markdownDescription": "The action that the principal can use on the resource.\n\nFor example, `entityresolution:GetIdMappingJob` , `entityresolution:GetMatchingJob` .", + "title": "Action", "type": "array" }, - "SchemaName": { - "markdownDescription": "The name of the schema. There can't be multiple `SchemaMappings` with the same name.", - "title": "SchemaName", + "Arn": { + "markdownDescription": "The Amazon Resource Name (ARN) of the resource that will be accessed by the principal.", + "title": "Arn", "type": "string" }, - "Tags": { + "Condition": { + "markdownDescription": "A set of condition keys that you can use in key policies.", + "title": "Condition", + "type": "string" + }, + "Effect": { + "markdownDescription": "Determines whether the permissions specified in the policy are to be allowed ( `Allow` ) or denied ( `Deny` ).", + "title": "Effect", + "type": "string" + }, + "Principal": { "items": { - "$ref": "#/definitions/Tag" + "type": "string" }, - "markdownDescription": "The tags used to organize, track, or control access for this resource.", - "title": "Tags", + "markdownDescription": "The AWS service or AWS account that can access the resource defined by the ARN.", + "title": "Principal", "type": "array" + }, + "StatementId": { + "markdownDescription": "A statement identifier that differentiates the statement from others in the same policy.", + "title": "StatementId", + "type": "string" } }, "required": [ - "MappedInputFields", - "SchemaName" + "Arn", + "StatementId" ], "type": "object" }, "Type": { "enum": [ - "AWS::EntityResolution::SchemaMapping" + "AWS::EntityResolution::PolicyStatement" ], "type": "string" }, @@ -92327,42 +95370,7 @@ ], "type": "object" }, - "AWS::EntityResolution::SchemaMapping.SchemaInputAttribute": { - "additionalProperties": false, - "properties": { - "FieldName": { - "markdownDescription": "A string containing the field name.", - "title": "FieldName", - "type": "string" - }, - "GroupName": { - "markdownDescription": "Instruct AWS Entity Resolution to combine several columns into a unified column with the identical attribute type. For example, when working with columns such as first_name, middle_name, and last_name, assigning them a common `GroupName` will prompt AWS Entity Resolution to concatenate them into a single value.", - "title": "GroupName", - "type": "string" - }, - "MatchKey": { - "markdownDescription": "A key that allows grouping of multiple input attributes into a unified matching group. For example, let's consider a scenario where the source table contains various addresses, such as `business_address` and `shipping_address` . By assigning the `MatchKey` *Address* to both attributes, AWS Entity Resolution will match records across these fields to create a consolidated matching group. 
If no `MatchKey` is specified for a column, it won't be utilized for matching purposes but will still be included in the output table.",
- "title": "MatchKey",
- "type": "string"
- },
- "SubType": {
- "markdownDescription": "The subtype of the attribute, selected from a list of values.",
- "title": "SubType",
- "type": "string"
- },
- "Type": {
- "markdownDescription": "The type of the attribute, selected from a list of values.",
- "title": "Type",
- "type": "string"
- }
- },
- "required": [
- "FieldName",
- "Type"
- ],
- "type": "object"
- },
- "AWS::EventSchemas::Discoverer": {
+ "AWS::EntityResolution::SchemaMapping": {
 "additionalProperties": false,
 "properties": {
 "Condition": {
@@ -92397,38 +95405,42 @@
 "Properties": {
 "additionalProperties": false,
 "properties": {
- "CrossAccount": {
- "markdownDescription": "Allows for the discovery of the event schemas that are sent to the event bus from another account.",
- "title": "CrossAccount",
- "type": "boolean"
- },
 "Description": {
- "markdownDescription": "A description for the discoverer.",
+ "markdownDescription": "A description of the schema.",
 "title": "Description",
 "type": "string"
 },
- "SourceArn": {
- "markdownDescription": "The ARN of the event bus.",
- "title": "SourceArn",
+ "MappedInputFields": {
+ "items": {
+ "$ref": "#/definitions/AWS::EntityResolution::SchemaMapping.SchemaInputAttribute"
+ },
+ "markdownDescription": "A list of `MappedInputFields` . Each `MappedInputField` corresponds to a column in the source data table, and contains the column name plus additional information that AWS Entity Resolution uses for matching.",
+ "title": "MappedInputFields",
+ "type": "array"
+ },
+ "SchemaName": {
+ "markdownDescription": "The name of the schema. There can't be multiple `SchemaMappings` with the same name.",
+ "title": "SchemaName",
 "type": "string"
 },
 "Tags": {
 "items": {
- "$ref": "#/definitions/AWS::EventSchemas::Discoverer.TagsEntry"
+ "$ref": "#/definitions/Tag"
 },
- "markdownDescription": "Tags associated with the resource.",
+ "markdownDescription": "The tags used to organize, track, or control access for this resource.",
 "title": "Tags",
 "type": "array"
 }
 },
 "required": [
- "SourceArn"
+ "MappedInputFields",
+ "SchemaName"
 ],
 "type": "object"
 },
 "Type": {
 "enum": [
- "AWS::EventSchemas::Discoverer"
+ "AWS::EntityResolution::SchemaMapping"
 ],
 "type": "string"
 },
@@ -92447,27 +95459,147 @@
 ],
 "type": "object"
 },
- "AWS::EventSchemas::Discoverer.TagsEntry": {
+ "AWS::EntityResolution::SchemaMapping.SchemaInputAttribute": {
 "additionalProperties": false,
 "properties": {
- "Key": {
- "markdownDescription": "The key of a key-value pair.",
- "title": "Key",
+ "FieldName": {
+ "markdownDescription": "A string containing the field name.",
+ "title": "FieldName",
 "type": "string"
 },
- "Value": {
- "markdownDescription": "The value of a key-value pair.",
- "title": "Value",
+ "GroupName": {
+ "markdownDescription": "A string that instructs AWS Entity Resolution to combine several columns into a unified column with the identical attribute type.\n\nFor example, when working with columns such as `first_name` , `middle_name` , and `last_name` , assigning them a common `groupName` will prompt AWS Entity Resolution to concatenate them into a single value.",
+ "title": "GroupName",
+ "type": "string"
+ },
+ "MatchKey": {
+ "markdownDescription": "A key that allows grouping of multiple input attributes into a unified matching group. 
For example, consider a scenario where the source table contains various addresses, such as `business_address` and `shipping_address` . By assigning a `matchKey` called `address` to both attributes, AWS Entity Resolution will match records across these fields to create a consolidated matching group. If no `matchKey` is specified for a column, it won't be utilized for matching purposes but will still be included in the output table.", + "title": "MatchKey", + "type": "string" + }, + "SubType": { + "markdownDescription": "The subtype of the attribute, selected from a list of values.", + "title": "SubType", + "type": "string" + }, + "Type": { + "markdownDescription": "The type of the attribute, selected from a list of values.", + "title": "Type", "type": "string" } }, "required": [ - "Key", - "Value" + "FieldName", + "Type" ], "type": "object" }, - "AWS::EventSchemas::Registry": { + "AWS::EventSchemas::Discoverer": { + "additionalProperties": false, + "properties": { + "Condition": { + "type": "string" + }, + "DeletionPolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + }, + "DependsOn": { + "anyOf": [ + { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + { + "items": { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + "type": "array" + } + ] + }, + "Metadata": { + "type": "object" + }, + "Properties": { + "additionalProperties": false, + "properties": { + "CrossAccount": { + "markdownDescription": "Allows for the discovery of the event schemas that are sent to the event bus from another account.", + "title": "CrossAccount", + "type": "boolean" + }, + "Description": { + "markdownDescription": "A description for the discoverer.", + "title": "Description", + "type": "string" + }, + "SourceArn": { + "markdownDescription": "The ARN of the event bus.", + "title": "SourceArn", + "type": "string" + }, + "Tags": { + "items": { + "$ref": "#/definitions/AWS::EventSchemas::Discoverer.TagsEntry" + }, + "markdownDescription": "Tags associated with the resource.", + "title": "Tags", + "type": "array" + } + }, + "required": [ + "SourceArn" + ], + "type": "object" + }, + "Type": { + "enum": [ + "AWS::EventSchemas::Discoverer" + ], + "type": "string" + }, + "UpdateReplacePolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + } + }, + "required": [ + "Type", + "Properties" + ], + "type": "object" + }, + "AWS::EventSchemas::Discoverer.TagsEntry": { + "additionalProperties": false, + "properties": { + "Key": { + "markdownDescription": "The key of a key-value pair.", + "title": "Key", + "type": "string" + }, + "Value": { + "markdownDescription": "The value of a key-value pair.", + "title": "Value", + "type": "string" + } + }, + "required": [ + "Key", + "Value" + ], + "type": "object" + }, + "AWS::EventSchemas::Registry": { "additionalProperties": false, "properties": { "Condition": { @@ -100731,6 +103863,116 @@ ], "type": "object" }, + "AWS::GlobalAccelerator::CrossAccountAttachment": { + "additionalProperties": false, + "properties": { + "Condition": { + "type": "string" + }, + "DeletionPolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + }, + "DependsOn": { + "anyOf": [ + { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + { + "items": { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + "type": "array" + } + ] + }, + "Metadata": { + "type": "object" + }, + "Properties": { + "additionalProperties": false, + "properties": { + "Name": { + "markdownDescription": "", + "title": "Name", + 
"type": "string" + }, + "Principals": { + "items": { + "type": "string" + }, + "markdownDescription": "", + "title": "Principals", + "type": "array" + }, + "Resources": { + "items": { + "$ref": "#/definitions/AWS::GlobalAccelerator::CrossAccountAttachment.Resource" + }, + "markdownDescription": "", + "title": "Resources", + "type": "array" + }, + "Tags": { + "items": { + "$ref": "#/definitions/Tag" + }, + "markdownDescription": "", + "title": "Tags", + "type": "array" + } + }, + "required": [ + "Name" + ], + "type": "object" + }, + "Type": { + "enum": [ + "AWS::GlobalAccelerator::CrossAccountAttachment" + ], + "type": "string" + }, + "UpdateReplacePolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + } + }, + "required": [ + "Type", + "Properties" + ], + "type": "object" + }, + "AWS::GlobalAccelerator::CrossAccountAttachment.Resource": { + "additionalProperties": false, + "properties": { + "EndpointId": { + "markdownDescription": "The endpoint ID for the endpoint that is specified as a AWS resource.\n\nAn endpoint ID for the cross-account feature is the ARN of an AWS resource, such as a Network Load Balancer, that Global Accelerator supports as an endpoint for an accelerator.", + "title": "EndpointId", + "type": "string" + }, + "Region": { + "markdownDescription": "The AWS Region where a shared endpoint resource is located.", + "title": "Region", + "type": "string" + } + }, + "required": [ + "EndpointId" + ], + "type": "object" + }, "AWS::GlobalAccelerator::EndpointGroup": { "additionalProperties": false, "properties": { @@ -111617,6 +114859,108 @@ ], "type": "object" }, + "AWS::IVS::EncoderConfiguration": { + "additionalProperties": false, + "properties": { + "Condition": { + "type": "string" + }, + "DeletionPolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + }, + "DependsOn": { + "anyOf": [ + { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + { + "items": { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + "type": "array" + } + ] + }, + "Metadata": { + "type": "object" + }, + "Properties": { + "additionalProperties": false, + "properties": { + "Name": { + "markdownDescription": "Encoder cnfiguration name.", + "title": "Name", + "type": "string" + }, + "Tags": { + "items": { + "$ref": "#/definitions/Tag" + }, + "markdownDescription": "An array of key-value pairs to apply to this resource.\n\nFor more information, see [Tag](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ivs-encoderconfiguration-tag.html) .", + "title": "Tags", + "type": "array" + }, + "Video": { + "$ref": "#/definitions/AWS::IVS::EncoderConfiguration.Video", + "markdownDescription": "Video configuration. Default: video resolution 1280x720, bitrate 2500 kbps, 30 fps. See the [Video](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ivs-encoderconfiguration-video.html) property type for more information.", + "title": "Video" + } + }, + "type": "object" + }, + "Type": { + "enum": [ + "AWS::IVS::EncoderConfiguration" + ], + "type": "string" + }, + "UpdateReplacePolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + } + }, + "required": [ + "Type" + ], + "type": "object" + }, + "AWS::IVS::EncoderConfiguration.Video": { + "additionalProperties": false, + "properties": { + "Bitrate": { + "markdownDescription": "Bitrate for generated output, in bps. 
Default: 2500000.",
+ "title": "Bitrate",
+ "type": "number"
+ },
+ "Framerate": {
+ "markdownDescription": "Video frame rate, in fps. Default: 30.",
+ "title": "Framerate",
+ "type": "number"
+ },
+ "Height": {
+ "markdownDescription": "Video-resolution height. Note that the maximum value is determined by width times height, such that the maximum total pixels is 2073600 (1920x1080 or 1080x1920). Default: 720.",
+ "title": "Height",
+ "type": "number"
+ },
+ "Width": {
+ "markdownDescription": "Video-resolution width. Note that the maximum value is determined by width times height, such that the maximum total pixels is 2073600 (1920x1080 or 1080x1920). Default: 1280.",
+ "title": "Width",
+ "type": "number"
+ }
+ },
+ "type": "object"
+ },
 "AWS::IVS::PlaybackKeyPair": {
 "additionalProperties": false,
 "properties": {
@@ -111693,6 +115037,103 @@
 ],
 "type": "object"
 },
+ "AWS::IVS::PlaybackRestrictionPolicy": {
+ "additionalProperties": false,
+ "properties": {
+ "Condition": {
+ "type": "string"
+ },
+ "DeletionPolicy": {
+ "enum": [
+ "Delete",
+ "Retain",
+ "Snapshot"
+ ],
+ "type": "string"
+ },
+ "DependsOn": {
+ "anyOf": [
+ {
+ "pattern": "^[a-zA-Z0-9]+$",
+ "type": "string"
+ },
+ {
+ "items": {
+ "pattern": "^[a-zA-Z0-9]+$",
+ "type": "string"
+ },
+ "type": "array"
+ }
+ ]
+ },
+ "Metadata": {
+ "type": "object"
+ },
+ "Properties": {
+ "additionalProperties": false,
+ "properties": {
+ "AllowedCountries": {
+ "items": {
+ "type": "string"
+ },
+ "markdownDescription": "A list of country codes that control geoblocking restrictions. Allowed values are the officially assigned ISO 3166-1 alpha-2 codes. Default: All countries (an empty array).",
+ "title": "AllowedCountries",
+ "type": "array"
+ },
+ "AllowedOrigins": {
+ "items": {
+ "type": "string"
+ },
+ "markdownDescription": "A list of origin sites that control CORS restriction. Allowed values are the same as valid values of the Origin header defined at [https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Origin](https://docs.aws.amazon.com/https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Origin)",
+ "title": "AllowedOrigins",
+ "type": "array"
+ },
+ "EnableStrictOriginEnforcement": {
+ "markdownDescription": "Whether channel playback is constrained by the origin site.",
+ "title": "EnableStrictOriginEnforcement",
+ "type": "boolean"
+ },
+ "Name": {
+ "markdownDescription": "Playback-restriction-policy name.",
+ "title": "Name",
+ "type": "string"
+ },
+ "Tags": {
+ "items": {
+ "$ref": "#/definitions/Tag"
+ },
+ "markdownDescription": "An array of key-value pairs to apply to this resource.\n\nFor more information, see [Tag](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ivs-playbackrestrictionpolicy-tag.html) .",
+ "title": "Tags",
+ "type": "array"
+ }
+ },
+ "required": [
+ "AllowedCountries",
+ "AllowedOrigins"
+ ],
+ "type": "object"
+ },
+ "Type": {
+ "enum": [
+ "AWS::IVS::PlaybackRestrictionPolicy"
+ ],
+ "type": "string"
+ },
+ "UpdateReplacePolicy": {
+ "enum": [
+ "Delete",
+ "Retain",
+ "Snapshot"
+ ],
+ "type": "string"
+ }
+ },
+ "required": [
+ "Type",
+ "Properties"
+ ],
+ "type": "object"
+ },
 "AWS::IVS::RecordingConfiguration": {
 "additionalProperties": false,
 "properties": {
@@ -111730,7 +115171,7 @@
 "properties": {
 "DestinationConfiguration": {
 "$ref": "#/definitions/AWS::IVS::RecordingConfiguration.DestinationConfiguration",
- "markdownDescription": "A destination configuration contains information about where recorded video will be stored. 
See the DestinationConfiguration property type for more information.",
+ "markdownDescription": "A destination configuration describes an S3 bucket where recorded video will be stored. See the DestinationConfiguration property type for more information.",
 "title": "DestinationConfiguration"
 },
 "Name": {
@@ -111932,6 +115373,100 @@
 ],
 "type": "object"
 },
+ "AWS::IVS::StorageConfiguration": {
+ "additionalProperties": false,
+ "properties": {
+ "Condition": {
+ "type": "string"
+ },
+ "DeletionPolicy": {
+ "enum": [
+ "Delete",
+ "Retain",
+ "Snapshot"
+ ],
+ "type": "string"
+ },
+ "DependsOn": {
+ "anyOf": [
+ {
+ "pattern": "^[a-zA-Z0-9]+$",
+ "type": "string"
+ },
+ {
+ "items": {
+ "pattern": "^[a-zA-Z0-9]+$",
+ "type": "string"
+ },
+ "type": "array"
+ }
+ ]
+ },
+ "Metadata": {
+ "type": "object"
+ },
+ "Properties": {
+ "additionalProperties": false,
+ "properties": {
+ "Name": {
+ "markdownDescription": "Storage configuration name.",
+ "title": "Name",
+ "type": "string"
+ },
+ "S3": {
+ "$ref": "#/definitions/AWS::IVS::StorageConfiguration.S3StorageConfiguration",
+ "markdownDescription": "An S3 storage configuration contains information about where recorded video will be stored. See the [S3StorageConfiguration](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ivs-storageconfiguration-s3storageconfiguration.html) property type for more information.",
+ "title": "S3"
+ },
+ "Tags": {
+ "items": {
+ "$ref": "#/definitions/Tag"
+ },
+ "markdownDescription": "An array of key-value pairs to apply to this resource.\n\nFor more information, see [Tag](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ivs-storageconfiguration-tag.html) .",
+ "title": "Tags",
+ "type": "array"
+ }
+ },
+ "required": [
+ "S3"
+ ],
+ "type": "object"
+ },
+ "Type": {
+ "enum": [
+ "AWS::IVS::StorageConfiguration"
+ ],
+ "type": "string"
+ },
+ "UpdateReplacePolicy": {
+ "enum": [
+ "Delete",
+ "Retain",
+ "Snapshot"
+ ],
+ "type": "string"
+ }
+ },
+ "required": [
+ "Type",
+ "Properties"
+ ],
+ "type": "object"
+ },
+ "AWS::IVS::StorageConfiguration.S3StorageConfiguration": {
+ "additionalProperties": false,
+ "properties": {
+ "BucketName": {
+ "markdownDescription": "Name of the S3 bucket where recorded video will be stored.",
+ "title": "BucketName",
+ "type": "string"
+ }
+ },
+ "required": [
+ "BucketName"
+ ],
+ "type": "object"
+ },
 "AWS::IVS::StreamKey": {
 "additionalProperties": false,
 "properties": {
@@ -129369,6 +132904,11 @@
 "title": "PendingWindowInDays",
 "type": "number"
 },
+ "RotationPeriodInDays": {
+ "markdownDescription": "The number of days between each automatic rotation. 
The default value is 365 days.",
+ "title": "RotationPeriodInDays",
+ "type": "number"
+ },
 "Tags": {
 "items": {
 "$ref": "#/definitions/Tag"
 },
@@ -159955,6 +163495,14 @@
 "Properties": {
 "additionalProperties": false,
 "properties": {
+ "Audiences": {
+ "items": {
+ "type": "string"
+ },
+ "markdownDescription": "The list of audiences defined in the channel.",
+ "title": "Audiences",
+ "type": "array"
+ },
 "ChannelName": {
 "markdownDescription": "The name of the channel.",
 "title": "ChannelName",
 "type": "string"
 },
@@ -166386,6 +169934,11 @@
 "title": "LabelTemplate",
 "type": "string"
 },
+ "LinkConfiguration": {
+ "$ref": "#/definitions/AWS::Oam::Link.LinkConfiguration",
+ "markdownDescription": "",
+ "title": "LinkConfiguration"
+ },
 "ResourceTypes": {
 "items": {
 "type": "string"
 },
@@ -166438,6 +169991,36 @@
 ],
 "type": "object"
 },
+ "AWS::Oam::Link.LinkConfiguration": {
+ "additionalProperties": false,
+ "properties": {
+ "LogGroupConfiguration": {
+ "$ref": "#/definitions/AWS::Oam::Link.LinkFilter",
+ "markdownDescription": "",
+ "title": "LogGroupConfiguration"
+ },
+ "MetricConfiguration": {
+ "$ref": "#/definitions/AWS::Oam::Link.LinkFilter",
+ "markdownDescription": "",
+ "title": "MetricConfiguration"
+ }
+ },
+ "type": "object"
+ },
+ "AWS::Oam::Link.LinkFilter": {
+ "additionalProperties": false,
+ "properties": {
+ "Filter": {
+ "markdownDescription": "",
+ "title": "Filter",
+ "type": "string"
+ }
+ },
+ "required": [
+ "Filter"
+ ],
+ "type": "object"
+ },
 "AWS::Oam::Sink": {
 "additionalProperties": false,
 "properties": {
@@ -172285,7 +175868,7 @@
 "title": "DatasetImportJob"
 },
 "DatasetType": {
- "markdownDescription": "One of the following values:\n\n- Interactions\n- Items\n- Users\n- Actions\n- Action_Interactions",
+ "markdownDescription": "One of the following values:\n\n- Interactions\n- Items\n- Users\n\n> You can't use CloudFormation to create an Action Interactions or Actions dataset.",
 "title": "DatasetType",
 "type": "string"
 },
@@ -174276,7 +177859,7 @@
 },
 "EmailMessage": {
 "$ref": "#/definitions/AWS::Pinpoint::Campaign.CampaignEmailMessage",
- "markdownDescription": "The message that the campaign sends through the email channel. If specified, this message overrides the default message.",
+ "markdownDescription": "The message that the campaign sends through the email channel. If specified, this message overrides the default message.\n\n> The maximum email message size is 200KB. You can use email templates to send larger email messages.",
 "title": "EmailMessage"
 },
 "GCMMessage": {
@@ -182245,7 +185828,7 @@
 "type": "string"
 },
 "Type": {
- "markdownDescription": "The date time picker type of a `FilterDateTimePickerControl` . Choose one of the following options:\n\n- `SINGLE_VALUED` : The filter condition is a fixed date.\n- `DATE_RANGE` : The filter condition is a date time range.",
+ "markdownDescription": "The type of the `FilterDropDownControl` . Choose one of the following options:\n\n- `MULTI_SELECT` : The user can select multiple entries from a dropdown menu.\n- `SINGLE_SELECT` : The user can select a single entry from a dropdown menu.",
 "title": "Type",
 "type": "string"
 }
@@ -182409,7 +185992,7 @@
 "type": "string"
 },
 "Type": {
- "markdownDescription": "The type of `FilterListControl` . Choose one of the following options:\n\n- `MULTI_SELECT` : The user can select multiple entries from the list.\n- `SINGLE_SELECT` : The user can select a single entry from the list.",
+ "markdownDescription": "The type of the `FilterListControl` . 
Choose one of the following options:\n\n- `MULTI_SELECT` : The user can select multiple entries from the list.\n- `SINGLE_SELECT` : The user can select a single entry from the list.",
 "title": "Type",
 "type": "string"
 }
@@ -182494,7 +186077,7 @@
 "additionalProperties": false,
 "properties": {
 "AllSheets": {
- "markdownDescription": "The configuration for applying a filter to all sheets.",
+ "markdownDescription": "The configuration that applies a filter to all sheets. When you choose `AllSheets` as the value for a `FilterScopeConfiguration` , this filter is applied to all visuals of all sheets in an Analysis, Dashboard, or Template. The `AllSheetsFilterScopeConfiguration` is chosen.",
 "title": "AllSheets",
 "type": "object"
 },
@@ -182534,12 +186117,12 @@
 "type": "string"
 },
 "MaximumValue": {
- "markdownDescription": "The smaller value that is displayed at the left of the slider.",
+ "markdownDescription": "The larger value that is displayed at the right of the slider.",
 "title": "MaximumValue",
 "type": "number"
 },
 "MinimumValue": {
- "markdownDescription": "The larger value that is displayed at the right of the slider.",
+ "markdownDescription": "The smaller value that is displayed at the left of the slider.",
 "title": "MinimumValue",
 "type": "number"
 },
@@ -182559,7 +186142,7 @@
 "type": "string"
 },
 "Type": {
- "markdownDescription": "The type of `FilterSliderControl` . Choose one of the following options:\n\n- `SINGLE_POINT` : Filter against(equals) a single data point.\n- `RANGE` : Filter data that is in a specified range.",
+ "markdownDescription": "The type of the `FilterSliderControl` . Choose one of the following options:\n\n- `SINGLE_POINT` : Filter against (equals) a single data point.\n- `RANGE` : Filter data that is in a specified range.",
 "title": "Type",
 "type": "string"
 }
@@ -185924,12 +189507,12 @@
 "title": "DisplayOptions"
 },
 "MaximumValue": {
- "markdownDescription": "The smaller value that is displayed at the left of the slider.",
+ "markdownDescription": "The larger value that is displayed at the right of the slider.",
 "title": "MaximumValue",
 "type": "number"
 },
 "MinimumValue": {
- "markdownDescription": "The larger value that is displayed at the right of the slider.",
+ "markdownDescription": "The smaller value that is displayed at the left of the slider.",
 "title": "MinimumValue",
 "type": "number"
 },
@@ -194506,7 +198089,7 @@
 "type": "string"
 },
 "Type": {
- "markdownDescription": "The date time picker type of a `FilterDateTimePickerControl` . Choose one of the following options:\n\n- `SINGLE_VALUED` : The filter condition is a fixed date.\n- `DATE_RANGE` : The filter condition is a date time range.",
+ "markdownDescription": "The type of the `FilterDropDownControl` . Choose one of the following options:\n\n- `MULTI_SELECT` : The user can select multiple entries from a dropdown menu.\n- `SINGLE_SELECT` : The user can select a single entry from a dropdown menu.",
 "title": "Type",
 "type": "string"
 }
@@ -194670,7 +198253,7 @@
 "type": "string"
 },
 "Type": {
- "markdownDescription": "The type of `FilterListControl` . Choose one of the following options:\n\n- `MULTI_SELECT` : The user can select multiple entries from the list.\n- `SINGLE_SELECT` : The user can select a single entry from the list.",
+ "markdownDescription": "The type of the `FilterListControl` . 
Choose one of the following options:\n\n- `MULTI_SELECT` : The user can select multiple entries from the list.\n- `SINGLE_SELECT` : The user can select a single entry from the list.",
 "title": "Type",
 "type": "string"
 }
@@ -194755,7 +198338,7 @@
 "additionalProperties": false,
 "properties": {
 "AllSheets": {
- "markdownDescription": "The configuration for applying a filter to all sheets.",
+ "markdownDescription": "The configuration that applies a filter to all sheets. When you choose `AllSheets` as the value for a `FilterScopeConfiguration` , this filter is applied to all visuals of all sheets in an Analysis, Dashboard, or Template. The `AllSheetsFilterScopeConfiguration` is chosen.",
 "title": "AllSheets",
 "type": "object"
 },
@@ -194795,12 +198378,12 @@
 "type": "string"
 },
 "MaximumValue": {
- "markdownDescription": "The smaller value that is displayed at the left of the slider.",
+ "markdownDescription": "The larger value that is displayed at the right of the slider.",
 "title": "MaximumValue",
 "type": "number"
 },
 "MinimumValue": {
- "markdownDescription": "The larger value that is displayed at the right of the slider.",
+ "markdownDescription": "The smaller value that is displayed at the left of the slider.",
 "title": "MinimumValue",
 "type": "number"
 },
@@ -194820,7 +198403,7 @@
 "type": "string"
 },
 "Type": {
- "markdownDescription": "The type of `FilterSliderControl` . Choose one of the following options:\n\n- `SINGLE_POINT` : Filter against(equals) a single data point.\n- `RANGE` : Filter data that is in a specified range.",
+ "markdownDescription": "The type of the `FilterSliderControl` . Choose one of the following options:\n\n- `SINGLE_POINT` : Filter against (equals) a single data point.\n- `RANGE` : Filter data that is in a specified range.",
 "title": "Type",
 "type": "string"
 }
@@ -198199,12 +201782,12 @@
 "title": "DisplayOptions"
 },
 "MaximumValue": {
- "markdownDescription": "The smaller value that is displayed at the left of the slider.",
+ "markdownDescription": "The larger value that is displayed at the right of the slider.",
 "title": "MaximumValue",
 "type": "number"
 },
 "MinimumValue": {
- "markdownDescription": "The larger value that is displayed at the right of the slider.",
+ "markdownDescription": "The smaller value that is displayed at the left of the slider.",
 "title": "MinimumValue",
 "type": "number"
 },
@@ -208714,7 +212297,7 @@
 "type": "string"
 },
 "Type": {
- "markdownDescription": "The date time picker type of a `FilterDateTimePickerControl` . Choose one of the following options:\n\n- `SINGLE_VALUED` : The filter condition is a fixed date.\n- `DATE_RANGE` : The filter condition is a date time range.",
+ "markdownDescription": "The type of the `FilterDropDownControl` . Choose one of the following options:\n\n- `MULTI_SELECT` : The user can select multiple entries from a dropdown menu.\n- `SINGLE_SELECT` : The user can select a single entry from a dropdown menu.",
 "title": "Type",
 "type": "string"
 }
@@ -208878,7 +212461,7 @@
 "type": "string"
 },
 "Type": {
- "markdownDescription": "The type of `FilterListControl` . Choose one of the following options:\n\n- `MULTI_SELECT` : The user can select multiple entries from the list.\n- `SINGLE_SELECT` : The user can select a single entry from the list.",
+ "markdownDescription": "The type of the `FilterListControl` . 
Choose one of the following options:\n\n- `MULTI_SELECT` : The user can select multiple entries from the list.\n- `SINGLE_SELECT` : The user can select a single entry from the list.",
 "title": "Type",
 "type": "string"
 }
@@ -208963,7 +212546,7 @@
 "additionalProperties": false,
 "properties": {
 "AllSheets": {
- "markdownDescription": "The configuration for applying a filter to all sheets.",
+ "markdownDescription": "The configuration that applies a filter to all sheets. When you choose `AllSheets` as the value for a `FilterScopeConfiguration` , this filter is applied to all visuals of all sheets in an Analysis, Dashboard, or Template. The `AllSheetsFilterScopeConfiguration` is chosen.",
 "title": "AllSheets",
 "type": "object"
 },
@@ -209003,12 +212586,12 @@
 "type": "string"
 },
 "MaximumValue": {
- "markdownDescription": "The smaller value that is displayed at the left of the slider.",
+ "markdownDescription": "The larger value that is displayed at the right of the slider.",
 "title": "MaximumValue",
 "type": "number"
 },
 "MinimumValue": {
- "markdownDescription": "The larger value that is displayed at the right of the slider.",
+ "markdownDescription": "The smaller value that is displayed at the left of the slider.",
 "title": "MinimumValue",
 "type": "number"
 },
@@ -209028,7 +212611,7 @@
 "type": "string"
 },
 "Type": {
- "markdownDescription": "The type of `FilterSliderControl` . Choose one of the following options:\n\n- `SINGLE_POINT` : Filter against(equals) a single data point.\n- `RANGE` : Filter data that is in a specified range.",
+ "markdownDescription": "The type of the `FilterSliderControl` . Choose one of the following options:\n\n- `SINGLE_POINT` : Filter against (equals) a single data point.\n- `RANGE` : Filter data that is in a specified range.",
 "title": "Type",
 "type": "string"
 }
@@ -212370,12 +215953,12 @@
 "title": "DisplayOptions"
 },
 "MaximumValue": {
- "markdownDescription": "The smaller value that is displayed at the left of the slider.",
+ "markdownDescription": "The larger value that is displayed at the right of the slider.",
 "title": "MaximumValue",
 "type": "number"
 },
 "MinimumValue": {
- "markdownDescription": "The larger value that is displayed at the right of the slider.",
+ "markdownDescription": "The smaller value that is displayed at the left of the slider.",
 "title": "MinimumValue",
 "type": "number"
 },
@@ -218919,6 +222502,11 @@
 "title": "EngineVersion",
 "type": "string"
 },
+ "ImageId": {
+ "markdownDescription": "A value that indicates the ID of the AMI.",
+ "title": "ImageId",
+ "type": "string"
+ },
 "KMSKeyId": {
 "markdownDescription": "The AWS KMS key identifier for an encrypted CEV. A symmetric encryption KMS key is required for RDS Custom, but optional for Amazon RDS.\n\nIf you have an existing symmetric encryption KMS key in your account, you can use it with RDS Custom. No further action is necessary. If you don't already have a symmetric encryption KMS key in your account, follow the instructions in [Creating a symmetric encryption KMS key](https://docs.aws.amazon.com/kms/latest/developerguide/create-keys.html#create-symmetric-cmk) in the *AWS Key Management Service Developer Guide* .\n\nYou can choose the same symmetric encryption key when you create a CEV and a DB instance, or choose different keys.",
 "title": "KMSKeyId",
 "type": "string"
 },
 "Manifest": {
 "markdownDescription": "",
 "title": "Manifest",
 "type": "string"
 },
+ "SourceCustomDbEngineVersionIdentifier": {
+ "markdownDescription": "The ARN of a CEV to use as a source for creating a new CEV. 
You can specify a different Amazon Machine Image (AMI) by using either `Source` or `UseAwsProvidedLatestImage` . You can't specify a different JSON manifest when you specify `SourceCustomDbEngineVersionIdentifier` .",
+ "title": "SourceCustomDbEngineVersionIdentifier",
+ "type": "string"
+ },
 "Status": {
 "markdownDescription": "A value that indicates the status of a custom engine version (CEV).",
 "title": "Status",
 "type": "string"
 },
@@ -218941,10 +222534,14 @@
 "markdownDescription": "A list of tags. For more information, see [Tagging Amazon RDS Resources](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Tagging.html) in the *Amazon RDS User Guide.*",
 "title": "Tags",
 "type": "array"
+ },
+ "UseAwsProvidedLatestImage": {
+ "markdownDescription": "Specifies whether to use the latest service-provided Amazon Machine Image (AMI) for the CEV. If you specify `UseAwsProvidedLatestImage` , you can't also specify `ImageId` .",
+ "title": "UseAwsProvidedLatestImage",
+ "type": "boolean"
+ }
 },
 "required": [
- "DatabaseInstallationFilesS3BucketName",
 "Engine",
 "EngineVersion"
 ],
 "type": "object"
 },
@@ -219063,7 +222660,7 @@
 "type": "string"
 },
 "DBInstanceParameterGroupName": {
- "markdownDescription": "The name of the DB parameter group to apply to all instances of the DB cluster.\n\n> When you apply a parameter group using the `DBInstanceParameterGroupName` parameter, the DB cluster isn't rebooted automatically. Also, parameter changes are applied immediately rather than during the next maintenance window. \n\nDefault: The existing name setting\n\nConstraints:\n\n- The DB parameter group must be in the same DB parameter group family as this DB cluster.",
+ "markdownDescription": "The name of the DB parameter group to apply to all instances of the DB cluster.\n\n> When you apply a parameter group using the `DBInstanceParameterGroupName` parameter, the DB cluster isn't rebooted automatically. Also, parameter changes are applied immediately rather than during the next maintenance window. \n\nValid for Cluster Type: Aurora DB clusters only\n\nDefault: The existing name setting\n\nConstraints:\n\n- The DB parameter group must be in the same DB parameter group family as this DB cluster.\n- The `DBInstanceParameterGroupName` parameter is valid in combination with the `AllowMajorVersionUpgrade` parameter for a major version upgrade only.",
 "title": "DBInstanceParameterGroupName",
 "type": "string"
 },
@@ -219126,7 +222723,7 @@
 "type": "string"
 },
 "EngineMode": {
- "markdownDescription": "The DB engine mode of the DB cluster, either `provisioned` or `serverless` .\n\nThe `serverless` engine mode only applies for Aurora Serverless v1 DB clusters.\n\nFor information about limitations and requirements for Serverless DB clusters, see the following sections in the *Amazon Aurora User Guide* :\n\n- [Limitations of Aurora Serverless v1](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-serverless.html#aurora-serverless.limitations)\n- [Requirements for Aurora Serverless v2](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-serverless-v2.requirements.html)\n\nValid for Cluster Type: Aurora DB clusters only",
+ "markdownDescription": "The DB engine mode of the DB cluster, either `provisioned` or `serverless` .\n\nThe `serverless` engine mode only applies for Aurora Serverless v1 DB clusters. 
Aurora Serverless v2 DB clusters use the `provisioned` engine mode.\n\nFor information about limitations and requirements for Serverless DB clusters, see the following sections in the *Amazon Aurora User Guide* :\n\n- [Limitations of Aurora Serverless v1](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-serverless.html#aurora-serverless.limitations)\n- [Requirements for Aurora Serverless v2](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-serverless-v2.requirements.html)\n\nValid for Cluster Type: Aurora DB clusters only", "title": "EngineMode", "type": "string" }, @@ -219743,7 +223340,7 @@ "title": "Endpoint" }, "Engine": { - "markdownDescription": "The name of the database engine to use for this DB instance. Not every database engine is available in every AWS Region.\n\nThis property is required when creating a DB instance.\n\n> You can change the architecture of an Oracle database from the non-container database (CDB) architecture to the CDB architecture by updating the `Engine` value in your templates from `oracle-ee` or `oracle-ee-cdb` to `oracle-se2-cdb` . Converting to the CDB architecture requires an interruption. \n\nValid Values:\n\n- `aurora-mysql` (for Aurora MySQL DB instances)\n- `aurora-postgresql` (for Aurora PostgreSQL DB instances)\n- `custom-oracle-ee` (for RDS Custom for Oracle DB instances)\n- `custom-oracle-ee-cdb` (for RDS Custom for Oracle DB instances)\n- `custom-sqlserver-ee` (for RDS Custom for SQL Server DB instances)\n- `custom-sqlserver-se` (for RDS Custom for SQL Server DB instances)\n- `custom-sqlserver-web` (for RDS Custom for SQL Server DB instances)\n- `db2-ae`\n- `db2-se`\n- `mariadb`\n- `mysql`\n- `oracle-ee`\n- `oracle-ee-cdb`\n- `oracle-se2`\n- `oracle-se2-cdb`\n- `postgres`\n- `sqlserver-ee`\n- `sqlserver-se`\n- `sqlserver-ex`\n- `sqlserver-web`", + "markdownDescription": "The name of the database engine to use for this DB instance. Not every database engine is available in every AWS Region.\n\nThis property is required when creating a DB instance.\n\n> You can convert an Oracle database from the non-CDB architecture to the container database (CDB) architecture by updating the `Engine` value in your templates from `oracle-ee` to `oracle-ee-cdb` or from `oracle-se2` to `oracle-se2-cdb` . Converting to the CDB architecture requires an interruption. \n\nValid Values:\n\n- `aurora-mysql` (for Aurora MySQL DB instances)\n- `aurora-postgresql` (for Aurora PostgreSQL DB instances)\n- `custom-oracle-ee` (for RDS Custom for Oracle DB instances)\n- `custom-oracle-ee-cdb` (for RDS Custom for Oracle DB instances)\n- `custom-sqlserver-ee` (for RDS Custom for SQL Server DB instances)\n- `custom-sqlserver-se` (for RDS Custom for SQL Server DB instances)\n- `custom-sqlserver-web` (for RDS Custom for SQL Server DB instances)\n- `db2-ae`\n- `db2-se`\n- `mariadb`\n- `mysql`\n- `oracle-ee`\n- `oracle-ee-cdb`\n- `oracle-se2`\n- `oracle-se2-cdb`\n- `postgres`\n- `sqlserver-ee`\n- `sqlserver-se`\n- `sqlserver-ex`\n- `sqlserver-web`", "title": "Engine", "type": "string" }, @@ -219758,7 +223355,7 @@ "type": "number" }, "KmsKeyId": { - "markdownDescription": "The ARN of the AWS KMS key that's used to encrypt the DB instance, such as `arn:aws:kms:us-east-1:012345678910:key/abcd1234-a123-456a-a12b-a123b4cd56ef` . If you enable the StorageEncrypted property but don't specify this property, AWS CloudFormation uses the default KMS key. 
If you specify this property, you must set the StorageEncrypted property to true.\n\nIf you specify the `SourceDBInstanceIdentifier` property, the value is inherited from the source DB instance if the read replica is created in the same region.\n\nIf you create an encrypted read replica in a different AWS Region, then you must specify a KMS key for the destination AWS Region. KMS encryption keys are specific to the region that they're created in, and you can't use encryption keys from one region in another region.\n\nIf you specify the `SnapshotIdentifier` property, the `StorageEncrypted` property value is inherited from the snapshot, and if the DB instance is encrypted, the specified `KmsKeyId` property is used.\n\nIf you specify `DBSecurityGroups` , AWS CloudFormation ignores this property. To specify both a security group and this property, you must use a VPC security group. For more information about Amazon RDS and VPC, see [Using Amazon RDS with Amazon VPC](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_VPC.html) in the *Amazon RDS User Guide* .\n\n*Amazon Aurora*\n\nNot applicable. The KMS key identifier is managed by the DB cluster.", + "markdownDescription": "The ARN of the AWS KMS key that's used to encrypt the DB instance, such as `arn:aws:kms:us-east-1:012345678910:key/abcd1234-a123-456a-a12b-a123b4cd56ef` . If you enable the StorageEncrypted property but don't specify this property, AWS CloudFormation uses the default KMS key. If you specify this property, you must set the StorageEncrypted property to true.\n\nIf you specify the `SourceDBInstanceIdentifier` property, the value is inherited from the source DB instance if the read replica is created in the same region.\n\nIf you create an encrypted read replica in a different AWS Region, then you must specify a KMS key for the destination AWS Region. KMS encryption keys are specific to the region that they're created in, and you can't use encryption keys from one region in another region.\n\nIf you specify the `DBSnapshotIdentifier` property, don't specify this property. The `StorageEncrypted` property value is inherited from the snapshot. If the DB instance is encrypted, the specified `KmsKeyId` property is also inherited from the snapshot.\n\nIf you specify `DBSecurityGroups` , AWS CloudFormation ignores this property. To specify both a security group and this property, you must use a VPC security group. For more information about Amazon RDS and VPC, see [Using Amazon RDS with Amazon VPC](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_VPC.html) in the *Amazon RDS User Guide* .\n\n*Amazon Aurora*\n\nNot applicable. The KMS key identifier is managed by the DB cluster.", "title": "KmsKeyId", "type": "string" }, @@ -219901,7 +223498,7 @@ "type": "string" }, "StorageEncrypted": { - "markdownDescription": "A value that indicates whether the DB instance is encrypted. By default, it isn't encrypted.\n\nIf you specify the `KmsKeyId` property, then you must enable encryption.\n\nIf you specify the `SourceDBInstanceIdentifier` property, don't specify this property. The value is inherited from the source DB instance, and if the DB instance is encrypted, the specified `KmsKeyId` property is used.\n\nIf you specify the `DBSnapshotIdentifier` and the specified snapshot is encrypted, don't specify this property. 
The value is inherited from the snapshot, and the specified `KmsKeyId` property is used.\n\nIf you specify the `DBSnapshotIdentifier` and the specified snapshot isn't encrypted, you can use this property to specify that the restored DB instance is encrypted. Specify the `KmsKeyId` property for the KMS key to use for encryption. If you don't want the restored DB instance to be encrypted, then don't set this property or set it to `false` .\n\n*Amazon Aurora*\n\nNot applicable. The encryption for DB instances is managed by the DB cluster.", + "markdownDescription": "A value that indicates whether the DB instance is encrypted. By default, it isn't encrypted.\n\nIf you specify the `KmsKeyId` property, then you must enable encryption.\n\nIf you specify the `SourceDBInstanceIdentifier` property, don't specify this property. The value is inherited from the source DB instance, and if the DB instance is encrypted, the specified `KmsKeyId` property is used.\n\nIf you specify `DBSnapshotIdentifier` property, don't specify this property. The value is inherited from the snapshot.\n\n*Amazon Aurora*\n\nNot applicable. The encryption for DB instances is managed by the DB cluster.", "title": "StorageEncrypted", "type": "boolean" }, @@ -222963,6 +226560,14 @@ "title": "RedshiftIdcApplicationArn", "type": "string" }, + "SnapshotCopyConfigurations": { + "items": { + "$ref": "#/definitions/AWS::RedshiftServerless::Namespace.SnapshotCopyConfiguration" + }, + "markdownDescription": "", + "title": "SnapshotCopyConfigurations", + "type": "array" + }, "Tags": { "items": { "$ref": "#/definitions/Tag" @@ -223075,6 +226680,30 @@ }, "type": "object" }, + "AWS::RedshiftServerless::Namespace.SnapshotCopyConfiguration": { + "additionalProperties": false, + "properties": { + "DestinationKmsKeyId": { + "markdownDescription": "The ID of the KMS key to use to encrypt your snapshots in the destination AWS Region .", + "title": "DestinationKmsKeyId", + "type": "string" + }, + "DestinationRegion": { + "markdownDescription": "The destination AWS Region to copy snapshots to.", + "title": "DestinationRegion", + "type": "string" + }, + "SnapshotRetentionPeriod": { + "markdownDescription": "The retention period of snapshots that are copied to the destination AWS Region .", + "title": "SnapshotRetentionPeriod", + "type": "number" + } + }, + "required": [ + "DestinationRegion" + ], + "type": "object" + }, "AWS::RedshiftServerless::Workgroup": { "additionalProperties": false, "properties": { @@ -226615,7 +230244,7 @@ }, "QueryLoggingConfig": { "$ref": "#/definitions/AWS::Route53::HostedZone.QueryLoggingConfig", - "markdownDescription": "Creates a configuration for DNS query logging. After you create a query logging configuration, Amazon Route 53 begins to publish log data to an Amazon CloudWatch Logs log group.\n\nDNS query logs contain information about the queries that Route 53 receives for a specified public hosted zone, such as the following:\n\n- Route 53 edge location that responded to the DNS query\n- Domain or subdomain that was requested\n- DNS record type, such as A or AAAA\n- DNS response code, such as `NoError` or `ServFail`\n\n- **Log Group and Resource Policy** - Before you create a query logging configuration, perform the following operations.\n\n> If you create a query logging configuration using the Route 53 console, Route 53 performs these operations automatically. \n\n- Create a CloudWatch Logs log group, and make note of the ARN, which you specify when you create a query logging configuration. 
Note the following:\n\n- You must create the log group in the us-east-1 region.\n- You must use the same AWS account to create the log group and the hosted zone that you want to configure query logging for.\n- When you create log groups for query logging, we recommend that you use a consistent prefix, for example:\n\n`/aws/route53/ *hosted zone name*`\n\nIn the next step, you'll create a resource policy, which controls access to one or more log groups and the associated AWS resources, such as Route 53 hosted zones. There's a limit on the number of resource policies that you can create, so we recommend that you use a consistent prefix so you can use the same resource policy for all the log groups that you create for query logging.\n- Create a CloudWatch Logs resource policy, and give it the permissions that Route 53 needs to create log streams and to send query logs to log streams. For the value of `Resource` , specify the ARN for the log group that you created in the previous step. To use the same resource policy for all the CloudWatch Logs log groups that you created for query logging configurations, replace the hosted zone name with `*` , for example:\n\n`arn:aws:logs:us-east-1:123412341234:log-group:/aws/route53/*`\n\nTo avoid the confused deputy problem, a security issue where an entity without a permission for an action can coerce a more-privileged entity to perform it, you can optionally limit the permissions that a service has to a resource in a resource-based policy by supplying the following values:\n\n- For `aws:SourceArn` , supply the hosted zone ARN used in creating the query logging configuration. For example, `aws:SourceArn: arn:aws:route53:::hostedzone/hosted zone ID` .\n- For `aws:SourceAccount` , supply the account ID for the account that creates the query logging configuration. For example, `aws:SourceAccount:111111111111` .\n\nFor more information, see [The confused deputy problem](https://docs.aws.amazon.com/IAM/latest/UserGuide/confused-deputy.html) in the *AWS IAM User Guide* .\n\n> You can't use the CloudWatch console to create or edit a resource policy. You must use the CloudWatch API, one of the AWS SDKs, or the AWS CLI .\n- **Log Streams and Edge Locations** - When Route 53 finishes creating the configuration for DNS query logging, it does the following:\n\n- Creates a log stream for an edge location the first time that the edge location responds to DNS queries for the specified hosted zone. That log stream is used to log all queries that Route 53 responds to for that edge location.\n- Begins to send query logs to the applicable log stream.\n\nThe name of each log stream is in the following format:\n\n`*hosted zone ID* / *edge location code*`\n\nThe edge location code is a three-letter code and an arbitrarily assigned number, for example, DFW3. The three-letter code typically corresponds with the International Air Transport Association airport code for an airport near the edge location. (These abbreviations might change in the future.) For a list of edge locations, see \"The Route 53 Global Network\" on the [Route 53 Product Details](https://docs.aws.amazon.com/route53/details/) page.\n- **Queries That Are Logged** - Query logs contain only the queries that DNS resolvers forward to Route 53. If a DNS resolver has already cached the response to a query (such as the IP address for a load balancer for example.com), the resolver will continue to return the cached response. 
It doesn't forward another query to Route 53 until the TTL for the corresponding resource record set expires. Depending on how many DNS queries are submitted for a resource record set, and depending on the TTL for that resource record set, query logs might contain information about only one query out of every several thousand queries that are submitted to DNS. For more information about how DNS works, see [Routing Internet Traffic to Your Website or Web Application](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/welcome-dns-service.html) in the *Amazon Route 53 Developer Guide* .\n- **Log File Format** - For a list of the values in each query log and the format of each value, see [Logging DNS Queries](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/query-logs.html) in the *Amazon Route 53 Developer Guide* .\n- **Pricing** - For information about charges for query logs, see [Amazon CloudWatch Pricing](https://docs.aws.amazon.com/cloudwatch/pricing/) .\n- **How to Stop Logging** - If you want Route 53 to stop sending query logs to CloudWatch Logs, delete the query logging configuration. For more information, see [DeleteQueryLoggingConfig](https://docs.aws.amazon.com/Route53/latest/APIReference/API_DeleteQueryLoggingConfig.html) .", + "markdownDescription": "Creates a configuration for DNS query logging. After you create a query logging configuration, Amazon Route 53 begins to publish log data to an Amazon CloudWatch Logs log group.\n\nDNS query logs contain information about the queries that Route 53 receives for a specified public hosted zone, such as the following:\n\n- Route 53 edge location that responded to the DNS query\n- Domain or subdomain that was requested\n- DNS record type, such as A or AAAA\n- DNS response code, such as `NoError` or `ServFail`\n\n- **Log Group and Resource Policy** - Before you create a query logging configuration, perform the following operations.\n\n> If you create a query logging configuration using the Route 53 console, Route 53 performs these operations automatically. \n\n- Create a CloudWatch Logs log group, and make note of the ARN, which you specify when you create a query logging configuration. Note the following:\n\n- You must create the log group in the us-east-1 region.\n- You must use the same AWS account to create the log group and the hosted zone that you want to configure query logging for.\n- When you create log groups for query logging, we recommend that you use a consistent prefix, for example:\n\n`/aws/route53/ *hosted zone name*`\n\nIn the next step, you'll create a resource policy, which controls access to one or more log groups and the associated AWS resources, such as Route 53 hosted zones. There's a limit on the number of resource policies that you can create, so we recommend that you use a consistent prefix so you can use the same resource policy for all the log groups that you create for query logging.\n- Create a CloudWatch Logs resource policy, and give it the permissions that Route 53 needs to create log streams and to send query logs to log streams. You must create the CloudWatch Logs resource policy in the us-east-1 region. For the value of `Resource` , specify the ARN for the log group that you created in the previous step. 
To use the same resource policy for all the CloudWatch Logs log groups that you created for query logging configurations, replace the hosted zone name with `*` , for example:\n\n`arn:aws:logs:us-east-1:123412341234:log-group:/aws/route53/*`\n\nTo avoid the confused deputy problem, a security issue where an entity without a permission for an action can coerce a more-privileged entity to perform it, you can optionally limit the permissions that a service has to a resource in a resource-based policy by supplying the following values:\n\n- For `aws:SourceArn` , supply the hosted zone ARN used in creating the query logging configuration. For example, `aws:SourceArn: arn:aws:route53:::hostedzone/hosted zone ID` .\n- For `aws:SourceAccount` , supply the account ID for the account that creates the query logging configuration. For example, `aws:SourceAccount:111111111111` .\n\nFor more information, see [The confused deputy problem](https://docs.aws.amazon.com/IAM/latest/UserGuide/confused-deputy.html) in the *AWS IAM User Guide* .\n\n> You can't use the CloudWatch console to create or edit a resource policy. You must use the CloudWatch API, one of the AWS SDKs, or the AWS CLI .\n- **Log Streams and Edge Locations** - When Route 53 finishes creating the configuration for DNS query logging, it does the following:\n\n- Creates a log stream for an edge location the first time that the edge location responds to DNS queries for the specified hosted zone. That log stream is used to log all queries that Route 53 responds to for that edge location.\n- Begins to send query logs to the applicable log stream.\n\nThe name of each log stream is in the following format:\n\n`*hosted zone ID* / *edge location code*`\n\nThe edge location code is a three-letter code and an arbitrarily assigned number, for example, DFW3. The three-letter code typically corresponds with the International Air Transport Association airport code for an airport near the edge location. (These abbreviations might change in the future.) For a list of edge locations, see \"The Route 53 Global Network\" on the [Route 53 Product Details](https://docs.aws.amazon.com/route53/details/) page.\n- **Queries That Are Logged** - Query logs contain only the queries that DNS resolvers forward to Route 53. If a DNS resolver has already cached the response to a query (such as the IP address for a load balancer for example.com), the resolver will continue to return the cached response. It doesn't forward another query to Route 53 until the TTL for the corresponding resource record set expires. Depending on how many DNS queries are submitted for a resource record set, and depending on the TTL for that resource record set, query logs might contain information about only one query out of every several thousand queries that are submitted to DNS. 
For more information about how DNS works, see [Routing Internet Traffic to Your Website or Web Application](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/welcome-dns-service.html) in the *Amazon Route 53 Developer Guide* .\n- **Log File Format** - For a list of the values in each query log and the format of each value, see [Logging DNS Queries](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/query-logs.html) in the *Amazon Route 53 Developer Guide* .\n- **Pricing** - For information about charges for query logs, see [Amazon CloudWatch Pricing](https://docs.aws.amazon.com/cloudwatch/pricing/) .\n- **How to Stop Logging** - If you want Route 53 to stop sending query logs to CloudWatch Logs, delete the query logging configuration. For more information, see [DeleteQueryLoggingConfig](https://docs.aws.amazon.com/Route53/latest/APIReference/API_DeleteQueryLoggingConfig.html) .", "title": "QueryLoggingConfig" }, "VPCs": { @@ -226913,7 +230542,7 @@ "type": "string" }, "Type": { - "markdownDescription": "The DNS record type. For information about different record types and how data is encoded for them, see [Supported DNS Resource Record Types](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/ResourceRecordTypes.html) in the *Amazon Route 53 Developer Guide* .\n\nValid values for basic resource record sets: `A` | `AAAA` | `CAA` | `CNAME` | `DS` | `MX` | `NAPTR` | `NS` | `PTR` | `SOA` | `SPF` | `SRV` | `TXT`\n\nValues for weighted, latency, geolocation, and failover resource record sets: `A` | `AAAA` | `CAA` | `CNAME` | `MX` | `NAPTR` | `PTR` | `SPF` | `SRV` | `TXT` . When creating a group of weighted, latency, geolocation, or failover resource record sets, specify the same value for all of the resource record sets in the group.\n\nValid values for multivalue answer resource record sets: `A` | `AAAA` | `MX` | `NAPTR` | `PTR` | `SPF` | `SRV` | `TXT`\n\n> SPF records were formerly used to verify the identity of the sender of email messages. However, we no longer recommend that you create resource record sets for which the value of `Type` is `SPF` . RFC 7208, *Sender Policy Framework (SPF) for Authorizing Use of Domains in Email, Version 1* , has been updated to say, \"...[I]ts existence and mechanism defined in [RFC4408] have led to some interoperability issues. Accordingly, its use is no longer appropriate for SPF version 1; implementations are not to use it.\" In RFC 7208, see section 14.1, [The SPF DNS Record Type](https://docs.aws.amazon.com/http://tools.ietf.org/html/rfc7208#section-14.1) . \n\nValues for alias resource record sets:\n\n- *Amazon API Gateway custom regional APIs and edge-optimized APIs:* `A`\n- *CloudFront distributions:* `A`\n\nIf IPv6 is enabled for the distribution, create two resource record sets to route traffic to your distribution, one with a value of `A` and one with a value of `AAAA` .\n- *Amazon API Gateway environment that has a regionalized subdomain* : `A`\n- *ELB load balancers:* `A` | `AAAA`\n- *Amazon S3 buckets:* `A`\n- *Amazon Virtual Private Cloud interface VPC endpoints* `A`\n- *Another resource record set in this hosted zone:* Specify the type of the resource record set that you're creating the alias for. All values are supported except `NS` and `SOA` .\n\n> If you're creating an alias record that has the same name as the hosted zone (known as the zone apex), you can't route traffic to a record for which the value of `Type` is `CNAME` . 
This is because the alias record must have the same type as the record you're routing traffic to, and creating a CNAME record for the zone apex isn't supported even for an alias record.", + "markdownDescription": "The DNS record type. For information about different record types and how data is encoded for them, see [Supported DNS Resource Record Types](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/ResourceRecordTypes.html) in the *Amazon Route 53 Developer Guide* .\n\nValid values for basic resource record sets: `A` | `AAAA` | `CAA` | `CNAME` | `DS` | `MX` | `NAPTR` | `NS` | `PTR` | `SOA` | `SPF` | `SRV` | `TXT`\n\nValues for weighted, latency, geolocation, and failover resource record sets: `A` | `AAAA` | `CAA` | `CNAME` | `MX` | `NAPTR` | `PTR` | `SPF` | `SRV` | `TXT` . When creating a group of weighted, latency, geolocation, or failover resource record sets, specify the same value for all of the resource record sets in the group.\n\nValid values for multivalue answer resource record sets: `A` | `AAAA` | `MX` | `NAPTR` | `PTR` | `SPF` | `SRV` | `TXT` | `CAA`\n\n> SPF records were formerly used to verify the identity of the sender of email messages. However, we no longer recommend that you create resource record sets for which the value of `Type` is `SPF` . RFC 7208, *Sender Policy Framework (SPF) for Authorizing Use of Domains in Email, Version 1* , has been updated to say, \"...[I]ts existence and mechanism defined in [RFC4408] have led to some interoperability issues. Accordingly, its use is no longer appropriate for SPF version 1; implementations are not to use it.\" In RFC 7208, see section 14.1, [The SPF DNS Record Type](http://tools.ietf.org/html/rfc7208#section-14.1) . \n\nValues for alias resource record sets:\n\n- *Amazon API Gateway custom regional APIs and edge-optimized APIs:* `A`\n- *CloudFront distributions:* `A`\n\nIf IPv6 is enabled for the distribution, create two resource record sets to route traffic to your distribution, one with a value of `A` and one with a value of `AAAA` .\n- *Amazon API Gateway environment that has a regionalized subdomain* : `A`\n- *ELB load balancers:* `A` | `AAAA`\n- *Amazon S3 buckets:* `A`\n- *Amazon Virtual Private Cloud interface VPC endpoints* `A`\n- *Another resource record set in this hosted zone:* Specify the type of the resource record set that you're creating the alias for. All values are supported except `NS` and `SOA` .\n\n> If you're creating an alias record that has the same name as the hosted zone (known as the zone apex), you can't route traffic to a record for which the value of `Type` is `CNAME` . This is because the alias record must have the same type as the record you're routing traffic to, and creating a CNAME record for the zone apex isn't supported even for an alias record.", "title": "Type", "type": "string" }, @@ -227332,7 +230961,7 @@ "type": "string" }, "Type": { - "markdownDescription": "The DNS record type. For information about different record types and how data is encoded for them, see [Supported DNS Resource Record Types](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/ResourceRecordTypes.html) in the *Amazon Route 53 Developer Guide* .\n\nValid values for basic resource record sets: `A` | `AAAA` | `CAA` | `CNAME` | `DS` | `MX` | `NAPTR` | `NS` | `PTR` | `SOA` | `SPF` | `SRV` | `TXT`\n\nValues for weighted, latency, geolocation, and failover resource record sets: `A` | `AAAA` | `CAA` | `CNAME` | `MX` | `NAPTR` | `PTR` | `SPF` | `SRV` | `TXT` . 
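A minimal sketch of the zone-apex rule in the description above: at the apex the alias record uses the same `Type` as its target (here `A` for a CloudFront distribution) rather than `CNAME`. The domain and distribution DNS name are hypothetical; `Z2FDTNDATAQYW2` is the fixed hosted zone ID that CloudFront alias targets use.

```json
{
  "ApexAliasRecord": {
    "Type": "AWS::Route53::RecordSet",
    "Properties": {
      "HostedZoneName": "example.com.",
      "Name": "example.com.",
      "Type": "A",
      "AliasTarget": {
        "DNSName": "d111111abcdef8.cloudfront.net",
        "HostedZoneId": "Z2FDTNDATAQYW2"
      }
    }
  }
}
```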
When creating a group of weighted, latency, geolocation, or failover resource record sets, specify the same value for all of the resource record sets in the group.\n\nValid values for multivalue answer resource record sets: `A` | `AAAA` | `MX` | `NAPTR` | `PTR` | `SPF` | `SRV` | `TXT`\n\n> SPF records were formerly used to verify the identity of the sender of email messages. However, we no longer recommend that you create resource record sets for which the value of `Type` is `SPF` . RFC 7208, *Sender Policy Framework (SPF) for Authorizing Use of Domains in Email, Version 1* , has been updated to say, \"...[I]ts existence and mechanism defined in [RFC4408] have led to some interoperability issues. Accordingly, its use is no longer appropriate for SPF version 1; implementations are not to use it.\" In RFC 7208, see section 14.1, [The SPF DNS Record Type](https://docs.aws.amazon.com/http://tools.ietf.org/html/rfc7208#section-14.1) . \n\nValues for alias resource record sets:\n\n- *Amazon API Gateway custom regional APIs and edge-optimized APIs:* `A`\n- *CloudFront distributions:* `A`\n\nIf IPv6 is enabled for the distribution, create two resource record sets to route traffic to your distribution, one with a value of `A` and one with a value of `AAAA` .\n- *Amazon API Gateway environment that has a regionalized subdomain* : `A`\n- *ELB load balancers:* `A` | `AAAA`\n- *Amazon S3 buckets:* `A`\n- *Amazon Virtual Private Cloud interface VPC endpoints* `A`\n- *Another resource record set in this hosted zone:* Specify the type of the resource record set that you're creating the alias for. All values are supported except `NS` and `SOA` .\n\n> If you're creating an alias record that has the same name as the hosted zone (known as the zone apex), you can't route traffic to a record for which the value of `Type` is `CNAME` . This is because the alias record must have the same type as the record you're routing traffic to, and creating a CNAME record for the zone apex isn't supported even for an alias record.", + "markdownDescription": "The DNS record type. For information about different record types and how data is encoded for them, see [Supported DNS Resource Record Types](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/ResourceRecordTypes.html) in the *Amazon Route 53 Developer Guide* .\n\nValid values for basic resource record sets: `A` | `AAAA` | `CAA` | `CNAME` | `DS` | `MX` | `NAPTR` | `NS` | `PTR` | `SOA` | `SPF` | `SRV` | `TXT`\n\nValues for weighted, latency, geolocation, and failover resource record sets: `A` | `AAAA` | `CAA` | `CNAME` | `MX` | `NAPTR` | `PTR` | `SPF` | `SRV` | `TXT` . When creating a group of weighted, latency, geolocation, or failover resource record sets, specify the same value for all of the resource record sets in the group.\n\nValid values for multivalue answer resource record sets: `A` | `AAAA` | `MX` | `NAPTR` | `PTR` | `SPF` | `SRV` | `TXT` | `CAA`\n\n> SPF records were formerly used to verify the identity of the sender of email messages. However, we no longer recommend that you create resource record sets for which the value of `Type` is `SPF` . RFC 7208, *Sender Policy Framework (SPF) for Authorizing Use of Domains in Email, Version 1* , has been updated to say, \"...[I]ts existence and mechanism defined in [RFC4408] have led to some interoperability issues. 
Accordingly, its use is no longer appropriate for SPF version 1; implementations are not to use it.\" In RFC 7208, see section 14.1, [The SPF DNS Record Type](http://tools.ietf.org/html/rfc7208#section-14.1) . \n\nValues for alias resource record sets:\n\n- *Amazon API Gateway custom regional APIs and edge-optimized APIs:* `A`\n- *CloudFront distributions:* `A`\n\nIf IPv6 is enabled for the distribution, create two resource record sets to route traffic to your distribution, one with a value of `A` and one with a value of `AAAA` .\n- *Amazon API Gateway environment that has a regionalized subdomain* : `A`\n- *ELB load balancers:* `A` | `AAAA`\n- *Amazon S3 buckets:* `A`\n- *Amazon Virtual Private Cloud interface VPC endpoints* `A`\n- *Another resource record set in this hosted zone:* Specify the type of the resource record set that you're creating the alias for. All values are supported except `NS` and `SOA` .\n\n> If you're creating an alias record that has the same name as the hosted zone (known as the zone apex), you can't route traffic to a record for which the value of `Type` is `CNAME` . This is because the alias record must have the same type as the record you're routing traffic to, and creating a CNAME record for the zone apex isn't supported even for an alias record.", "title": "Type", "type": "string" }, @@ -235551,7 +239180,7 @@ "type": "string" }, "Name": { - "markdownDescription": "A name for the SSM document.\n\n> You can't use the following strings as document name prefixes. These are reserved by AWS for use as document name prefixes:\n> \n> - `aws`\n> - `amazon`\n> - `amzn`", + "markdownDescription": "A name for the SSM document.\n\n> You can't use the following strings as document name prefixes. These are reserved by AWS for use as document name prefixes:\n> \n> - `aws`\n> - `amazon`\n> - `amzn`\n> - `AWSEC2`\n> - `AWSConfigRemediation`\n> - `AWSSupport`", "title": "Name", "type": "string" }, @@ -238543,6 +242172,11 @@ "title": "AppImageConfigName", "type": "string" }, + "CodeEditorAppImageConfig": { + "$ref": "#/definitions/AWS::SageMaker::AppImageConfig.CodeEditorAppImageConfig", + "markdownDescription": "The configuration for the file system and the runtime, such as the environment variables and entry point.", + "title": "CodeEditorAppImageConfig" + }, "JupyterLabAppImageConfig": { "$ref": "#/definitions/AWS::SageMaker::AppImageConfig.JupyterLabAppImageConfig", "markdownDescription": "The configuration for the file system and the runtime, such as the environment variables and entry point.", @@ -238588,6 +242222,17 @@ ], "type": "object" }, + "AWS::SageMaker::AppImageConfig.CodeEditorAppImageConfig": { + "additionalProperties": false, + "properties": { + "ContainerConfig": { + "$ref": "#/definitions/AWS::SageMaker::AppImageConfig.ContainerConfig", + "markdownDescription": "The configuration used to run the application image container.", + "title": "ContainerConfig" + } + }, + "type": "object" + }, "AWS::SageMaker::AppImageConfig.ContainerConfig": { "additionalProperties": false, "properties": { @@ -239680,6 +243325,14 @@ "AWS::SageMaker::Domain.CodeEditorAppSettings": { "additionalProperties": false, "properties": { + "CustomImages": { + "items": { + "$ref": "#/definitions/AWS::SageMaker::Domain.CustomImage" + }, + "markdownDescription": "A list of custom SageMaker images that are configured to run as a Code Editor app.", + "title": "CustomImages", + "type": "array" + }, "DefaultResourceSpec": { "$ref": "#/definitions/AWS::SageMaker::Domain.ResourceSpec", "markdownDescription": "The default instance type 
and the Amazon Resource Name (ARN) of the default SageMaker image used by the Code Editor app.", @@ -241183,7 +244836,7 @@ "additionalProperties": false, "properties": { "FeatureName": { - "markdownDescription": "The name of a feature. The type must be a string. `FeatureName` cannot be any of the following: `is_deleted` , `write_time` , `api_invocation_time` .\n\nThe name:\n\n- Must start and end with an alphanumeric character.\n- Can only include alphanumeric characters, underscores, and hyphens. Spaces are not allowed.", + "markdownDescription": "The name of a feature. The type must be a string. `FeatureName` cannot be any of the following: `is_deleted` , `write_time` , `api_invocation_time` .\n\nThe name:\n\n- Must start with an alphanumeric character.\n- Can only include alphanumeric characters, underscores, and hyphens. Spaces are not allowed.", "title": "FeatureName", "type": "string" }, @@ -247491,6 +251144,14 @@ "AWS::SageMaker::UserProfile.CodeEditorAppSettings": { "additionalProperties": false, "properties": { + "CustomImages": { + "items": { + "$ref": "#/definitions/AWS::SageMaker::UserProfile.CustomImage" + }, + "markdownDescription": "A list of custom SageMaker images that are configured to run as a Code Editor app.", + "title": "CustomImages", + "type": "array" + }, "DefaultResourceSpec": { "$ref": "#/definitions/AWS::SageMaker::UserProfile.ResourceSpec", "markdownDescription": "The default instance type and the Amazon Resource Name (ARN) of the default SageMaker image used by the Code Editor app.", @@ -249716,6 +253377,73 @@ ], "type": "object" }, + "AWS::SecurityHub::DelegatedAdmin": { + "additionalProperties": false, + "properties": { + "Condition": { + "type": "string" + }, + "DeletionPolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + }, + "DependsOn": { + "anyOf": [ + { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + { + "items": { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + "type": "array" + } + ] + }, + "Metadata": { + "type": "object" + }, + "Properties": { + "additionalProperties": false, + "properties": { + "AdminAccountId": { + "markdownDescription": "The AWS account identifier of the account to designate as the Security Hub administrator account.", + "title": "AdminAccountId", + "type": "string" + } + }, + "required": [ + "AdminAccountId" + ], + "type": "object" + }, + "Type": { + "enum": [ + "AWS::SecurityHub::DelegatedAdmin" + ], + "type": "string" + }, + "UpdateReplacePolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + } + }, + "required": [ + "Type", + "Properties" + ], + "type": "object" + }, "AWS::SecurityHub::Hub": { "additionalProperties": false, "properties": { @@ -249800,6 +253528,1094 @@ ], "type": "object" }, + "AWS::SecurityHub::Insight": { + "additionalProperties": false, + "properties": { + "Condition": { + "type": "string" + }, + "DeletionPolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + }, + "DependsOn": { + "anyOf": [ + { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + { + "items": { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + "type": "array" + } + ] + }, + "Metadata": { + "type": "object" + }, + "Properties": { + "additionalProperties": false, + "properties": { + "Filters": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.AwsSecurityFindingFilters", + "markdownDescription": "One or more attributes used to filter the findings included in the insight. 
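As a hedged usage sketch of the `CodeEditorAppImageConfig` addition earlier in this hunk: the config name, entrypoint, and environment variable below are invented placeholders, and the `ContainerConfig` subproperties are assumed to mirror the existing `AWS::SageMaker::AppImageConfig.ContainerConfig` definition.

```json
{
  "MyCodeEditorImageConfig": {
    "Type": "AWS::SageMaker::AppImageConfig",
    "Properties": {
      "AppImageConfigName": "my-code-editor-image-config",
      "CodeEditorAppImageConfig": {
        "ContainerConfig": {
          "ContainerEntrypoint": ["/usr/local/bin/entrypoint.sh"],
          "ContainerEnvironmentVariables": { "LOG_LEVEL": "info" }
        }
      }
    }
  }
}
```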
The insight only includes findings that match the criteria defined in the filters. You can filter by up to ten finding attributes. For each attribute, you can provide up to 20 filter values.", + "title": "Filters" + }, + "GroupByAttribute": { + "markdownDescription": "The grouping attribute for the insight's findings. Indicates how to group the matching findings, and identifies the type of item that the insight applies to. For example, if an insight is grouped by resource identifier, then the insight produces a list of resource identifiers.", + "title": "GroupByAttribute", + "type": "string" + }, + "Name": { + "markdownDescription": "The name of a Security Hub insight.", + "title": "Name", + "type": "string" + } + }, + "required": [ + "Filters", + "GroupByAttribute", + "Name" + ], + "type": "object" + }, + "Type": { + "enum": [ + "AWS::SecurityHub::Insight" + ], + "type": "string" + }, + "UpdateReplacePolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + } + }, + "required": [ + "Type", + "Properties" + ], + "type": "object" + }, + "AWS::SecurityHub::Insight.AwsSecurityFindingFilters": { + "additionalProperties": false, + "properties": { + "AwsAccountId": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.StringFilter" + }, + "markdownDescription": "The AWS account ID in which a finding is generated.", + "title": "AwsAccountId", + "type": "array" + }, + "AwsAccountName": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.StringFilter" + }, + "markdownDescription": "The name of the AWS account in which a finding is generated.", + "title": "AwsAccountName", + "type": "array" + }, + "CompanyName": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.StringFilter" + }, + "markdownDescription": "The name of the findings provider (company) that owns the solution (product) that generates findings.", + "title": "CompanyName", + "type": "array" + }, + "ComplianceAssociatedStandardsId": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.StringFilter" + }, + "markdownDescription": "The unique identifier of a standard in which a control is enabled. This field consists of the resource portion of the Amazon Resource Name (ARN) returned for a standard in the [DescribeStandards](https://docs.aws.amazon.com/securityhub/1.0/APIReference/API_DescribeStandards.html) API response.", + "title": "ComplianceAssociatedStandardsId", + "type": "array" + }, + "ComplianceSecurityControlId": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.StringFilter" + }, + "markdownDescription": "The unique identifier of a control across standards. 
Values for this field typically consist of an AWS service and a number, such as APIGateway.5.", + "title": "ComplianceSecurityControlId", + "type": "array" + }, + "ComplianceSecurityControlParametersName": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.StringFilter" + }, + "markdownDescription": "The name of a security control parameter.", + "title": "ComplianceSecurityControlParametersName", + "type": "array" + }, + "ComplianceSecurityControlParametersValue": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.StringFilter" + }, + "markdownDescription": "The current value of a security control parameter.", + "title": "ComplianceSecurityControlParametersValue", + "type": "array" + }, + "ComplianceStatus": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.StringFilter" + }, + "markdownDescription": "Exclusive to findings that are generated as the result of a check run against a specific rule in a supported standard, such as CIS AWS Foundations. Contains security standard-related finding details.", + "title": "ComplianceStatus", + "type": "array" + }, + "Confidence": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.NumberFilter" + }, + "markdownDescription": "A finding's confidence. Confidence is defined as the likelihood that a finding accurately identifies the behavior or issue that it was intended to identify.\n\nConfidence is scored on a 0-100 basis using a ratio scale, where 0 means zero percent confidence and 100 means 100 percent confidence.", + "title": "Confidence", + "type": "array" + }, + "CreatedAt": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.DateFilter" + }, + "markdownDescription": "A timestamp that indicates when the security findings provider created the potential security issue that a finding reflects.\n\nThis field accepts only the specified formats. Timestamps can end with `Z` or `(\"+\" / \"-\") time-hour [\":\" time-minute]` . The time-secfrac after seconds is limited to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:\n\n- `YYYY-MM-DDTHH:MM:SSZ` (for example, `2019-01-31T23:00:00Z` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ` (for example, `2019-01-31T23:00:00.123456789Z` )\n- `YYYY-MM-DDTHH:MM:SS+HH:MM` (for example, `2024-01-04T15:25:10+17:59` )\n- `YYYY-MM-DDTHH:MM:SS-HHMM` (for example, `2024-01-04T15:25:10-1759` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM` (for example, `2024-01-04T15:25:10.123456789+17:59` )", + "title": "CreatedAt", + "type": "array" + }, + "Criticality": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.NumberFilter" + }, + "markdownDescription": "The level of importance assigned to the resources associated with the finding.\n\nA score of 0 means that the underlying resources have no criticality, and a score of 100 is reserved for the most critical resources.", + "title": "Criticality", + "type": "array" + }, + "Description": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.StringFilter" + }, + "markdownDescription": "A finding's description.", + "title": "Description", + "type": "array" + }, + "FindingProviderFieldsConfidence": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.NumberFilter" + }, + "markdownDescription": "The finding provider value for the finding confidence. 
Confidence is defined as the likelihood that a finding accurately identifies the behavior or issue that it was intended to identify.\n\nConfidence is scored on a 0-100 basis using a ratio scale, where 0 means zero percent confidence and 100 means 100 percent confidence.", + "title": "FindingProviderFieldsConfidence", + "type": "array" + }, + "FindingProviderFieldsCriticality": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.NumberFilter" + }, + "markdownDescription": "The finding provider value for the level of importance assigned to the resources associated with the findings.\n\nA score of 0 means that the underlying resources have no criticality, and a score of 100 is reserved for the most critical resources.", + "title": "FindingProviderFieldsCriticality", + "type": "array" + }, + "FindingProviderFieldsRelatedFindingsId": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.StringFilter" + }, + "markdownDescription": "The finding identifier of a related finding that is identified by the finding provider.", + "title": "FindingProviderFieldsRelatedFindingsId", + "type": "array" + }, + "FindingProviderFieldsRelatedFindingsProductArn": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.StringFilter" + }, + "markdownDescription": "The ARN of the solution that generated a related finding that is identified by the finding provider.", + "title": "FindingProviderFieldsRelatedFindingsProductArn", + "type": "array" + }, + "FindingProviderFieldsSeverityLabel": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.StringFilter" + }, + "markdownDescription": "The finding provider value for the severity label.", + "title": "FindingProviderFieldsSeverityLabel", + "type": "array" + }, + "FindingProviderFieldsSeverityOriginal": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.StringFilter" + }, + "markdownDescription": "The finding provider's original value for the severity.", + "title": "FindingProviderFieldsSeverityOriginal", + "type": "array" + }, + "FindingProviderFieldsTypes": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.StringFilter" + }, + "markdownDescription": "One or more finding types that the finding provider assigned to the finding. Uses the format of `namespace/category/classifier` that classify a finding.\n\nValid namespace values are: Software and Configuration Checks | TTPs | Effects | Unusual Behaviors | Sensitive Data Identifications", + "title": "FindingProviderFieldsTypes", + "type": "array" + }, + "FirstObservedAt": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.DateFilter" + }, + "markdownDescription": "A timestamp that indicates when the security findings provider first observed the potential security issue that a finding captured.\n\nThis field accepts only the specified formats. Timestamps can end with `Z` or `(\"+\" / \"-\") time-hour [\":\" time-minute]` . The time-secfrac after seconds is limited to a maximum of 9 digits. The offset is bounded by +/-18:00. 
Here are valid timestamp formats with examples:\n\n- `YYYY-MM-DDTHH:MM:SSZ` (for example, `2019-01-31T23:00:00Z` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ` (for example, `2019-01-31T23:00:00.123456789Z` )\n- `YYYY-MM-DDTHH:MM:SS+HH:MM` (for example, `2024-01-04T15:25:10+17:59` )\n- `YYYY-MM-DDTHH:MM:SS-HHMM` (for example, `2024-01-04T15:25:10-1759` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM` (for example, `2024-01-04T15:25:10.123456789+17:59` )", + "title": "FirstObservedAt", + "type": "array" + }, + "GeneratorId": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.StringFilter" + }, + "markdownDescription": "The identifier for the solution-specific component (a discrete unit of logic) that generated a finding. In various security findings providers' solutions, this generator can be called a rule, a check, a detector, a plugin, etc.", + "title": "GeneratorId", + "type": "array" + }, + "Id": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.StringFilter" + }, + "markdownDescription": "The security findings provider-specific identifier for a finding.", + "title": "Id", + "type": "array" + }, + "LastObservedAt": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.DateFilter" + }, + "markdownDescription": "A timestamp that indicates when the security findings provider most recently observed the potential security issue that a finding captured.\n\nThis field accepts only the specified formats. Timestamps can end with `Z` or `(\"+\" / \"-\") time-hour [\":\" time-minute]` . The time-secfrac after seconds is limited to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:\n\n- `YYYY-MM-DDTHH:MM:SSZ` (for example, `2019-01-31T23:00:00Z` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ` (for example, `2019-01-31T23:00:00.123456789Z` )\n- `YYYY-MM-DDTHH:MM:SS+HH:MM` (for example, `2024-01-04T15:25:10+17:59` )\n- `YYYY-MM-DDTHH:MM:SS-HHMM` (for example, `2024-01-04T15:25:10-1759` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM` (for example, `2024-01-04T15:25:10.123456789+17:59` )", + "title": "LastObservedAt", + "type": "array" + }, + "MalwareName": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.StringFilter" + }, + "markdownDescription": "The name of the malware that was observed.", + "title": "MalwareName", + "type": "array" + }, + "MalwarePath": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.StringFilter" + }, + "markdownDescription": "The filesystem path of the malware that was observed.", + "title": "MalwarePath", + "type": "array" + }, + "MalwareState": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.StringFilter" + }, + "markdownDescription": "The state of the malware that was observed.", + "title": "MalwareState", + "type": "array" + }, + "MalwareType": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.StringFilter" + }, + "markdownDescription": "The type of the malware that was observed.", + "title": "MalwareType", + "type": "array" + }, + "NetworkDestinationDomain": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.StringFilter" + }, + "markdownDescription": "The destination domain of network-related information about a finding.", + "title": "NetworkDestinationDomain", + "type": "array" + }, + "NetworkDestinationIpV4": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.IpFilter" + }, + "markdownDescription": "The destination IPv4 address of network-related information about a finding.", + "title": 
"NetworkDestinationIpV4", + "type": "array" + }, + "NetworkDestinationIpV6": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.IpFilter" + }, + "markdownDescription": "The destination IPv6 address of network-related information about a finding.", + "title": "NetworkDestinationIpV6", + "type": "array" + }, + "NetworkDestinationPort": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.NumberFilter" + }, + "markdownDescription": "The destination port of network-related information about a finding.", + "title": "NetworkDestinationPort", + "type": "array" + }, + "NetworkDirection": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.StringFilter" + }, + "markdownDescription": "Indicates the direction of network traffic associated with a finding.", + "title": "NetworkDirection", + "type": "array" + }, + "NetworkProtocol": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.StringFilter" + }, + "markdownDescription": "The protocol of network-related information about a finding.", + "title": "NetworkProtocol", + "type": "array" + }, + "NetworkSourceDomain": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.StringFilter" + }, + "markdownDescription": "The source domain of network-related information about a finding.", + "title": "NetworkSourceDomain", + "type": "array" + }, + "NetworkSourceIpV4": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.IpFilter" + }, + "markdownDescription": "The source IPv4 address of network-related information about a finding.", + "title": "NetworkSourceIpV4", + "type": "array" + }, + "NetworkSourceIpV6": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.IpFilter" + }, + "markdownDescription": "The source IPv6 address of network-related information about a finding.", + "title": "NetworkSourceIpV6", + "type": "array" + }, + "NetworkSourceMac": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.StringFilter" + }, + "markdownDescription": "The source media access control (MAC) address of network-related information about a finding.", + "title": "NetworkSourceMac", + "type": "array" + }, + "NetworkSourcePort": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.NumberFilter" + }, + "markdownDescription": "The source port of network-related information about a finding.", + "title": "NetworkSourcePort", + "type": "array" + }, + "NoteText": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.StringFilter" + }, + "markdownDescription": "The text of a note.", + "title": "NoteText", + "type": "array" + }, + "NoteUpdatedAt": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.DateFilter" + }, + "markdownDescription": "The timestamp of when the note was updated.", + "title": "NoteUpdatedAt", + "type": "array" + }, + "NoteUpdatedBy": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.StringFilter" + }, + "markdownDescription": "The principal that created a note.", + "title": "NoteUpdatedBy", + "type": "array" + }, + "ProcessLaunchedAt": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.DateFilter" + }, + "markdownDescription": "A timestamp that identifies when the process was launched.\n\nThis field accepts only the specified formats. Timestamps can end with `Z` or `(\"+\" / \"-\") time-hour [\":\" time-minute]` . The time-secfrac after seconds is limited to a maximum of 9 digits. The offset is bounded by +/-18:00. 
Here are valid timestamp formats with examples:\n\n- `YYYY-MM-DDTHH:MM:SSZ` (for example, `2019-01-31T23:00:00Z` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ` (for example, `2019-01-31T23:00:00.123456789Z` )\n- `YYYY-MM-DDTHH:MM:SS+HH:MM` (for example, `2024-01-04T15:25:10+17:59` )\n- `YYYY-MM-DDTHH:MM:SS-HHMM` (for example, `2024-01-04T15:25:10-1759` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM` (for example, `2024-01-04T15:25:10.123456789+17:59` )", + "title": "ProcessLaunchedAt", + "type": "array" + }, + "ProcessName": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.StringFilter" + }, + "markdownDescription": "The name of the process.", + "title": "ProcessName", + "type": "array" + }, + "ProcessParentPid": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.NumberFilter" + }, + "markdownDescription": "The parent process ID. This field accepts positive integers between `0` and `2147483647` .", + "title": "ProcessParentPid", + "type": "array" + }, + "ProcessPath": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.StringFilter" + }, + "markdownDescription": "The path to the process executable.", + "title": "ProcessPath", + "type": "array" + }, + "ProcessPid": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.NumberFilter" + }, + "markdownDescription": "The process ID.", + "title": "ProcessPid", + "type": "array" + }, + "ProcessTerminatedAt": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.DateFilter" + }, + "markdownDescription": "A timestamp that identifies when the process was terminated.\n\nThis field accepts only the specified formats. Timestamps can end with `Z` or `(\"+\" / \"-\") time-hour [\":\" time-minute]` . The time-secfrac after seconds is limited to a maximum of 9 digits. The offset is bounded by +/-18:00. 
Here are valid timestamp formats with examples:\n\n- `YYYY-MM-DDTHH:MM:SSZ` (for example, `2019-01-31T23:00:00Z` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ` (for example, `2019-01-31T23:00:00.123456789Z` )\n- `YYYY-MM-DDTHH:MM:SS+HH:MM` (for example, `2024-01-04T15:25:10+17:59` )\n- `YYYY-MM-DDTHH:MM:SS-HHMM` (for example, `2024-01-04T15:25:10-1759` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM` (for example, `2024-01-04T15:25:10.123456789+17:59` )", + "title": "ProcessTerminatedAt", + "type": "array" + }, + "ProductArn": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.StringFilter" + }, + "markdownDescription": "The ARN generated by Security Hub that uniquely identifies a third-party company (security findings provider) after this provider's product (solution that generates findings) is registered with Security Hub.", + "title": "ProductArn", + "type": "array" + }, + "ProductFields": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.MapFilter" + }, + "markdownDescription": "A data type where security findings providers can include additional solution-specific details that aren't part of the defined `AwsSecurityFinding` format.", + "title": "ProductFields", + "type": "array" + }, + "ProductName": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.StringFilter" + }, + "markdownDescription": "The name of the solution (product) that generates findings.", + "title": "ProductName", + "type": "array" + }, + "RecommendationText": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.StringFilter" + }, + "markdownDescription": "The recommendation of what to do about the issue described in a finding.", + "title": "RecommendationText", + "type": "array" + }, + "RecordState": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.StringFilter" + }, + "markdownDescription": "The updated record state for the finding.", + "title": "RecordState", + "type": "array" + }, + "Region": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.StringFilter" + }, + "markdownDescription": "The Region from which the finding was generated.", + "title": "Region", + "type": "array" + }, + "RelatedFindingsId": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.StringFilter" + }, + "markdownDescription": "The solution-generated identifier for a related finding.", + "title": "RelatedFindingsId", + "type": "array" + }, + "RelatedFindingsProductArn": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.StringFilter" + }, + "markdownDescription": "The ARN of the solution that generated a related finding.", + "title": "RelatedFindingsProductArn", + "type": "array" + }, + "ResourceApplicationArn": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.StringFilter" + }, + "markdownDescription": "The ARN of the application that is related to a finding.", + "title": "ResourceApplicationArn", + "type": "array" + }, + "ResourceApplicationName": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.StringFilter" + }, + "markdownDescription": "The name of the application that is related to a finding.", + "title": "ResourceApplicationName", + "type": "array" + }, + "ResourceAwsEc2InstanceIamInstanceProfileArn": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.StringFilter" + }, + "markdownDescription": "The IAM profile ARN of the instance.", + "title": "ResourceAwsEc2InstanceIamInstanceProfileArn", + "type": "array" + }, + "ResourceAwsEc2InstanceImageId": { + "items": { + "$ref": 
"#/definitions/AWS::SecurityHub::Insight.StringFilter" + }, + "markdownDescription": "The Amazon Machine Image (AMI) ID of the instance.", + "title": "ResourceAwsEc2InstanceImageId", + "type": "array" + }, + "ResourceAwsEc2InstanceIpV4Addresses": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.IpFilter" + }, + "markdownDescription": "The IPv4 addresses associated with the instance.", + "title": "ResourceAwsEc2InstanceIpV4Addresses", + "type": "array" + }, + "ResourceAwsEc2InstanceIpV6Addresses": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.IpFilter" + }, + "markdownDescription": "The IPv6 addresses associated with the instance.", + "title": "ResourceAwsEc2InstanceIpV6Addresses", + "type": "array" + }, + "ResourceAwsEc2InstanceKeyName": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.StringFilter" + }, + "markdownDescription": "The key name associated with the instance.", + "title": "ResourceAwsEc2InstanceKeyName", + "type": "array" + }, + "ResourceAwsEc2InstanceLaunchedAt": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.DateFilter" + }, + "markdownDescription": "The date and time the instance was launched.", + "title": "ResourceAwsEc2InstanceLaunchedAt", + "type": "array" + }, + "ResourceAwsEc2InstanceSubnetId": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.StringFilter" + }, + "markdownDescription": "The identifier of the subnet that the instance was launched in.", + "title": "ResourceAwsEc2InstanceSubnetId", + "type": "array" + }, + "ResourceAwsEc2InstanceType": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.StringFilter" + }, + "markdownDescription": "The instance type of the instance.", + "title": "ResourceAwsEc2InstanceType", + "type": "array" + }, + "ResourceAwsEc2InstanceVpcId": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.StringFilter" + }, + "markdownDescription": "The identifier of the VPC that the instance was launched in.", + "title": "ResourceAwsEc2InstanceVpcId", + "type": "array" + }, + "ResourceAwsIamAccessKeyCreatedAt": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.DateFilter" + }, + "markdownDescription": "The creation date/time of the IAM access key related to a finding.", + "title": "ResourceAwsIamAccessKeyCreatedAt", + "type": "array" + }, + "ResourceAwsIamAccessKeyPrincipalName": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.StringFilter" + }, + "markdownDescription": "The name of the principal that is associated with an IAM access key.", + "title": "ResourceAwsIamAccessKeyPrincipalName", + "type": "array" + }, + "ResourceAwsIamAccessKeyStatus": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.StringFilter" + }, + "markdownDescription": "The status of the IAM access key related to a finding.", + "title": "ResourceAwsIamAccessKeyStatus", + "type": "array" + }, + "ResourceAwsIamUserUserName": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.StringFilter" + }, + "markdownDescription": "The name of an IAM user.", + "title": "ResourceAwsIamUserUserName", + "type": "array" + }, + "ResourceAwsS3BucketOwnerId": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.StringFilter" + }, + "markdownDescription": "The canonical user ID of the owner of the S3 bucket.", + "title": "ResourceAwsS3BucketOwnerId", + "type": "array" + }, + "ResourceAwsS3BucketOwnerName": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.StringFilter" + }, + 
"markdownDescription": "The display name of the owner of the S3 bucket.", + "title": "ResourceAwsS3BucketOwnerName", + "type": "array" + }, + "ResourceContainerImageId": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.StringFilter" + }, + "markdownDescription": "The identifier of the image related to a finding.", + "title": "ResourceContainerImageId", + "type": "array" + }, + "ResourceContainerImageName": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.StringFilter" + }, + "markdownDescription": "The name of the image related to a finding.", + "title": "ResourceContainerImageName", + "type": "array" + }, + "ResourceContainerLaunchedAt": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.DateFilter" + }, + "markdownDescription": "A timestamp that identifies when the container was started.\n\nThis field accepts only the specified formats. Timestamps can end with `Z` or `(\"+\" / \"-\") time-hour [\":\" time-minute]` . The time-secfrac after seconds is limited to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:\n\n- `YYYY-MM-DDTHH:MM:SSZ` (for example, `2019-01-31T23:00:00Z` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ` (for example, `2019-01-31T23:00:00.123456789Z` )\n- `YYYY-MM-DDTHH:MM:SS+HH:MM` (for example, `2024-01-04T15:25:10+17:59` )\n- `YYYY-MM-DDTHH:MM:SS-HHMM` (for example, `2024-01-04T15:25:10-1759` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM` (for example, `2024-01-04T15:25:10.123456789+17:59` )", + "title": "ResourceContainerLaunchedAt", + "type": "array" + }, + "ResourceContainerName": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.StringFilter" + }, + "markdownDescription": "The name of the container related to a finding.", + "title": "ResourceContainerName", + "type": "array" + }, + "ResourceDetailsOther": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.MapFilter" + }, + "markdownDescription": "The details of a resource that doesn't have a specific subfield for the resource type defined.", + "title": "ResourceDetailsOther", + "type": "array" + }, + "ResourceId": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.StringFilter" + }, + "markdownDescription": "The canonical identifier for the given resource type.", + "title": "ResourceId", + "type": "array" + }, + "ResourcePartition": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.StringFilter" + }, + "markdownDescription": "The canonical AWS partition name that the Region is assigned to.", + "title": "ResourcePartition", + "type": "array" + }, + "ResourceRegion": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.StringFilter" + }, + "markdownDescription": "The canonical AWS external Region name where this resource is located.", + "title": "ResourceRegion", + "type": "array" + }, + "ResourceTags": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.MapFilter" + }, + "markdownDescription": "A list of AWS tags associated with a resource at the time the finding was processed.", + "title": "ResourceTags", + "type": "array" + }, + "ResourceType": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.StringFilter" + }, + "markdownDescription": "Specifies the type of the resource that details are provided for.", + "title": "ResourceType", + "type": "array" + }, + "Sample": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.BooleanFilter" + }, + "markdownDescription": "Indicates whether or not sample findings are 
included in the filter results.", + "title": "Sample", + "type": "array" + }, + "SeverityLabel": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.StringFilter" + }, + "markdownDescription": "The label of a finding's severity.", + "title": "SeverityLabel", + "type": "array" + }, + "SourceUrl": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.StringFilter" + }, + "markdownDescription": "A URL that links to a page about the current finding in the security findings provider's solution.", + "title": "SourceUrl", + "type": "array" + }, + "ThreatIntelIndicatorCategory": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.StringFilter" + }, + "markdownDescription": "The category of a threat intelligence indicator.", + "title": "ThreatIntelIndicatorCategory", + "type": "array" + }, + "ThreatIntelIndicatorLastObservedAt": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.DateFilter" + }, + "markdownDescription": "A timestamp that identifies the last observation of a threat intelligence indicator.", + "title": "ThreatIntelIndicatorLastObservedAt", + "type": "array" + }, + "ThreatIntelIndicatorSource": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.StringFilter" + }, + "markdownDescription": "The source of the threat intelligence.", + "title": "ThreatIntelIndicatorSource", + "type": "array" + }, + "ThreatIntelIndicatorSourceUrl": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.StringFilter" + }, + "markdownDescription": "The URL for more details from the source of the threat intelligence.", + "title": "ThreatIntelIndicatorSourceUrl", + "type": "array" + }, + "ThreatIntelIndicatorType": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.StringFilter" + }, + "markdownDescription": "The type of a threat intelligence indicator.", + "title": "ThreatIntelIndicatorType", + "type": "array" + }, + "ThreatIntelIndicatorValue": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.StringFilter" + }, + "markdownDescription": "The value of a threat intelligence indicator.", + "title": "ThreatIntelIndicatorValue", + "type": "array" + }, + "Title": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.StringFilter" + }, + "markdownDescription": "A finding's title.", + "title": "Title", + "type": "array" + }, + "Type": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.StringFilter" + }, + "markdownDescription": "A finding type in the format of `namespace/category/classifier` that classifies a finding.", + "title": "Type", + "type": "array" + }, + "UpdatedAt": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.DateFilter" + }, + "markdownDescription": "A timestamp that indicates when the security findings provider last updated the finding record.\n\nThis field accepts only the specified formats. Timestamps can end with `Z` or `(\"+\" / \"-\") time-hour [\":\" time-minute]` . The time-secfrac after seconds is limited to a maximum of 9 digits. The offset is bounded by +/-18:00. 
Here are valid timestamp formats with examples:\n\n- `YYYY-MM-DDTHH:MM:SSZ` (for example, `2019-01-31T23:00:00Z` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ` (for example, `2019-01-31T23:00:00.123456789Z` )\n- `YYYY-MM-DDTHH:MM:SS+HH:MM` (for example, `2024-01-04T15:25:10+17:59` )\n- `YYYY-MM-DDTHH:MM:SS-HHMM` (for example, `2024-01-04T15:25:10-1759` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM` (for example, `2024-01-04T15:25:10.123456789+17:59` )", + "title": "UpdatedAt", + "type": "array" + }, + "UserDefinedFields": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.MapFilter" + }, + "markdownDescription": "A list of name/value string pairs associated with the finding. These are custom, user-defined fields added to a finding.", + "title": "UserDefinedFields", + "type": "array" + }, + "VerificationState": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.StringFilter" + }, + "markdownDescription": "The veracity of a finding.", + "title": "VerificationState", + "type": "array" + }, + "VulnerabilitiesExploitAvailable": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.StringFilter" + }, + "markdownDescription": "Indicates whether a software vulnerability in your environment has a known exploit. You can filter findings by this field only if you use Security Hub and Amazon Inspector.", + "title": "VulnerabilitiesExploitAvailable", + "type": "array" + }, + "VulnerabilitiesFixAvailable": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.StringFilter" + }, + "markdownDescription": "Indicates whether a vulnerability is fixed in a newer version of the affected software packages. You can filter findings by this field only if you use Security Hub and Amazon Inspector.", + "title": "VulnerabilitiesFixAvailable", + "type": "array" + }, + "WorkflowState": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.StringFilter" + }, + "markdownDescription": "The workflow state of a finding.\n\nNote that this field is deprecated. To search for a finding based on its workflow status, use `WorkflowStatus` .", + "title": "WorkflowState", + "type": "array" + }, + "WorkflowStatus": { + "items": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.StringFilter" + }, + "markdownDescription": "The status of the investigation into a finding. Allowed values are the following.\n\n- `NEW` - The initial state of a finding, before it is reviewed.\n\nSecurity Hub also resets the workflow status from `NOTIFIED` or `RESOLVED` to `NEW` in the following cases:\n\n- `RecordState` changes from `ARCHIVED` to `ACTIVE` .\n- `Compliance.Status` changes from `PASSED` to either `WARNING` , `FAILED` , or `NOT_AVAILABLE` .\n- `NOTIFIED` - Indicates that the resource owner has been notified about the security issue. 
Used when the initial reviewer is not the resource owner, and needs intervention from the resource owner.\n\nIf one of the following occurs, the workflow status is changed automatically from `NOTIFIED` to `NEW` :\n\n- `RecordState` changes from `ARCHIVED` to `ACTIVE` .\n- `Compliance.Status` changes from `PASSED` to `FAILED` , `WARNING` , or `NOT_AVAILABLE` .\n- `SUPPRESSED` - Indicates that you reviewed the finding and do not believe that any action is needed.\n\nThe workflow status of a `SUPPRESSED` finding does not change if `RecordState` changes from `ARCHIVED` to `ACTIVE` .\n- `RESOLVED` - The finding was reviewed and remediated and is now considered resolved.\n\nThe finding remains `RESOLVED` unless one of the following occurs:\n\n- `RecordState` changes from `ARCHIVED` to `ACTIVE` .\n- `Compliance.Status` changes from `PASSED` to `FAILED` , `WARNING` , or `NOT_AVAILABLE` .\n\nIn those cases, the workflow status is automatically reset to `NEW` .\n\nFor findings from controls, if `Compliance.Status` is `PASSED` , then Security Hub automatically sets the workflow status to `RESOLVED` .", + "title": "WorkflowStatus", + "type": "array" + } + }, + "type": "object" + }, + "AWS::SecurityHub::Insight.BooleanFilter": { + "additionalProperties": false, + "properties": { + "Value": { + "markdownDescription": "The value of the boolean.", + "title": "Value", + "type": "boolean" + } + }, + "required": [ + "Value" + ], + "type": "object" + }, + "AWS::SecurityHub::Insight.DateFilter": { + "additionalProperties": false, + "properties": { + "DateRange": { + "$ref": "#/definitions/AWS::SecurityHub::Insight.DateRange", + "markdownDescription": "A date range for the date filter.", + "title": "DateRange" + }, + "End": { + "markdownDescription": "A timestamp that provides the end date for the date filter.\n\nThis field accepts only the specified formats. Timestamps can end with `Z` or `(\"+\" / \"-\") time-hour [\":\" time-minute]` . The time-secfrac after seconds is limited to a maximum of 9 digits. The offset is bounded by +/-18:00. Here are valid timestamp formats with examples:\n\n- `YYYY-MM-DDTHH:MM:SSZ` (for example, `2019-01-31T23:00:00Z` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ` (for example, `2019-01-31T23:00:00.123456789Z` )\n- `YYYY-MM-DDTHH:MM:SS+HH:MM` (for example, `2024-01-04T15:25:10+17:59` )\n- `YYYY-MM-DDTHH:MM:SS-HHMM` (for example, `2024-01-04T15:25:10-1759` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM` (for example, `2024-01-04T15:25:10.123456789+17:59` )", + "title": "End", + "type": "string" + }, + "Start": { + "markdownDescription": "A timestamp that provides the start date for the date filter.\n\nThis field accepts only the specified formats. Timestamps can end with `Z` or `(\"+\" / \"-\") time-hour [\":\" time-minute]` . The time-secfrac after seconds is limited to a maximum of 9 digits. The offset is bounded by +/-18:00. 
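Putting the filter definitions above together, a hedged example of a complete `AWS::SecurityHub::Insight`: the logical ID, insight name, and filter values are illustrative, while `EQUALS`, the workflow statuses, and `ACTIVE` come directly from the descriptions in this hunk.

```json
{
  "ActiveCriticalFindingsInsight": {
    "Type": "AWS::SecurityHub::Insight",
    "Properties": {
      "Name": "Active critical findings grouped by resource",
      "GroupByAttribute": "ResourceId",
      "Filters": {
        "SeverityLabel": [
          { "Comparison": "EQUALS", "Value": "CRITICAL" }
        ],
        "WorkflowStatus": [
          { "Comparison": "EQUALS", "Value": "NEW" },
          { "Comparison": "EQUALS", "Value": "NOTIFIED" }
        ],
        "RecordState": [
          { "Comparison": "EQUALS", "Value": "ACTIVE" }
        ]
      }
    }
  }
}
```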
Here are valid timestamp formats with examples:\n\n- `YYYY-MM-DDTHH:MM:SSZ` (for example, `2019-01-31T23:00:00Z` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmmZ` (for example, `2019-01-31T23:00:00.123456789Z` )\n- `YYYY-MM-DDTHH:MM:SS+HH:MM` (for example, `2024-01-04T15:25:10+17:59` )\n- `YYYY-MM-DDTHH:MM:SS-HHMM` (for example, `2024-01-04T15:25:10-1759` )\n- `YYYY-MM-DDTHH:MM:SS.mmmmmmmmm+HH:MM` (for example, `2024-01-04T15:25:10.123456789+17:59` )", + "title": "Start", + "type": "string" + } + }, + "type": "object" + }, + "AWS::SecurityHub::Insight.DateRange": { + "additionalProperties": false, + "properties": { + "Unit": { + "markdownDescription": "A date range unit for the date filter.", + "title": "Unit", + "type": "string" + }, + "Value": { + "markdownDescription": "A date range value for the date filter.", + "title": "Value", + "type": "number" + } + }, + "required": [ + "Unit", + "Value" + ], + "type": "object" + }, + "AWS::SecurityHub::Insight.IpFilter": { + "additionalProperties": false, + "properties": { + "Cidr": { + "markdownDescription": "A finding's CIDR value.", + "title": "Cidr", + "type": "string" + } + }, + "required": [ + "Cidr" + ], + "type": "object" + }, + "AWS::SecurityHub::Insight.MapFilter": { + "additionalProperties": false, + "properties": { + "Comparison": { + "markdownDescription": "The condition to apply to the key value when filtering Security Hub findings with a map filter.\n\nTo search for values that have the filter value, use one of the following comparison operators:\n\n- To search for values that include the filter value, use `CONTAINS` . For example, for the `ResourceTags` field, the filter `Department CONTAINS Security` matches findings that include the value `Security` for the `Department` tag. In the same example, a finding with a value of `Security team` for the `Department` tag is a match.\n- To search for values that exactly match the filter value, use `EQUALS` . For example, for the `ResourceTags` field, the filter `Department EQUALS Security` matches findings that have the value `Security` for the `Department` tag.\n\n`CONTAINS` and `EQUALS` filters on the same field are joined by `OR` . A finding matches if it matches any one of those filters. For example, the filters `Department CONTAINS Security OR Department CONTAINS Finance` match a finding that includes either `Security` , `Finance` , or both values.\n\nTo search for values that don't have the filter value, use one of the following comparison operators:\n\n- To search for values that exclude the filter value, use `NOT_CONTAINS` . For example, for the `ResourceTags` field, the filter `Department NOT_CONTAINS Finance` matches findings that exclude the value `Finance` for the `Department` tag.\n- To search for values other than the filter value, use `NOT_EQUALS` . For example, for the `ResourceTags` field, the filter `Department NOT_EQUALS Finance` matches findings that don\u2019t have the value `Finance` for the `Department` tag.\n\n`NOT_CONTAINS` and `NOT_EQUALS` filters on the same field are joined by `AND` . A finding matches only if it matches all of those filters. For example, the filters `Department NOT_CONTAINS Security AND Department NOT_CONTAINS Finance` match a finding that excludes both the `Security` and `Finance` values.\n\n`CONTAINS` filters can only be used with other `CONTAINS` filters. `NOT_CONTAINS` filters can only be used with other `NOT_CONTAINS` filters.\n\nYou can\u2019t have both a `CONTAINS` filter and a `NOT_CONTAINS` filter on the same field. 
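For illustration, the two forms of `DateFilter` defined above, shown as a fragment of the `Filters` object; `DAYS` is assumed to be the supported `DateRange` unit, and the timestamps are placeholders.

```json
{
  "CreatedAt": [
    { "DateRange": { "Unit": "DAYS", "Value": 7 } }
  ],
  "UpdatedAt": [
    { "Start": "2024-01-01T00:00:00Z", "End": "2024-01-31T23:59:59Z" }
  ]
}
```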
Similarly, you can\u2019t have both an `EQUALS` filter and a `NOT_EQUALS` filter on the same field. Combining filters in this way returns an error.\n\n`CONTAINS` and `NOT_CONTAINS` operators can be used only with automation rules. For more information, see [Automation rules](https://docs.aws.amazon.com/securityhub/latest/userguide/automation-rules.html) in the *AWS Security Hub User Guide* .", + "title": "Comparison", + "type": "string" + }, + "Key": { + "markdownDescription": "The key of the map filter. For example, for `ResourceTags` , `Key` identifies the name of the tag. For `UserDefinedFields` , `Key` is the name of the field.", + "title": "Key", + "type": "string" + }, + "Value": { + "markdownDescription": "The value for the key in the map filter. Filter values are case sensitive. For example, one of the values for a tag called `Department` might be `Security` . If you provide `security` as the filter value, then there's no match.", + "title": "Value", + "type": "string" + } + }, + "required": [ + "Comparison", + "Key", + "Value" + ], + "type": "object" + }, + "AWS::SecurityHub::Insight.NumberFilter": { + "additionalProperties": false, + "properties": { + "Eq": { + "markdownDescription": "The equal-to condition to be applied to a single field when querying for findings.", + "title": "Eq", + "type": "number" + }, + "Gte": { + "markdownDescription": "The greater-than-equal condition to be applied to a single field when querying for findings.", + "title": "Gte", + "type": "number" + }, + "Lte": { + "markdownDescription": "The less-than-equal condition to be applied to a single field when querying for findings.", + "title": "Lte", + "type": "number" + } + }, + "type": "object" + }, + "AWS::SecurityHub::Insight.StringFilter": { + "additionalProperties": false, + "properties": { + "Comparison": { + "markdownDescription": "The condition to apply to a string value when filtering Security Hub findings.\n\nTo search for values that have the filter value, use one of the following comparison operators:\n\n- To search for values that include the filter value, use `CONTAINS` . For example, the filter `Title CONTAINS CloudFront` matches findings that have a `Title` that includes the string CloudFront.\n- To search for values that exactly match the filter value, use `EQUALS` . For example, the filter `AwsAccountId EQUALS 123456789012` only matches findings that have an account ID of `123456789012` .\n- To search for values that start with the filter value, use `PREFIX` . For example, the filter `ResourceRegion PREFIX us` matches findings that have a `ResourceRegion` that starts with `us` . A `ResourceRegion` that starts with a different value, such as `af` , `ap` , or `ca` , doesn't match.\n\n`CONTAINS` , `EQUALS` , and `PREFIX` filters on the same field are joined by `OR` . A finding matches if it matches any one of those filters. For example, the filters `Title CONTAINS CloudFront OR Title CONTAINS CloudWatch` match a finding that includes either `CloudFront` , `CloudWatch` , or both strings in the title.\n\nTo search for values that don\u2019t have the filter value, use one of the following comparison operators:\n\n- To search for values that exclude the filter value, use `NOT_CONTAINS` . For example, the filter `Title NOT_CONTAINS CloudFront` matches findings that have a `Title` that excludes the string CloudFront.\n- To search for values other than the filter value, use `NOT_EQUALS` . 
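A short sketch of the `MapFilter` OR semantics described above, reusing the `Department`, `Security`, and `Finance` values from the description itself. Within an insight, `EQUALS` is the safe choice, since `CONTAINS` and `NOT_CONTAINS` are documented as usable only with automation rules.

```json
{
  "ResourceTags": [
    { "Comparison": "EQUALS", "Key": "Department", "Value": "Security" },
    { "Comparison": "EQUALS", "Key": "Department", "Value": "Finance" }
  ]
}
```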
For example, the filter `AwsAccountId NOT_EQUALS 123456789012` only matches findings that have an account ID other than `123456789012` .\n- To search for values that don't start with the filter value, use `PREFIX_NOT_EQUALS` . For example, the filter `ResourceRegion PREFIX_NOT_EQUALS us` matches findings with a `ResourceRegion` that starts with a value other than `us` .\n\n`NOT_CONTAINS` , `NOT_EQUALS` , and `PREFIX_NOT_EQUALS` filters on the same field are joined by `AND` . A finding matches only if it matches all of those filters. For example, the filters `Title NOT_CONTAINS CloudFront AND Title NOT_CONTAINS CloudWatch` match a finding that excludes both `CloudFront` and `CloudWatch` in the title.\n\nYou can\u2019t have both a `CONTAINS` filter and a `NOT_CONTAINS` filter on the same field. Similarly, you can't provide both an `EQUALS` filter and a `NOT_EQUALS` or `PREFIX_NOT_EQUALS` filter on the same field. Combining filters in this way returns an error. `CONTAINS` filters can only be used with other `CONTAINS` filters. `NOT_CONTAINS` filters can only be used with other `NOT_CONTAINS` filters.\n\nYou can combine `PREFIX` filters with `NOT_EQUALS` or `PREFIX_NOT_EQUALS` filters for the same field. Security Hub first processes the `PREFIX` filters, and then the `NOT_EQUALS` or `PREFIX_NOT_EQUALS` filters.\n\nFor example, for the following filters, Security Hub first identifies findings that have resource types that start with either `AwsIam` or `AwsEc2` . It then excludes findings that have a resource type of `AwsIamPolicy` and findings that have a resource type of `AwsEc2NetworkInterface` .\n\n- `ResourceType PREFIX AwsIam`\n- `ResourceType PREFIX AwsEc2`\n- `ResourceType NOT_EQUALS AwsIamPolicy`\n- `ResourceType NOT_EQUALS AwsEc2NetworkInterface`\n\n`CONTAINS` and `NOT_CONTAINS` operators can be used only with automation rules. For more information, see [Automation rules](https://docs.aws.amazon.com/securityhub/latest/userguide/automation-rules.html) in the *AWS Security Hub User Guide* .", + "title": "Comparison", + "type": "string" + }, + "Value": { + "markdownDescription": "The string filter value. Filter values are case sensitive. For example, the product name for control-based findings is `Security Hub` . 
If you provide `security hub` as the filter value, there's no match.",
+          "title": "Value",
+          "type": "string"
+        }
+      },
+      "required": [
+        "Comparison",
+        "Value"
+      ],
+      "type": "object"
+    },
+    "AWS::SecurityHub::ProductSubscription": {
+      "additionalProperties": false,
+      "properties": {
+        "Condition": {
+          "type": "string"
+        },
+        "DeletionPolicy": {
+          "enum": [
+            "Delete",
+            "Retain",
+            "Snapshot"
+          ],
+          "type": "string"
+        },
+        "DependsOn": {
+          "anyOf": [
+            {
+              "pattern": "^[a-zA-Z0-9]+$",
+              "type": "string"
+            },
+            {
+              "items": {
+                "pattern": "^[a-zA-Z0-9]+$",
+                "type": "string"
+              },
+              "type": "array"
+            }
+          ]
+        },
+        "Metadata": {
+          "type": "object"
+        },
+        "Properties": {
+          "additionalProperties": false,
+          "properties": {
+            "ProductArn": {
+              "markdownDescription": "The ARN of the product to enable the integration for.",
+              "title": "ProductArn",
+              "type": "string"
+            }
+          },
+          "required": [
+            "ProductArn"
+          ],
+          "type": "object"
+        },
+        "Type": {
+          "enum": [
+            "AWS::SecurityHub::ProductSubscription"
+          ],
+          "type": "string"
+        },
+        "UpdateReplacePolicy": {
+          "enum": [
+            "Delete",
+            "Retain",
+            "Snapshot"
+          ],
+          "type": "string"
+        }
+      },
+      "required": [
+        "Type",
+        "Properties"
+      ],
+      "type": "object"
+    },
     "AWS::SecurityHub::Standard": {
       "additionalProperties": false,
       "properties": {
@@ -249894,6 +254710,93 @@
         ],
         "type": "object"
       },
+    "AWS::SecurityLake::AwsLogSource": {
+      "additionalProperties": false,
+      "properties": {
+        "Condition": {
+          "type": "string"
+        },
+        "DeletionPolicy": {
+          "enum": [
+            "Delete",
+            "Retain",
+            "Snapshot"
+          ],
+          "type": "string"
+        },
+        "DependsOn": {
+          "anyOf": [
+            {
+              "pattern": "^[a-zA-Z0-9]+$",
+              "type": "string"
+            },
+            {
+              "items": {
+                "pattern": "^[a-zA-Z0-9]+$",
+                "type": "string"
+              },
+              "type": "array"
+            }
+          ]
+        },
+        "Metadata": {
+          "type": "object"
+        },
+        "Properties": {
+          "additionalProperties": false,
+          "properties": {
+            "Accounts": {
+              "items": {
+                "type": "string"
+              },
+              "markdownDescription": "Specify the AWS account information where you want to enable Security Lake.",
+              "title": "Accounts",
+              "type": "array"
+            },
+            "DataLakeArn": {
+              "markdownDescription": "The Amazon Resource Name (ARN) used to create the data lake.",
+              "title": "DataLakeArn",
+              "type": "string"
+            },
+            "SourceName": {
+              "markdownDescription": "The name for an AWS source. This must be a Regionally unique value. For the list of sources supported by Amazon Security Lake, see [Collecting data from AWS services](https://docs.aws.amazon.com//security-lake/latest/userguide/internal-sources.html) in the Amazon Security Lake User Guide.",
+              "title": "SourceName",
+              "type": "string"
+            },
+            "SourceVersion": {
+              "markdownDescription": "The version for an AWS source. For more details about source versions supported by Amazon Security Lake, see [OCSF source identification](https://docs.aws.amazon.com//security-lake/latest/userguide/open-cybersecurity-schema-framework.html#ocsf-source-identification) in the Amazon Security Lake User Guide. 
This must be a Regionally unique value.", + "title": "SourceVersion", + "type": "string" + } + }, + "required": [ + "DataLakeArn", + "SourceName", + "SourceVersion" + ], + "type": "object" + }, + "Type": { + "enum": [ + "AWS::SecurityLake::AwsLogSource" + ], + "type": "string" + }, + "UpdateReplacePolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + } + }, + "required": [ + "Type", + "Properties" + ], + "type": "object" + }, "AWS::SecurityLake::DataLake": { "additionalProperties": false, "properties": { @@ -249929,30 +254832,225 @@ "Properties": { "additionalProperties": false, "properties": { - "EncryptionConfiguration": { - "$ref": "#/definitions/AWS::SecurityLake::DataLake.EncryptionConfiguration" + "EncryptionConfiguration": { + "$ref": "#/definitions/AWS::SecurityLake::DataLake.EncryptionConfiguration", + "markdownDescription": "Provides encryption details of the Amazon Security Lake object.", + "title": "EncryptionConfiguration" + }, + "LifecycleConfiguration": { + "$ref": "#/definitions/AWS::SecurityLake::DataLake.LifecycleConfiguration", + "markdownDescription": "You can customize Security Lake to store data in your preferred AWS Regions for your preferred amount of time. Lifecycle management can help you comply with different compliance requirements. For more details, see [Lifecycle management](https://docs.aws.amazon.com//security-lake/latest/userguide/lifecycle-management.html) in the Amazon Security Lake User Guide.", + "title": "LifecycleConfiguration" + }, + "MetaStoreManagerRoleArn": { + "markdownDescription": "The Amazon Resource Name (ARN) used to create and update the AWS Glue table. This table contains partitions generated by the ingestion and normalization of AWS log sources and custom sources.", + "title": "MetaStoreManagerRoleArn", + "type": "string" + }, + "ReplicationConfiguration": { + "$ref": "#/definitions/AWS::SecurityLake::DataLake.ReplicationConfiguration", + "markdownDescription": "Provides replication details of Amazon Security Lake object.", + "title": "ReplicationConfiguration" + }, + "Tags": { + "items": { + "$ref": "#/definitions/Tag" + }, + "markdownDescription": "An array of objects, one for each tag to associate with the data lake configuration. For each tag, you must specify both a tag key and a tag value. 
A tag value cannot be null, but it can be an empty string.", + "title": "Tags", + "type": "array" + } + }, + "type": "object" + }, + "Type": { + "enum": [ + "AWS::SecurityLake::DataLake" + ], + "type": "string" + }, + "UpdateReplacePolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + } + }, + "required": [ + "Type" + ], + "type": "object" + }, + "AWS::SecurityLake::DataLake.EncryptionConfiguration": { + "additionalProperties": false, + "properties": { + "KmsKeyId": { + "markdownDescription": "The ID of KMS encryption key used by Amazon Security Lake to encrypt the Security Lake object.", + "title": "KmsKeyId", + "type": "string" + } + }, + "type": "object" + }, + "AWS::SecurityLake::DataLake.Expiration": { + "additionalProperties": false, + "properties": { + "Days": { + "markdownDescription": "The number of days before data expires in the Amazon Security Lake object.", + "title": "Days", + "type": "number" + } + }, + "type": "object" + }, + "AWS::SecurityLake::DataLake.LifecycleConfiguration": { + "additionalProperties": false, + "properties": { + "Expiration": { + "$ref": "#/definitions/AWS::SecurityLake::DataLake.Expiration", + "markdownDescription": "Provides data expiration details of the Amazon Security Lake object.", + "title": "Expiration" + }, + "Transitions": { + "items": { + "$ref": "#/definitions/AWS::SecurityLake::DataLake.Transitions" + }, + "markdownDescription": "Provides data storage transition details of Amazon Security Lake object. By configuring these settings, you can specify your preferred Amazon S3 storage class and the time period for S3 objects to stay in that storage class before they transition to a different storage class.", + "title": "Transitions", + "type": "array" + } + }, + "type": "object" + }, + "AWS::SecurityLake::DataLake.ReplicationConfiguration": { + "additionalProperties": false, + "properties": { + "Regions": { + "items": { + "type": "string" + }, + "markdownDescription": "Specifies one or more centralized rollup Regions. The AWS Region specified in the region parameter of the `CreateDataLake` or `UpdateDataLake` operations contributes data to the rollup Region or Regions specified in this parameter.\n\nReplication enables automatic, asynchronous copying of objects across Amazon S3 buckets. S3 buckets that are configured for object replication can be owned by the same AWS account or by different accounts. You can replicate objects to a single destination bucket or to multiple destination buckets. The destination buckets can be in different Regions or within the same Region as the source bucket.", + "title": "Regions", + "type": "array" + }, + "RoleArn": { + "markdownDescription": "Replication settings for the Amazon S3 buckets. This parameter uses the AWS Identity and Access Management (IAM) role you created that is managed by Security Lake , to ensure the replication setting is correct.", + "title": "RoleArn", + "type": "string" + } + }, + "type": "object" + }, + "AWS::SecurityLake::DataLake.Transitions": { + "additionalProperties": false, + "properties": { + "Days": { + "markdownDescription": "The number of days before data transitions to a different S3 Storage Class in the Amazon Security Lake object.", + "title": "Days", + "type": "number" + }, + "StorageClass": { + "markdownDescription": "The list of storage classes that you can choose from based on the data access, resiliency, and cost requirements of your workloads. 
The default storage class is S3 Standard.", + "title": "StorageClass", + "type": "string" + } + }, + "type": "object" + }, + "AWS::SecurityLake::Subscriber": { + "additionalProperties": false, + "properties": { + "Condition": { + "type": "string" + }, + "DeletionPolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + }, + "DependsOn": { + "anyOf": [ + { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + { + "items": { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + "type": "array" + } + ] + }, + "Metadata": { + "type": "object" + }, + "Properties": { + "additionalProperties": false, + "properties": { + "AccessTypes": { + "items": { + "type": "string" + }, + "markdownDescription": "You can choose to notify subscribers of new objects with an Amazon Simple Queue Service (Amazon SQS) queue or through messaging to an HTTPS endpoint provided by the subscriber.\n\nSubscribers can consume data by directly querying AWS Lake Formation tables in your Amazon S3 bucket through services like Amazon Athena. This subscription type is defined as `LAKEFORMATION` .", + "title": "AccessTypes", + "type": "array" }, - "LifecycleConfiguration": { - "$ref": "#/definitions/AWS::SecurityLake::DataLake.LifecycleConfiguration" + "DataLakeArn": { + "markdownDescription": "The Amazon Resource Name (ARN) used to create the data lake.", + "title": "DataLakeArn", + "type": "string" }, - "MetaStoreManagerRoleArn": { + "Sources": { + "items": { + "$ref": "#/definitions/AWS::SecurityLake::Subscriber.Source" + }, + "markdownDescription": "Amazon Security Lake supports log and event collection for natively supported AWS services . For more information, see the [Amazon Security Lake User Guide](https://docs.aws.amazon.com//security-lake/latest/userguide/source-management.html) .", + "title": "Sources", + "type": "array" + }, + "SubscriberDescription": { + "markdownDescription": "The subscriber descriptions for a subscriber account. The description for a subscriber includes `subscriberName` , `accountID` , `externalID` , and `subscriberId` .", + "title": "SubscriberDescription", "type": "string" }, - "ReplicationConfiguration": { - "$ref": "#/definitions/AWS::SecurityLake::DataLake.ReplicationConfiguration" + "SubscriberIdentity": { + "$ref": "#/definitions/AWS::SecurityLake::Subscriber.SubscriberIdentity", + "markdownDescription": "The AWS identity used to access your data.", + "title": "SubscriberIdentity" + }, + "SubscriberName": { + "markdownDescription": "The name of your Amazon Security Lake subscriber account.", + "title": "SubscriberName", + "type": "string" }, "Tags": { "items": { "$ref": "#/definitions/Tag" }, + "markdownDescription": "An array of objects, one for each tag to associate with the subscriber. For each tag, you must specify both a tag key and a tag value. 
A tag value cannot be null, but it can be an empty string.",
+          "title": "Tags",
+          "type": "array"
         }
       },
+      "required": [
+        "AccessTypes",
+        "DataLakeArn",
+        "Sources",
+        "SubscriberIdentity",
+        "SubscriberName"
+      ],
       "type": "object"
     },
     "Type": {
       "enum": [
-        "AWS::SecurityLake::DataLake"
+        "AWS::SecurityLake::Subscriber"
       ],
       "type": "string"
     },
@@ -249966,68 +255064,77 @@
     }
   },
   "required": [
-    "Type"
+    "Type",
+    "Properties"
   ],
   "type": "object"
 },
-    "AWS::SecurityLake::DataLake.EncryptionConfiguration": {
+    "AWS::SecurityLake::Subscriber.AwsLogSource": {
       "additionalProperties": false,
       "properties": {
-        "KmsKeyId": {
+        "SourceName": {
+          "markdownDescription": "Source name of the natively supported AWS service that is supported as an Amazon Security Lake source. For the list of sources supported by Amazon Security Lake, see [Collecting data from AWS services](https://docs.aws.amazon.com//security-lake/latest/userguide/internal-sources.html) in the Amazon Security Lake User Guide.",
+          "title": "SourceName",
+          "type": "string"
+        },
+        "SourceVersion": {
+          "markdownDescription": "Source version of the natively supported AWS service that is supported as an Amazon Security Lake source. For more details about source versions supported by Amazon Security Lake, see [OCSF source identification](https://docs.aws.amazon.com//security-lake/latest/userguide/open-cybersecurity-schema-framework.html#ocsf-source-identification) in the Amazon Security Lake User Guide.",
+          "title": "SourceVersion",
           "type": "string"
         }
       },
       "type": "object"
     },
-    "AWS::SecurityLake::DataLake.Expiration": {
-      "additionalProperties": false,
-      "properties": {
-        "Days": {
-          "type": "number"
-        }
-      },
-      "type": "object"
-    },
-    "AWS::SecurityLake::DataLake.LifecycleConfiguration": {
+    "AWS::SecurityLake::Subscriber.CustomLogSource": {
       "additionalProperties": false,
       "properties": {
-        "Expiration": {
-          "$ref": "#/definitions/AWS::SecurityLake::DataLake.Expiration"
+        "SourceName": {
+          "markdownDescription": "The name of the custom log source.",
+          "title": "SourceName",
+          "type": "string"
         },
-        "Transitions": {
-          "items": {
-            "$ref": "#/definitions/AWS::SecurityLake::DataLake.Transitions"
-          },
-          "type": "array"
+        "SourceVersion": {
+          "markdownDescription": "The source version of the custom log source.",
+          "title": "SourceVersion",
+          "type": "string"
         }
       },
       "type": "object"
     },
-    "AWS::SecurityLake::DataLake.ReplicationConfiguration": {
+    "AWS::SecurityLake::Subscriber.Source": {
      "additionalProperties": false,
      "properties": {
-        "Regions": {
-          "items": {
-            "type": "string"
-          },
-          "type": "array"
+        "AwsLogSource": {
+          "$ref": "#/definitions/AWS::SecurityLake::Subscriber.AwsLogSource",
+          "markdownDescription": "The natively supported AWS service which is used as an Amazon Security Lake source to collect logs and events from.",
+          "title": "AwsLogSource"
        },
-        "RoleArn": {
-          "type": "string"
+        "CustomLogSource": {
+          "$ref": "#/definitions/AWS::SecurityLake::Subscriber.CustomLogSource",
+          "markdownDescription": "The custom log source which is used as an Amazon Security Lake source to collect logs and events from.",
+          "title": "CustomLogSource"
        }
      },
      "type": "object"
    },
-    "AWS::SecurityLake::DataLake.Transitions": {
+    "AWS::SecurityLake::Subscriber.SubscriberIdentity": {
      "additionalProperties": false,
      "properties": {
-        "Days": {
-          "type": "number"
+        "ExternalId": {
+          "markdownDescription": "The external ID is a unique identifier that the subscriber provides to you.",
+          "title": "ExternalId",
+          "type": "string"
        },
-        "StorageClass": {
+        "Principal": {
+          
"markdownDescription": "Principals can include accounts, users, roles, federated users, or AWS services.", + "title": "Principal", "type": "string" } }, + "required": [ + "ExternalId", + "Principal" + ], "type": "object" }, "AWS::ServiceCatalog::AcceptedPortfolioShare": { @@ -254442,36 +259549,241 @@ "Properties": { "additionalProperties": false, "properties": { - "Name": { - "markdownDescription": "A name for the group. It can include any Unicode characters.\n\nThe names for all groups in your account, across all Regions, must be unique.", - "title": "Name", + "Name": { + "markdownDescription": "A name for the group. It can include any Unicode characters.\n\nThe names for all groups in your account, across all Regions, must be unique.", + "title": "Name", + "type": "string" + }, + "ResourceArns": { + "items": { + "type": "string" + }, + "markdownDescription": "The ARNs of the canaries that you want to associate with this group.", + "title": "ResourceArns", + "type": "array" + }, + "Tags": { + "items": { + "$ref": "#/definitions/Tag" + }, + "markdownDescription": "The list of key-value pairs that are associated with the group.", + "title": "Tags", + "type": "array" + } + }, + "required": [ + "Name" + ], + "type": "object" + }, + "Type": { + "enum": [ + "AWS::Synthetics::Group" + ], + "type": "string" + }, + "UpdateReplacePolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + } + }, + "required": [ + "Type", + "Properties" + ], + "type": "object" + }, + "AWS::SystemsManagerSAP::Application": { + "additionalProperties": false, + "properties": { + "Condition": { + "type": "string" + }, + "DeletionPolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + }, + "DependsOn": { + "anyOf": [ + { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + { + "items": { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + "type": "array" + } + ] + }, + "Metadata": { + "type": "object" + }, + "Properties": { + "additionalProperties": false, + "properties": { + "ApplicationId": { + "markdownDescription": "The ID of the application.", + "title": "ApplicationId", + "type": "string" + }, + "ApplicationType": { + "markdownDescription": "The type of the application.", + "title": "ApplicationType", + "type": "string" + }, + "Credentials": { + "items": { + "$ref": "#/definitions/AWS::SystemsManagerSAP::Application.Credential" + }, + "markdownDescription": "The credentials of the SAP application.", + "title": "Credentials", + "type": "array" + }, + "Instances": { + "items": { + "type": "string" + }, + "markdownDescription": "The Amazon EC2 instances on which your SAP application is running.", + "title": "Instances", + "type": "array" + }, + "SapInstanceNumber": { + "markdownDescription": "The SAP instance number of the application.", + "title": "SapInstanceNumber", + "type": "string" + }, + "Sid": { + "markdownDescription": "The System ID of the application.", + "title": "Sid", + "type": "string" + }, + "Tags": { + "items": { + "$ref": "#/definitions/Tag" + }, + "markdownDescription": "The tags on the application.", + "title": "Tags", + "type": "array" + } + }, + "required": [ + "ApplicationId", + "ApplicationType" + ], + "type": "object" + }, + "Type": { + "enum": [ + "AWS::SystemsManagerSAP::Application" + ], + "type": "string" + }, + "UpdateReplacePolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + } + }, + "required": [ + "Type", + "Properties" + ], + "type": "object" + }, + 
"AWS::SystemsManagerSAP::Application.Credential": { + "additionalProperties": false, + "properties": { + "CredentialType": { + "markdownDescription": "The type of the application credentials.", + "title": "CredentialType", + "type": "string" + }, + "DatabaseName": { + "markdownDescription": "The name of the SAP HANA database.", + "title": "DatabaseName", + "type": "string" + }, + "SecretId": { + "markdownDescription": "The secret ID created in AWS Secrets Manager to store the credentials of the SAP application.", + "title": "SecretId", + "type": "string" + } + }, + "type": "object" + }, + "AWS::Timestream::Database": { + "additionalProperties": false, + "properties": { + "Condition": { + "type": "string" + }, + "DeletionPolicy": { + "enum": [ + "Delete", + "Retain", + "Snapshot" + ], + "type": "string" + }, + "DependsOn": { + "anyOf": [ + { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + { + "items": { + "pattern": "^[a-zA-Z0-9]+$", + "type": "string" + }, + "type": "array" + } + ] + }, + "Metadata": { + "type": "object" + }, + "Properties": { + "additionalProperties": false, + "properties": { + "DatabaseName": { + "markdownDescription": "The name of the Timestream database.\n\n*Length Constraints* : Minimum length of 3 bytes. Maximum length of 256 bytes.", + "title": "DatabaseName", "type": "string" }, - "ResourceArns": { - "items": { - "type": "string" - }, - "markdownDescription": "The ARNs of the canaries that you want to associate with this group.", - "title": "ResourceArns", - "type": "array" + "KmsKeyId": { + "markdownDescription": "The identifier of the AWS KMS key used to encrypt the data stored in the database.", + "title": "KmsKeyId", + "type": "string" }, "Tags": { "items": { "$ref": "#/definitions/Tag" }, - "markdownDescription": "The list of key-value pairs that are associated with the group.", + "markdownDescription": "The tags to add to the database.", "title": "Tags", "type": "array" } }, - "required": [ - "Name" - ], "type": "object" }, "Type": { "enum": [ - "AWS::Synthetics::Group" + "AWS::Timestream::Database" ], "type": "string" }, @@ -254485,12 +259797,11 @@ } }, "required": [ - "Type", - "Properties" + "Type" ], "type": "object" }, - "AWS::SystemsManagerSAP::Application": { + "AWS::Timestream::InfluxDBInstance": { "additionalProperties": false, "properties": { "Condition": { @@ -254525,60 +259836,96 @@ "Properties": { "additionalProperties": false, "properties": { - "ApplicationId": { - "markdownDescription": "The ID of the application.", - "title": "ApplicationId", + "AllocatedStorage": { + "markdownDescription": "The amount of storage to allocate for your DB storage type in GiB (gibibytes).", + "title": "AllocatedStorage", + "type": "number" + }, + "Bucket": { + "markdownDescription": "The name of the initial InfluxDB bucket. All InfluxDB data is stored in a bucket. A bucket combines the concept of a database and a retention period (the duration of time that each data point persists). 
A bucket belongs to an organization.",
+          "title": "Bucket",
           "type": "string"
         },
-        "ApplicationType": {
-          "markdownDescription": "The type of the application.",
-          "title": "ApplicationType",
+        "DbInstanceType": {
+          "markdownDescription": "The Timestream for InfluxDB DB instance type to run on.",
+          "title": "DbInstanceType",
           "type": "string"
         },
-        "Credentials": {
-          "items": {
-            "$ref": "#/definitions/AWS::SystemsManagerSAP::Application.Credential"
-          },
-          "markdownDescription": "The credentials of the SAP application.",
-          "title": "Credentials",
-          "type": "array"
+        "DbParameterGroupIdentifier": {
+          "markdownDescription": "The name or ID of the DB parameter group to assign to your DB instance. DB parameter groups specify how the database is configured. For example, DB parameter groups can specify the limit for query concurrency.",
+          "title": "DbParameterGroupIdentifier",
+          "type": "string"
         },
-        "Instances": {
-          "items": {
-            "type": "string"
-          },
-          "markdownDescription": "The Amazon EC2 instances on which your SAP application is running.",
-          "title": "Instances",
-          "type": "array"
+        "DbStorageType": {
+          "markdownDescription": "The Timestream for InfluxDB DB storage type to read and write InfluxDB data.\n\nYou can choose between 3 different types of provisioned Influx IOPS included storage according to your workload's requirements:\n\n- Influx IO Included 3000 IOPS\n- Influx IO Included 12000 IOPS\n- Influx IO Included 16000 IOPS",
+          "title": "DbStorageType",
+          "type": "string"
         },
-        "SapInstanceNumber": {
-          "markdownDescription": "The SAP instance number of the application.",
-          "title": "SapInstanceNumber",
+        "DeploymentType": {
+          "markdownDescription": "Specifies whether the Timestream for InfluxDB DB instance is deployed as Single-AZ or with a Multi-AZ standby for high availability.",
+          "title": "DeploymentType",
           "type": "string"
         },
-        "Sid": {
-          "markdownDescription": "The System ID of the application.",
-          "title": "Sid",
+        "LogDeliveryConfiguration": {
+          "$ref": "#/definitions/AWS::Timestream::InfluxDBInstance.LogDeliveryConfiguration",
+          "markdownDescription": "Configuration for sending InfluxDB engine logs to a specified S3 bucket.",
+          "title": "LogDeliveryConfiguration"
+        },
+        "Name": {
+          "markdownDescription": "The name that uniquely identifies the DB instance when interacting with the Amazon Timestream for InfluxDB API and CLI commands. This name will also be a prefix included in the endpoint. DB instance names must be unique per customer and per Region.",
+          "title": "Name",
+          "type": "string"
+        },
+        "Organization": {
+          "markdownDescription": "The name of the initial organization for the initial admin user in InfluxDB. An InfluxDB organization is a workspace for a group of users.",
+          "title": "Organization",
+          "type": "string"
+        },
+        "Password": {
+          "markdownDescription": "The password of the initial admin user created in InfluxDB. This password will allow you to access the InfluxDB UI to perform various administrative tasks and also use the InfluxDB CLI to create an operator token. 
These attributes will be stored in a secret created in AWS Secrets Manager in your account.",
+          "title": "Password",
+          "type": "string"
         },
+        "PubliclyAccessible": {
+          "markdownDescription": "Configures the DB instance with a public IP to facilitate access.",
+          "title": "PubliclyAccessible",
+          "type": "boolean"
+        },
         "Tags": {
           "items": {
             "$ref": "#/definitions/Tag"
           },
-          "markdownDescription": "The tags on the application.",
+          "markdownDescription": "A list of key-value pairs to associate with the DB instance.",
           "title": "Tags",
           "type": "array"
+        },
+        "Username": {
+          "markdownDescription": "The username of the initial admin user created in InfluxDB. Must start with a letter and can't end with a hyphen or contain two consecutive hyphens. For example, my-user1. This username will allow you to access the InfluxDB UI to perform various administrative tasks and also use the InfluxDB CLI to create an operator token. These attributes will be stored in a secret created in AWS Secrets Manager in your account.",
+          "title": "Username",
+          "type": "string"
+        },
+        "VpcSecurityGroupIds": {
+          "items": {
+            "type": "string"
+          },
+          "markdownDescription": "A list of VPC security group IDs to associate with the DB instance.",
+          "title": "VpcSecurityGroupIds",
+          "type": "array"
+        },
+        "VpcSubnetIds": {
+          "items": {
+            "type": "string"
+          },
+          "markdownDescription": "A list of VPC subnet IDs to associate with the DB instance. Provide at least two VPC subnet IDs in different availability zones when deploying with a Multi-AZ standby.",
+          "title": "VpcSubnetIds",
+          "type": "array"
         }
       },
-      "required": [
-        "ApplicationId",
-        "ApplicationType"
-      ],
       "type": "object"
     },
     "Type": {
       "enum": [
-        "AWS::SystemsManagerSAP::Application"
+        "AWS::Timestream::InfluxDBInstance"
       ],
       "type": "string"
     },
@@ -254592,105 +259939,41 @@
     }
   },
   "required": [
-    "Type",
-    "Properties"
+    "Type"
   ],
   "type": "object"
 },
-    "AWS::SystemsManagerSAP::Application.Credential": {
+    "AWS::Timestream::InfluxDBInstance.LogDeliveryConfiguration": {
       "additionalProperties": false,
       "properties": {
-        "CredentialType": {
-          "markdownDescription": "The type of the application credentials.",
-          "title": "CredentialType",
-          "type": "string"
-        },
-        "DatabaseName": {
-          "markdownDescription": "The name of the SAP HANA database.",
-          "title": "DatabaseName",
-          "type": "string"
-        },
-        "SecretId": {
-          "markdownDescription": "The secret ID created in AWS Secrets Manager to store the credentials of the SAP application.",
-          "title": "SecretId",
-          "type": "string"
+        "S3Configuration": {
+          "$ref": "#/definitions/AWS::Timestream::InfluxDBInstance.S3Configuration",
+          "markdownDescription": "Configuration for S3 bucket log delivery.",
+          "title": "S3Configuration"
         }
       },
+      "required": [
+        "S3Configuration"
+      ],
      "type": "object"
    },
-    "AWS::Timestream::Database": {
+    "AWS::Timestream::InfluxDBInstance.S3Configuration": {
      "additionalProperties": false,
      "properties": {
-        "Condition": {
-          "type": "string"
-        },
-        "DeletionPolicy": {
-          "enum": [
-            "Delete",
-            "Retain",
-            "Snapshot"
-          ],
-          "type": "string"
-        },
-        "DependsOn": {
-          "anyOf": [
-            {
-              "pattern": "^[a-zA-Z0-9]+$",
-              "type": "string"
-            },
-            {
-              "items": {
-                "pattern": "^[a-zA-Z0-9]+$",
-                "type": "string"
-              },
-              "type": "array"
-            }
-          ]
-        },
-        "Metadata": {
-          "type": "object"
-        },
-        "Properties": {
-          "additionalProperties": false,
-          "properties": {
-            "DatabaseName": {
-              "markdownDescription": "The name of the Timestream database.\n\n*Length Constraints* : Minimum length of 3 bytes. 
Maximum length of 256 bytes.", - "title": "DatabaseName", - "type": "string" - }, - "KmsKeyId": { - "markdownDescription": "The identifier of the AWS KMS key used to encrypt the data stored in the database.", - "title": "KmsKeyId", - "type": "string" - }, - "Tags": { - "items": { - "$ref": "#/definitions/Tag" - }, - "markdownDescription": "The tags to add to the database.", - "title": "Tags", - "type": "array" - } - }, - "type": "object" - }, - "Type": { - "enum": [ - "AWS::Timestream::Database" - ], + "BucketName": { + "markdownDescription": "The bucket name of the customer S3 bucket.", + "title": "BucketName", "type": "string" }, - "UpdateReplacePolicy": { - "enum": [ - "Delete", - "Retain", - "Snapshot" - ], - "type": "string" + "Enabled": { + "markdownDescription": "Indicates whether log delivery to the S3 bucket is enabled.", + "title": "Enabled", + "type": "boolean" } }, "required": [ - "Type" + "BucketName", + "Enabled" ], "type": "object" }, @@ -255456,7 +260739,7 @@ "type": "array" }, "Usage": { - "markdownDescription": "Specifies whether this certificate is used for signing or encryption.", + "markdownDescription": "Specifies how this certificate is used. It can be used in the following ways:\n\n- `SIGNING` : For signing AS2 messages\n- `ENCRYPTION` : For encrypting AS2 messages\n- `TLS` : For securing AS2 communications sent over HTTPS", "title": "Usage", "type": "string" } @@ -255784,7 +261067,7 @@ "type": "string" }, "Domain": { - "markdownDescription": "Specifies the domain of the storage system that is used for file transfers.", + "markdownDescription": "Specifies the domain of the storage system that is used for file transfers. There are two domains available: Amazon Simple Storage Service (Amazon S3) and Amazon Elastic File System (Amazon EFS). The default value is S3.", "title": "Domain", "type": "string" }, @@ -255842,7 +261125,7 @@ "title": "S3StorageOptions" }, "SecurityPolicyName": { - "markdownDescription": "Specifies the name of the security policy that is attached to the server.", + "markdownDescription": "Specifies the name of the security policy for the server.", "title": "SecurityPolicyName", "type": "string" }, @@ -256605,7 +261888,7 @@ "properties": { "Configuration": { "$ref": "#/definitions/AWS::VerifiedPermissions::IdentitySource.IdentitySourceConfiguration", - "markdownDescription": "Contains configuration information used when creating a new identity source.\n\n> At this time, the only valid member of this structure is a Amazon Cognito user pool configuration.\n> \n> You must specify a `userPoolArn` , and optionally, a `ClientId` . \n\nThis data type is used as a request parameter for the [CreateIdentitySource](https://docs.aws.amazon.com/verifiedpermissions/latest/apireference/API_CreateIdentitySource.html) operation.", + "markdownDescription": "Contains configuration information about an identity source.", "title": "Configuration" }, "PolicyStoreId": { @@ -256646,6 +261929,20 @@ ], "type": "object" }, + "AWS::VerifiedPermissions::IdentitySource.CognitoGroupConfiguration": { + "additionalProperties": false, + "properties": { + "GroupEntityType": { + "markdownDescription": "The name of the schema entity type that's mapped to the user pool group. 
Defaults to `AWS::CognitoGroup` .", + "title": "GroupEntityType", + "type": "string" + } + }, + "required": [ + "GroupEntityType" + ], + "type": "object" + }, "AWS::VerifiedPermissions::IdentitySource.CognitoUserPoolConfiguration": { "additionalProperties": false, "properties": { @@ -256657,6 +261954,11 @@ "title": "ClientIds", "type": "array" }, + "GroupConfiguration": { + "$ref": "#/definitions/AWS::VerifiedPermissions::IdentitySource.CognitoGroupConfiguration", + "markdownDescription": "The type of entity that a policy store maps to groups from an Amazon Cognito user pool identity source.", + "title": "GroupConfiguration" + }, "UserPoolArn": { "markdownDescription": "The [Amazon Resource Name (ARN)](https://docs.aws.amazon.com//general/latest/gr/aws-arns-and-namespaces.html) of the Amazon Cognito user pool that contains the identities to be authorized.", "title": "UserPoolArn", @@ -256682,35 +261984,6 @@ ], "type": "object" }, - "AWS::VerifiedPermissions::IdentitySource.IdentitySourceDetails": { - "additionalProperties": false, - "properties": { - "ClientIds": { - "items": { - "type": "string" - }, - "markdownDescription": "The application client IDs associated with the specified Amazon Cognito user pool that are enabled for this identity source.", - "title": "ClientIds", - "type": "array" - }, - "DiscoveryUrl": { - "markdownDescription": "The well-known URL that points to this user pool's OIDC discovery endpoint. This is a URL string in the following format. This URL replaces the placeholders for both the AWS Region and the user pool identifier with those appropriate for this user pool.\n\n`https://cognito-idp. ** .amazonaws.com/ ** /.well-known/openid-configuration`", - "title": "DiscoveryUrl", - "type": "string" - }, - "OpenIdIssuer": { - "markdownDescription": "A string that identifies the type of OIDC service represented by this identity source.\n\nAt this time, the only valid value is `cognito` .", - "title": "OpenIdIssuer", - "type": "string" - }, - "UserPoolArn": { - "markdownDescription": "The [Amazon Resource Name (ARN)](https://docs.aws.amazon.com//general/latest/gr/aws-arns-and-namespaces.html) of the Amazon Cognito user pool whose identities are accessible to this Verified Permissions policy store.", - "title": "UserPoolArn", - "type": "string" - } - }, - "type": "object" - }, "AWS::VerifiedPermissions::Policy": { "additionalProperties": false, "properties": { @@ -264447,7 +269720,7 @@ "items": { "type": "string" }, - "markdownDescription": "The fields from the source that are made available to your agents in Amazon Q. Optional if ObjectConfiguration is included in the provided DataIntegration.\n\n- For [Salesforce](https://docs.aws.amazon.com/https://developer.salesforce.com/docs/atlas.en-us.knowledge_dev.meta/knowledge_dev/sforce_api_objects_knowledge__kav.htm) , you must include at least `Id` , `ArticleNumber` , `VersionNumber` , `Title` , `PublishStatus` , and `IsDeleted` .\n- For [ServiceNow](https://docs.aws.amazon.com/https://developer.servicenow.com/dev.do#!/reference/api/rome/rest/knowledge-management-api) , you must include at least `number` , `short_description` , `sys_mod_count` , `workflow_state` , and `active` .\n- For [Zendesk](https://docs.aws.amazon.com/https://developer.zendesk.com/api-reference/help_center/help-center-api/articles/) , you must include at least `id` , `title` , `updated_at` , and `draft` .\n\nMake sure to include additional fields. 
These fields are indexed and used to source recommendations.", + "markdownDescription": "The fields from the source that are made available to your agents in Amazon Q in Connect. Optional if ObjectConfiguration is included in the provided DataIntegration.\n\n- For [Salesforce](https://docs.aws.amazon.com/https://developer.salesforce.com/docs/atlas.en-us.knowledge_dev.meta/knowledge_dev/sforce_api_objects_knowledge__kav.htm) , you must include at least `Id` , `ArticleNumber` , `VersionNumber` , `Title` , `PublishStatus` , and `IsDeleted` .\n- For [ServiceNow](https://docs.aws.amazon.com/https://developer.servicenow.com/dev.do#!/reference/api/rome/rest/knowledge-management-api) , you must include at least `number` , `short_description` , `sys_mod_count` , `workflow_state` , and `active` .\n- For [Zendesk](https://docs.aws.amazon.com/https://developer.zendesk.com/api-reference/help_center/help-center-api/articles/) , you must include at least `id` , `title` , `updated_at` , and `draft` .\n\nMake sure to include additional fields. These fields are indexed and used to source recommendations.", "title": "ObjectFields", "type": "array" } @@ -264773,12 +270046,12 @@ "type": "string" }, "DesktopArn": { - "markdownDescription": "The Amazon Resource Name (ARN) of the desktop to stream from Amazon WorkSpaces , WorkSpaces Web, or AppStream 2.0 .", + "markdownDescription": "The Amazon Resource Name (ARN) of the desktop to stream from Amazon WorkSpaces, WorkSpaces Web, or AppStream 2.0.", "title": "DesktopArn", "type": "string" }, "DesktopEndpoint": { - "markdownDescription": "The URL for the identity provider login (only for environments that use AppStream 2.0 ).", + "markdownDescription": "The URL for the identity provider login (only for environments that use AppStream 2.0).", "title": "DesktopEndpoint", "type": "string" }, @@ -266816,6 +272089,18 @@ { "$ref": "#/definitions/AWS::Batch::SchedulingPolicy" }, + { + "$ref": "#/definitions/AWS::Bedrock::Agent" + }, + { + "$ref": "#/definitions/AWS::Bedrock::AgentAlias" + }, + { + "$ref": "#/definitions/AWS::Bedrock::DataSource" + }, + { + "$ref": "#/definitions/AWS::Bedrock::KnowledgeBase" + }, { "$ref": "#/definitions/AWS::BillingConductor::BillingGroup" }, @@ -266879,6 +272164,12 @@ { "$ref": "#/definitions/AWS::CleanRooms::Membership" }, + { + "$ref": "#/definitions/AWS::CleanRooms::PrivacyBudgetTemplate" + }, + { + "$ref": "#/definitions/AWS::CleanRoomsML::TrainingDataset" + }, { "$ref": "#/definitions/AWS::Cloud9::EnvironmentEC2" }, @@ -267026,6 +272317,9 @@ { "$ref": "#/definitions/AWS::CodeCommit::Repository" }, + { + "$ref": "#/definitions/AWS::CodeConnections::Connection" + }, { "$ref": "#/definitions/AWS::CodeDeploy::Application" }, @@ -267365,6 +272659,30 @@ { "$ref": "#/definitions/AWS::DataZone::SubscriptionTarget" }, + { + "$ref": "#/definitions/AWS::Deadline::Farm" + }, + { + "$ref": "#/definitions/AWS::Deadline::Fleet" + }, + { + "$ref": "#/definitions/AWS::Deadline::LicenseEndpoint" + }, + { + "$ref": "#/definitions/AWS::Deadline::MeteredProduct" + }, + { + "$ref": "#/definitions/AWS::Deadline::Queue" + }, + { + "$ref": "#/definitions/AWS::Deadline::QueueEnvironment" + }, + { + "$ref": "#/definitions/AWS::Deadline::QueueFleetAssociation" + }, + { + "$ref": "#/definitions/AWS::Deadline::StorageProfile" + }, { "$ref": "#/definitions/AWS::Detective::Graph" }, @@ -267707,6 +273025,9 @@ { "$ref": "#/definitions/AWS::ECR::Repository" }, + { + "$ref": "#/definitions/AWS::ECR::RepositoryCreationTemplate" + }, { "$ref": 
"#/definitions/AWS::ECS::CapacityProvider" }, @@ -267860,9 +273181,15 @@ { "$ref": "#/definitions/AWS::EntityResolution::IdMappingWorkflow" }, + { + "$ref": "#/definitions/AWS::EntityResolution::IdNamespace" + }, { "$ref": "#/definitions/AWS::EntityResolution::MatchingWorkflow" }, + { + "$ref": "#/definitions/AWS::EntityResolution::PolicyStatement" + }, { "$ref": "#/definitions/AWS::EntityResolution::SchemaMapping" }, @@ -268004,6 +273331,9 @@ { "$ref": "#/definitions/AWS::GlobalAccelerator::Accelerator" }, + { + "$ref": "#/definitions/AWS::GlobalAccelerator::CrossAccountAttachment" + }, { "$ref": "#/definitions/AWS::GlobalAccelerator::EndpointGroup" }, @@ -268211,15 +273541,24 @@ { "$ref": "#/definitions/AWS::IVS::Channel" }, + { + "$ref": "#/definitions/AWS::IVS::EncoderConfiguration" + }, { "$ref": "#/definitions/AWS::IVS::PlaybackKeyPair" }, + { + "$ref": "#/definitions/AWS::IVS::PlaybackRestrictionPolicy" + }, { "$ref": "#/definitions/AWS::IVS::RecordingConfiguration" }, { "$ref": "#/definitions/AWS::IVS::Stage" }, + { + "$ref": "#/definitions/AWS::IVS::StorageConfiguration" + }, { "$ref": "#/definitions/AWS::IVS::StreamKey" }, @@ -269723,15 +275062,30 @@ { "$ref": "#/definitions/AWS::SecurityHub::AutomationRule" }, + { + "$ref": "#/definitions/AWS::SecurityHub::DelegatedAdmin" + }, { "$ref": "#/definitions/AWS::SecurityHub::Hub" }, + { + "$ref": "#/definitions/AWS::SecurityHub::Insight" + }, + { + "$ref": "#/definitions/AWS::SecurityHub::ProductSubscription" + }, { "$ref": "#/definitions/AWS::SecurityHub::Standard" }, + { + "$ref": "#/definitions/AWS::SecurityLake::AwsLogSource" + }, { "$ref": "#/definitions/AWS::SecurityLake::DataLake" }, + { + "$ref": "#/definitions/AWS::SecurityLake::Subscriber" + }, { "$ref": "#/definitions/AWS::ServiceCatalog::AcceptedPortfolioShare" }, @@ -269861,6 +275215,9 @@ { "$ref": "#/definitions/AWS::Timestream::Database" }, + { + "$ref": "#/definitions/AWS::Timestream::InfluxDBInstance" + }, { "$ref": "#/definitions/AWS::Timestream::ScheduledQuery" },