I tried to build an ECS cluster with an ALB in front using Terraform. Because I use dynamic port mapping, the targets are not registered as healthy. I played with the health check and with the success codes: if I set the matcher to 301, everything is fine.
ECS
data "template_file" "mb_task_template" {
template = file("${path.module}/templates/marketplace-backend.json.tpl")
vars = {
name = "${var.mb_image_name}"
port = "${var.mb_port}"
image = "${aws_ecr_repository.mb.repository_url}"
log_group = "${aws_cloudwatch_log_group.mb.name}"
region = "${var.region}"
}
}
resource "aws_ecs_cluster" "mb" {
name = var.mb_image_name
}
resource "aws_ecs_task_definition" "mb" {
family = var.mb_image_name
container_definitions = data.template_file.mb_task_template.rendered
volume {
name = "mb-home"
host_path = "/ecs/mb-home"
}
}
resource "aws_ecs_service" "mb" {
name = var.mb_repository_url
cluster = aws_ecs_cluster.mb.id
task_definition = aws_ecs_task_definition.mb.arn
desired_count = 2
iam_role = var.aws_iam_role_ecs
depends_on = [aws_autoscaling_group.mb]
load_balancer {
target_group_arn = var.target_group_arn
container_name = var.mb_image_name
container_port = var.mb_port
}
}
resource "aws_autoscaling_group" "mb" {
name = var.mb_image_name
availability_zones = ["${var.availability_zone}"]
min_size = var.min_instance_size
max_size = var.max_instance_size
desired_capacity = var.desired_instance_capacity
health_check_type = "EC2"
health_check_grace_period = 300
launch_configuration = aws_launch_configuration.mb.name
vpc_zone_identifier = flatten([var.vpc_zone_identifier])
lifecycle {
create_before_destroy = true
}
}
data "template_file" "user_data" {
template = file("${path.module}/templates/user_data.tpl")
vars = {
ecs_cluster_name = "${var.mb_image_name}"
}
}
resource "aws_launch_configuration" "mb" {
name_prefix = var.mb_image_name
image_id = var.ami
instance_type = var.instance_type
security_groups = ["${var.aws_security_group}"]
iam_instance_profile = var.aws_iam_instance_profile
key_name = var.key_name
associate_public_ip_address = true
user_data = data.template_file.user_data.rendered
lifecycle {
create_before_destroy = true
}
}
resource "aws_cloudwatch_log_group" "mb" {
name = var.mb_image_name
retention_in_days = 14
}
ALB
locals {
target_groups = ["1", "2"]
}
resource "aws_alb" "mb" {
name = "${var.mb_image_name}-alb"
internal = false
load_balancer_type = "application"
security_groups = ["${aws_security_group.mb_alb.id}"]
subnets = var.subnets
tags = {
Name = var.mb_image_name
}
}
resource "aws_alb_target_group" "mb" {
count = length(local.target_groups)
name = "${var.mb_image_name}-tg-${element(local.target_groups, count.index)}"
port = var.mb_port
protocol = "HTTP"
vpc_id = var.vpc_id
target_type = "instance"
health_check {
path = "/health"
protocol = "HTTP"
timeout = "10"
interval = "15"
healthy_threshold = "3"
unhealthy_threshold = "3"
matcher = "200-299"
}
lifecycle {
create_before_destroy = true
}
tags = {
Name = var.mb_image_name
}
}
resource "aws_alb_listener" "mb_https" {
load_balancer_arn = aws_alb.mb.arn
port = 443
protocol = "HTTPS"
ssl_policy = "ELBSecurityPolicy-2016-08"
certificate_arn = module.dns.certificate_arn
default_action {
type = "forward"
target_group_arn = aws_alb_target_group.mb.0.arn
}
}
resource "aws_alb_listener_rule" "mb_https" {
listener_arn = aws_alb_listener.mb_https.arn
priority = 100
action {
type = "forward"
target_group_arn = aws_alb_target_group.mb.0.arn
}
condition {
field = "path-pattern"
values = ["/health/"]
}
}
Okay, it looks like the code above is working. I had a different issue with networking.
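For reference, a common networking pitfall with dynamic port mapping is that the ALB health-checks the randomly assigned host port rather than var.mb_port, so the container instances' security group has to allow the ephemeral port range from the ALB's security group. A minimal, untested sketch against the variables used above (the ephemeral range and both security group references are assumptions to adapt):
resource "aws_security_group_rule" "alb_to_ecs_dynamic_ports" {
  # Ephemeral host-port range commonly used by the ECS-optimized AMI;
  # adjust if your instances use a different range.
  type                     = "ingress"
  from_port                = 32768
  to_port                  = 65535
  protocol                 = "tcp"
  security_group_id        = var.aws_security_group       # container instance SG (assumption)
  source_security_group_id = aws_security_group.mb_alb.id # ALB SG from the ALB section
}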
Related
I have Terraform code that creates an EC2-type Batch job, and my AWS Batch job downloads about 50 GB of data in total. How do I add that storage space to my instance in Terraform? And is there another way to add that storage?
This is my Terraform code:
resource "aws_batch_compute_environment" "pipeline" {
compute_environment_name = "${var.product}-${var.application}-pipeline-batch-compute-environment-${var.env}"
compute_resources {
instance_role = aws_iam_instance_profile.pipeline_batch.arn
instance_type = var.pipeline_instance_type
max_vcpus = var.pipeline_max_vcpus
min_vcpus = 0
security_group_ids = [
aws_security_group.pipeline_batch.id
]
subnets = var.subnets
type = "EC2"
}
service_role = aws_iam_role.pipeline_batch_service_role.arn
type = "MANAGED"
tags = {
environment = var.env
}
}
resource "aws_batch_job_queue" "pipeline" {
depends_on = [aws_batch_compute_environment.pipeline]
name = "${var.product}-${var.application}-pipeline-batch-job-queue-${var.env}"
state = "ENABLED"
priority = 1
compute_environments = [
aws_batch_compute_environment.pipeline.arn
]
tags = {
environment = var.env
}
}
resource "aws_batch_job_definition" "pipeline" {
depends_on = [aws_ecr_repository.pipeline]
name = "${var.product}-${var.application}-pipeline-batch-job-definition-${var.env}"
type = "container"
container_properties = <<CONTAINER_PROPERTIES
{
"image": "${aws_ecr_repository.pipeline.repository_url}:latest",
"command": [ "--s3_bucket", "${var.input_bucket}", "--s3_upload_bucket", "${var.upload_bucket}"],
"executionRoleArn": "${aws_iam_role.pipeline_batch_instance_role.arn}",
"memory": ${var.pipeline_memory},
"vcpus": ${var.pipeline_vcpus}
}
CONTAINER_PROPERTIES
tags = {
environment = var.env
}
}
If you want, you may be able to mount a shared EFS drive; you could try something like this. Keep in mind I have not tested this, and you will need to replace certain parameters with your own subnet IDs, VPC ID, etc. Note that the mountPoints entry references a volumes entry that points at the host path where the EFS file system is mounted on the instance (see the mount command at the end):
resource "aws_batch_compute_environment" "pipeline" {
compute_environment_name = "${var.product}-${var.application}-pipeline-batch-compute-environment-${var.env}"
compute_resources {
instance_role = aws_iam_instance_profile.pipeline_batch.arn
instance_type = var.pipeline_instance_type
max_vcpus = var.pipeline_max_vcpus
min_vcpus = 0
security_group_ids = [
aws_security_group.pipeline_batch.id
]
subnets = var.subnets
type = "EC2"
}
service_role = aws_iam_role.pipeline_batch_service_role.arn
type = "MANAGED"
tags = {
environment = var.env
}
}
resource "aws_batch_job_queue" "pipeline" {
depends_on = [aws_batch_compute_environment.pipeline]
name = "${var.product}-${var.application}-pipeline-batch-job-queue-${var.env}"
state = "ENABLED"
priority = 1
compute_environments = [
aws_batch_compute_environment.pipeline.arn
]
tags = {
environment = var.env
}
}
resource "aws_batch_job_definition" "pipeline" {
depends_on = [aws_ecr_repository.pipeline]
name = "${var.product}-${var.application}-pipeline-batch-job-definition-${var.env}"
type = "container"
container_properties = <<CONTAINER_PROPERTIES
{
"image": "${aws_ecr_repository.pipeline.repository_url}:latest",
"command": [ "--s3_bucket", "${var.input_bucket}", "--s3_upload_bucket", "${var.upload_bucket}"],
"executionRoleArn": "${aws_iam_role.pipeline_batch_instance_role.arn}",
"memory": ${var.pipeline_memory},
"vcpus": ${var.pipeline_vcpus},
"mountPoints": [
{
readOnly = null,
containerPath = "/var/batch"
sourceVolume = "YOUR-FILE-SYSTEM-NAME"
}
]
}
CONTAINER_PROPERTIES
tags = {
environment = var.env
}
}
resource "aws_efs_file_system" "general" {
creation_token = "YOUR-FILE-SYSTEM-NAME"
#kms_key_id = module.kms.arn
#encrypted = true
encrypted = false
performance_mode = "generalPurpose"
throughput_mode = "provisioned"
provisioned_throughput_in_mibps = 8
tags = {Name = "YOUR-FILE-SYSTEM-NAME"}
}
resource "aws_efs_access_point" "general" {
tags = {Name = "YOUR-FILE-SYSTEM-NAME"}
file_system_id = aws_efs_file_system.general.id
root_directory {
path = "/YOUR-FILE-SYSTEM-NAME"
creation_info {
owner_gid = "1000"
owner_uid = "1000"
permissions = "755"
}
}
posix_user {
uid = "1000"
gid = "1000"
}
}
## FOR REDUNDANCY
## It is a good idea to add a mount target per AZ you use
resource "aws_efs_mount_target" "a" {
source = "app.terraform.io/popreach/efs-mount-target/aws"
version = "1.0.0"
file_system_id = aws_efs_file_system.general.id
subnet_id = PUBLIC-SUBNET-A
security_groups = [aws_security_group.general.id]
}
resource "aws_efs_mount_target" "b" {
file_system_id = aws_efs_file_system.general.id
subnet_id = PUBLIC-SUBNET-B
security_groups = [aws_security_group.general.id]
}
resource "aws_security_group" "general" {
name = YOUR-SECURITY-GROUP-NAME
vpc_id = YOUR-VPC-ID
tags = {Name = YOUR-SECURITY-GROUP-NAME}
}
resource "aws_security_group_rule" "ingress" {
type = "ingress"
cidr_blocks = ["0.0.0.0/0"]
ipv6_cidr_blocks = ["::/0"]
from_port = "2049"
to_port = "2049"
protocol = "tcp"
security_group_id = aws_security_group.general.id
}
resource "aws_security_group_rule" "egress" {
type = "egress"
description = "egress"
cidr_blocks = ["0.0.0.0/0"]
ipv6_cidr_blocks = ["::/0"]
from_port = "0"
to_port = "0"
protocol = "all"
security_group_id = aws_security_group.general.id
}
You'll be able to mount your EFS drive on any EC2 instance running the default Amazon Linux AMI like this: mkdir -p /data/efs && mount -t nfs4 -o nfsvers=4.1,rsize=1048576,wsize=1048576,hard,timeo=600,retrans=2,noresvport REPLACE_WITH_EFS_DNS:/ /data/efs
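To have the Batch instances do this automatically at boot, and to cover the original question of simply adding ~50 GB of local disk, a launch template can be attached to the compute environment. This is an untested sketch: the resource name, device name, volume size, and mount path are assumptions to adapt.
resource "aws_launch_template" "pipeline_batch" {
  name = "${var.product}-${var.application}-pipeline-batch-launch-template-${var.env}"

  # Larger root volume so the ~50 GB download fits on local disk.
  block_device_mappings {
    device_name = "/dev/xvda"
    ebs {
      volume_size = 100
      volume_type = "gp3"
    }
  }

  # Batch/ECS instances expect launch template user data in MIME multi-part format.
  # This mounts the EFS file system at /data/efs, matching the volumes host path above.
  user_data = base64encode(<<-EOF
    MIME-Version: 1.0
    Content-Type: multipart/mixed; boundary="==BOUNDARY=="

    --==BOUNDARY==
    Content-Type: text/x-shellscript; charset="us-ascii"

    #!/bin/bash
    mkdir -p /data/efs
    mount -t nfs4 -o nfsvers=4.1,hard,timeo=600,retrans=2,noresvport ${aws_efs_file_system.general.dns_name}:/ /data/efs
    --==BOUNDARY==--
  EOF
  )
}
It would then be attached inside compute_resources of aws_batch_compute_environment.pipeline with a launch_template block (launch_template_id = aws_launch_template.pipeline_batch.id, version = "$Latest").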
I have a Terraform resource in which I am trying to make the subnet_id argument dynamic. I have variables defined below in which subnet_name = "worker-subnet-1". I want to pass the name of the subnet and fetch the subnet ID, as I have multiple subnets. How can I do that?
resource "oci_containerengine_node_pool" "node_pool" {
for_each = var.nodepools
cluster_id = oci_containerengine_cluster.cluster[0].id
compartment_id = var.compartment_id
depends_on = [oci_containerengine_cluster.cluster]
kubernetes_version = var.cluster_kubernetes_version
name = each.value["name"]
node_config_details {
placement_configs {
availability_domain = var.availability_domain
subnet_id = oci_core_subnet.each.value["subnet_name"].id
}
size = each.value["size"]
}
node_shape = each.value["node_shape"]
node_shape_config {
#Optional
memory_in_gbs = each.value["memory"]
ocpus = each.value["ocpus"]
}
node_source_details {
image_id = each.value["image_id"]
source_type = "IMAGE"
}
ssh_public_key = file(var.ssh_public_key_path)
}
These are my variables:
nodepools = {
np1 = {
name = "np1"
size = 3
ocpus = 8
memory = 120
image_id = "test"
node_shape = "VM.Standard2.8"
subnet_name = "worker-subnet-1"
}
np2 = {
name = "np2"
size = 2
ocpus = 8
memory = 120
image_id = "test"
node_shape = "VM.Standard2.8"
subnet_name = "worker-subnet-1"
}
}
Any suggestions?
resource "oci_core_subnet" "snet-workers" {
cidr_block = lookup(var.subnets["snet-workers"], "subnet_cidr")
compartment_id = var.compartment_id
vcn_id = oci_core_virtual_network.base_vcn.id
display_name = lookup(var.subnets["snet-workers"], "display_name")
dns_label = lookup(var.subnets["snet-workers"], "dns_label")
prohibit_public_ip_on_vnic = true
security_list_ids = [oci_core_security_list.private_worker_nodes.id]
route_table_id = oci_core_route_table.rt-nat.id
}
You have to reference it like below, where you change <local resource name> to the name you have given your subnet resource:
subnet_id = oci_core_subnet.<local resource name>[each.value.subnet_name].id
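For that indexing to work, the subnet resource itself has to be created with for_each, keyed by the same names the node pools reference. A hedged sketch, assuming var.subnets is a map keyed by subnet name (e.g. "worker-subnet-1") with the same fields used above:
resource "oci_core_subnet" "workers" {
  for_each = var.subnets

  cidr_block                 = each.value.subnet_cidr
  compartment_id             = var.compartment_id
  vcn_id                     = oci_core_virtual_network.base_vcn.id
  display_name               = each.value.display_name
  dns_label                  = each.value.dns_label
  prohibit_public_ip_on_vnic = true
  security_list_ids          = [oci_core_security_list.private_worker_nodes.id]
  route_table_id             = oci_core_route_table.rt-nat.id
}
The node pool placement config would then resolve the subnet by the key carried in each nodepool entry:
placement_configs {
  availability_domain = var.availability_domain
  subnet_id           = oci_core_subnet.workers[each.value.subnet_name].id
}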
I am using Terraform to deploy an ECS EC2 cluster on AWS.
My pipeline is creating a new task definition from docker-compose, then updates the service to use this task definition.
Desired count is 1, deployment_minimum_healthy_percent is 100 and deployment_maximum_percent is 200.
Expected behavior is that the autoscaling group will launch a new EC2 instance to deploy the new task, then kill the old one.
What happens is that I get an error message : "service X was unable to place a task because no container instance met all of its requirements. The closest matching container-instance has insufficient memory available."
No instance is created and the deployment is rolled back. How can I make sure that an extra instance is created when deploying my service?
Here is my Terraform code:
resource "aws_ecs_cluster" "main_cluster" {
name = var.application_name
tags = {
Environment = var.environment_name
}
}
data "aws_ecs_task_definition" "main_td" {
task_definition = var.application_name
}
resource "aws_ecs_service" "main_service" {
name = var.application_name
cluster = aws_ecs_cluster.main_cluster.id
launch_type = "EC2"
scheduling_strategy = "REPLICA"
task_definition = "${data.aws_ecs_task_definition.main_td.family}:${data.aws_ecs_task_definition.main_td.revision}"
desired_count = var.target_capacity
deployment_minimum_healthy_percent = 100
deployment_maximum_percent = 200
health_check_grace_period_seconds = 10
wait_for_steady_state = false
force_new_deployment = true
load_balancer {
target_group_arn = aws_lb_target_group.main_tg.arn
container_name = var.container_name
container_port = var.container_port
}
ordered_placement_strategy {
type = "binpack"
field = "memory"
}
deployment_circuit_breaker {
enable = true
rollback = true
}
lifecycle {
ignore_changes = [desired_count]
}
tags = {
Environment = var.environment_name
}
}
Auto-scaling group:
data "template_file" "user_data" {
template = "${file("${path.module}/user_data.sh")}"
vars = {
ecs_cluster = "${aws_ecs_cluster.main_cluster.name}"
}
}
resource "aws_launch_configuration" "main_lc" {
name = var.application_name
image_id = var.ami_id
instance_type = var.instance_type
associate_public_ip_address = true
iam_instance_profile = "arn:aws:iam::812844034365:instance-profile/ecsInstanceRole"
security_groups = ["${aws_security_group.main_sg.id}"]
root_block_device {
volume_size = "30"
volume_type = "gp3"
}
user_data = "${data.template_file.user_data.rendered}"
}
resource "aws_autoscaling_policy" "main_asg_policy" {
name = "${var.application_name}-cpu-scale-policy"
policy_type = "TargetTrackingScaling"
autoscaling_group_name = aws_autoscaling_group.main_asg.name
estimated_instance_warmup = 10
target_tracking_configuration {
predefined_metric_specification {
predefined_metric_type = "ASGAverageCPUUtilization"
}
target_value = 40.0
}
}
resource "aws_autoscaling_group" "main_asg" {
name = var.application_name
launch_configuration = aws_launch_configuration.main_lc.name
min_size = var.target_capacity
max_size = var.target_capacity * 2
health_check_type = "EC2"
health_check_grace_period = 10
default_cooldown = 30
desired_capacity = var.target_capacity
vpc_zone_identifier = data.aws_subnet_ids.subnets.ids
wait_for_capacity_timeout = "3m"
instance_refresh {
strategy = "Rolling"
preferences {
min_healthy_percentage = 100
}
}
}
The module is published here: https://registry.terraform.io/modules/hboisgibault/ecs-cluster/aws/latest
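A pattern that usually resolves "unable to place a task because no container instance met all of its requirements" during rolling deployments is to attach the auto scaling group to the cluster through an ECS capacity provider with managed scaling, so that ECS itself raises the ASG's desired capacity when a task cannot be placed. An untested sketch against the resources above, assuming a reasonably recent AWS provider; the names and target_capacity are assumptions:
resource "aws_ecs_capacity_provider" "main_cp" {
  name = "${var.application_name}-capacity-provider"

  auto_scaling_group_provider {
    auto_scaling_group_arn = aws_autoscaling_group.main_asg.arn

    managed_scaling {
      status                    = "ENABLED"
      target_capacity           = 100
      minimum_scaling_step_size = 1
      maximum_scaling_step_size = 1
    }
  }
}

resource "aws_ecs_cluster_capacity_providers" "main" {
  cluster_name       = aws_ecs_cluster.main_cluster.name
  capacity_providers = [aws_ecs_capacity_provider.main_cp.name]

  default_capacity_provider_strategy {
    capacity_provider = aws_ecs_capacity_provider.main_cp.name
    weight            = 1
  }
}
The service would then drop launch_type = "EC2" (a service cannot set both launch_type and a capacity provider strategy), and the ASG's max_size has to leave headroom above the steady-state capacity so the extra instance can actually be launched during the deployment.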
I'm new to Terraform, unable to deploy the configuration below, and banging my head against it. Terraform does not deploy the correct resources to my environment and I'm unsure what I've missed. Any help is appreciated. The code is below.
Thanks. (Any general advice on Terraform is appreciated too. I can tell it's going to be something very obvious, but a one-hour exercise has been bothering me all day!)
provider "aws" {
region="us-east-1"
}
data "aws_vpc" "vpc" {
tags = {
Name = "vpc"
}
}
data "aws_subnet_ids" "ecommerce" {
vpc_id = "${data.aws_vpc.vpc.id}"
tags = {
Name = "database"
}
}
resource "aws_db_subnet_group" "aurora" {
name = "aurora"
subnet_ids = ["${data.aws_subnet_ids.ecommerce.ids}"]
tags = {
Name = "database"
}
}
resource "aws_security_group" "database" {
name = "database"
description = "ecommerce database"
vpc_id = "${data.aws_vpc.vpc.id}"
tags = {
Name = "database"
}
}
resource "aws_security_group" "web" {
name = "web"
description = "ecommerce web"
vpc_id = "${data.aws_vpc.vpc.id}"
tags = {
Name = "web"
}
}
resource "aws_security_group_rule" "web_allow_https_in" {
description = "HTTPS access"
type = "ingress"
from_port = 443
to_port=443
protocol = "tcp"
cidr_blocks = ["8.8.8.8/32"]
security_group_id = "${aws_security_group.web.id}"
}
resource "aws_security_group_rule" "web_allow_mysql_out" {
description = "MySQL access"
type = "egress"
from_port= 3306
to_port = 3306
protocol = "tcp"
security_group_id = "${aws_security_group.web.id}"
}
resource "aws_security_group_rule" "db_allow_web_in" {
description = "Web access"
type = "ingress"
from_port = 3306
to_port = 3306
protocol="tcp"
security_group_id = "${aws_security_group.database.id}"
source_security_group_id = "${aws_security_group.web.id}"
}
data "aws_ami" "web" {
owners = ["self"]
most_recent = true
filter {
name = "name"
values = ["web*"]
}
}
resource "aws_instance" "web" {
ami = "${data.aws_ami.web.id}"
ebs_optimized = true
instance_type = "t3.micro"
associate_public_ip_address = false
vpc_security_group_ids = ["${aws_security_group.web.id}"]
subnet_id = "${data.aws_subnet_ids.ecommerce.ids[0]}"
ebs_block_device = {
device_name = "/dev/sdb"
volume_type = "gp2"
volume_size = "16"
delete_on_termination = true
}
tags = {
Name = "Web"
}
lifecycle {
ignore_changes = ["ami"]
}
}
resource "aws_rds_cluster" "aurora" {
apply_immediately = true
backup_retention_period = 7
cluster_identifier = "mydatabase"
engine = "aurora"
database_name = "main"
storage_encrypted=true
kms_key_id = "arn:aws**"
db_subnet_group_name = "${aws_db_subnet_group.aurora.id}"
final_snapshot_identifier = "final"
master_password = "*"
master_username = "*"
vpc_security_group_ids = ["${aws_security_group.database.id}"]
}
resource "aws_rds_cluster_instance" "aurora" {
count = 2
apply_immediately = true
identifier = "mydatabase-${count.index}"
cluster_identifier = "${aws_rds_cluster.aurora.id}"
db_subnet_group_name = "${aws_db_subnet_group.aurora.id}"
instance_class = "db.r12.large"
publicly_accessible = false
}
resource "aws_eip" "eip" {
vpc = true
}
resource "aws_eip_association" "assoc" {
instance_id = "${aws_instance.web.id}"
allocation_id = "${aws_eip.eip.id}"
}
data "aws_route53_zone" "zone" {
name = "ecommerce.com"
}
resource "aws_route53_record" "record" {
zone_id = "${data.aws_route53_zone.zone.zone_id}"
type="A"
name = "www.ecommerce.com"
ttl = 300
records = ["${aws_eip.eip.public_ip}"]
}
When I update desired_count, the Terraform plan shows that the operation will be an in-place update. However, when Terraform tries to apply the changes, I get the following error:
Terraform v0.12.21
Initializing plugins and modules...
2020/03/05 22:10:52 [DEBUG] Using modified User-Agent: Terraform/0.12.21 TFC/8f5a579db5
module.web.aws_ecs_service.web[0]: Modifying... [id=arn:aws:ecs:us-east-1:55555:service/web/web]
Error: Error updating ECS Service (arn:aws:ecs:us-east-1:55555:service/web/web): InvalidParameterException: Unable to update network parameters on services with a CODE_DEPLOY deployment controller. Use AWS CodeDeploy to trigger a new deployment.
The Terraform code used to reproduce this looks something like:
resource "aws_lb" "platform" {
name = "platform"
internal = false
load_balancer_type = "application"
ip_address_type = "ipv4"
security_groups = [aws_security_group.lb.id]
subnets = [for subnet in aws_subnet.lb : subnet.id]
enable_deletion_protection = true
tags = {
Name = "platform"
Type = "Public"
}
}
resource "aws_lb_target_group" "platform" {
count = 2
name = "platform-tg-${count.index + 1}"
vpc_id = var.vpc_id
protocol = "HTTP"
port = 80
target_type = "ip"
stickiness {
type = "lb_cookie"
enabled = false
}
health_check {
path = "/healthcheck"
port = var.container_port
protocol = "HTTP"
timeout = 5
healthy_threshold = 5
unhealthy_threshold = 3
matcher = "200"
}
tags = {
Name = "platform-tg-${count.index + 1}"
Type = "Public"
}
}
resource "aws_lb_listener" "platform-https" {
load_balancer_arn = aws_lb.platform.arn
port = 443
protocol = "HTTPS"
ssl_policy = "ELBSecurityPolicy-TLS-1-2-Ext-2018-06"
certificate_arn = var.certificate_arn
depends_on = [aws_lb_target_group.platform]
default_action {
type = "forward"
target_group_arn = aws_lb_target_group.platform[0].arn
}
lifecycle {
ignore_changes = [
default_action
]
}
}
locals {
family = "platform"
container_name = "web"
}
resource "aws_cloudwatch_log_group" "platform" {
name = "/aws/ecs/platform"
retention_in_days = 3653
tags = {
Name = "platform"
}
}
resource "aws_ecs_task_definition" "platform" {
family = local.family
requires_compatibilities = ["FARGATE"]
cpu = var.service.cpu
memory = var.service.memory
network_mode = "awsvpc"
execution_role_arn = aws_iam_role.ecs_task_execution.arn
task_role_arn = aws_iam_role.ecs_task_execution.arn
container_definitions = jsonencode(
jsondecode(
templatefile("${path.module}/taskdef.json", {
family = local.family
container_name = local.container_name
region = var.region
account_id = var.account_id
cpu = var.service.cpu
memory = var.service.memory
image = var.service.container_image
log_group = aws_cloudwatch_log_group.platform.name
node_env = var.node_env
port = var.container_port
platform_url = var.platform_url
short_url = var.short_url
cdn_url = var.cdn_url
})
).containerDefinitions
)
tags = {
Name = "platform"
Type = "Private"
}
}
resource "aws_ecs_cluster" "platform" {
name = "platform"
setting {
name = "containerInsights"
value = "enabled"
}
tags = {
Name = "platform"
Type = "Public"
}
}
data "aws_lb_listener" "current-platform" {
arn = aws_lb_listener.platform-https.arn
}
data "aws_ecs_task_definition" "current-platform" {
task_definition = local.family
}
resource "aws_ecs_service" "platform" {
count = var.delete_platform_ecs_service ? 0 : 1
name = "platform"
cluster = aws_ecs_cluster.platform.arn
launch_type = "FARGATE"
desired_count = var.service.container_count
enable_ecs_managed_tags = true
task_definition = "${aws_ecs_task_definition.platform.family}:${max(aws_ecs_task_definition.platform.revision, data.aws_ecs_task_definition.current-platform.revision)}"
depends_on = [aws_lb_target_group.platform]
load_balancer {
target_group_arn = data.aws_lb_listener.current-platform.default_action[0].target_group_arn
container_name = local.container_name
container_port = var.container_port
}
network_configuration {
subnets = sort([for subnet in aws_subnet.ecs : subnet.id])
security_groups = [aws_security_group.ecs.id]
}
deployment_controller {
type = "CODE_DEPLOY"
}
lifecycle {
// NOTE: Based on: https://docs.aws.amazon.com/cli/latest/reference/ecs/update-service.html
// If the network configuration, platform version, or task definition need to be updated, a new AWS CodeDeploy deployment should be created.
ignore_changes = [
load_balancer,
network_configuration,
task_definition
]
}
tags = {
Name = "platform"
Type = "Private"
}
}
This is using Terraform v0.12.21. Full debug output is available at: https://gist.github.com/jgeurts/f4d930608a119e9cd75a7a54b111ee7c
This is maybe not the best answer, but I wasn't able to get Terraform to adjust only the desired_count. Instead, I added auto-scaling to the ECS service:
Ignore desired_count:
resource "aws_ecs_service" "platform" {
...
lifecycle {
// NOTE: Based on: https://docs.aws.amazon.com/cli/latest/reference/ecs/update-service.html
// If the network configuration, platform version, or task definition need to be updated, a new AWS CodeDeploy deployment should be created.
ignore_changes = [
desired_count, # Preserve desired count when updating an autoscaled ECS Service
load_balancer,
network_configuration,
task_definition,
]
}
}
Add auto-scaling:
resource "aws_appautoscaling_target" "platform" {
max_capacity = var.max_capacity
min_capacity = var.min_capacity
resource_id = "service/${aws_ecs_cluster.platform.name}/${aws_ecs_cluster.platform.name}"
scalable_dimension = "ecs:service:DesiredCount"
service_namespace = "ecs"
depends_on = [
aws_ecs_cluster.platform,
]
}
resource "aws_appautoscaling_policy" "platform" {
name = "platform-auto-scale"
service_namespace = aws_appautoscaling_target.platform.service_namespace
resource_id = aws_appautoscaling_target.platform.resource_id
scalable_dimension = aws_appautoscaling_target.platform.scalable_dimension
policy_type = "TargetTrackingScaling"
target_tracking_scaling_policy_configuration {
target_value = var.service.autoscale_target_cpu_percentage
scale_out_cooldown = 60
scale_in_cooldown = 300
predefined_metric_specification {
predefined_metric_type = "ECSServiceAverageCPUUtilization"
}
}
}
resource "aws_appautoscaling_scheduled_action" "platform_0430_increase_min_capacity" {
name = "platform-0430-increase-min-capacity"
schedule = "cron(30 4 * * ? *)"
service_namespace = aws_appautoscaling_target.platform.service_namespace
resource_id = aws_appautoscaling_target.platform.resource_id
scalable_dimension = aws_appautoscaling_target.platform.scalable_dimension
scalable_target_action {
min_capacity = var.min_capacity + 4
max_capacity = var.max_capacity
}
}
resource "aws_appautoscaling_scheduled_action" "platform_0615_restore_min_capacity" {
name = "platform-0615-restore-min-capacity"
schedule = "cron(15 06 * * ? *)"
service_namespace = aws_appautoscaling_target.platform.service_namespace
resource_id = aws_appautoscaling_target.platform.resource_id
scalable_dimension = aws_appautoscaling_target.platform.scalable_dimension
scalable_target_action {
min_capacity = var.min_capacity
max_capacity = var.max_capacity
}
}
resource "aws_appautoscaling_scheduled_action" "platform_weekday_0945_increase_min_capacity" {
name = "platform-weekday-0945-increase-min-capacity"
schedule = "cron(45 9 ? * MON-FRI *)"
service_namespace = aws_appautoscaling_target.platform.service_namespace
resource_id = aws_appautoscaling_target.platform.resource_id
scalable_dimension = aws_appautoscaling_target.platform.scalable_dimension
scalable_target_action {
min_capacity = var.min_capacity + 4
max_capacity = var.max_capacity
}
}
resource "aws_appautoscaling_scheduled_action" "platform_weekday_2100_restore_min_capacity" {
name = "platform-weekday-2100-restore-min-capacity"
schedule = "cron(0 2100 ? * MON-FRI *)"
service_namespace = aws_appautoscaling_target.platform.service_namespace
resource_id = aws_appautoscaling_target.platform.resource_id
scalable_dimension = aws_appautoscaling_target.platform.scalable_dimension
scalable_target_action {
min_capacity = var.min_capacity
max_capacity = var.max_capacity
}
}