Add storage to AWS Batch job in Terraform

I have Terraform code that creates an EC2-type Batch compute environment, and my AWS Batch job downloads about 50GB of data in total. How do I add that storage space to my instance in Terraform? And is there another way to add that storage?
This is my Terraform code:
resource "aws_batch_compute_environment" "pipeline" {
compute_environment_name = "${var.product}-${var.application}-pipeline-batch-compute-environment-${var.env}"
compute_resources {
instance_role = aws_iam_instance_profile.pipeline_batch.arn
instance_type = var.pipeline_instance_type
max_vcpus = var.pipeline_max_vcpus
min_vcpus = 0
security_group_ids = [
aws_security_group.pipeline_batch.id
]
subnets = var.subnets
type = "EC2"
}
service_role = aws_iam_role.pipeline_batch_service_role.arn
type = "MANAGED"
tags = {
environment = var.env
}
}
resource "aws_batch_job_queue" "pipeline" {
depends_on = [aws_batch_compute_environment.pipeline]
name = "${var.product}-${var.application}-pipeline-batch-job-queue-${var.env}"
state = "ENABLED"
priority = 1
compute_environments = [
aws_batch_compute_environment.pipeline.arn
]
tags = {
environment = var.env
}
}
resource "aws_batch_job_definition" "pipeline" {
depends_on = [aws_ecr_repository.pipeline]
name = "${var.product}-${var.application}-pipeline-batch-job-definition-${var.env}"
type = "container"
container_properties = <<CONTAINER_PROPERTIES
{
"image": "${aws_ecr_repository.pipeline.repository_url}:latest",
"command": [ "--s3_bucket", "${var.input_bucket}", "--s3_upload_bucket", "${var.upload_bucket}"],
"executionRoleArn": "${aws_iam_role.pipeline_batch_instance_role.arn}",
"memory": ${var.pipeline_memory},
"vcpus": ${var.pipeline_vcpus}
}
CONTAINER_PROPERTIES
tags = {
environment = var.env
}
}

If you want, you may be able to mount a shared EFS drive; you could try something like this. Keep in mind I have not tested this, and you will need to replace certain parameters with your own subnet IDs, VPC ID, etc.:
resource "aws_batch_compute_environment" "pipeline" {
compute_environment_name = "${var.product}-${var.application}-pipeline-batch-compute-environment-${var.env}"
compute_resources {
instance_role = aws_iam_instance_profile.pipeline_batch.arn
instance_type = var.pipeline_instance_type
max_vcpus = var.pipeline_max_vcpus
min_vcpus = 0
security_group_ids = [
aws_security_group.pipeline_batch.id
]
subnets = var.subnets
type = "EC2"
}
service_role = aws_iam_role.pipeline_batch_service_role.arn
type = "MANAGED"
tags = {
environment = var.env
}
}
resource "aws_batch_job_queue" "pipeline" {
depends_on = [aws_batch_compute_environment.pipeline]
name = "${var.product}-${var.application}-pipeline-batch-job-queue-${var.env}"
state = "ENABLED"
priority = 1
compute_environments = [
aws_batch_compute_environment.pipeline.arn
]
tags = {
environment = var.env
}
}
resource "aws_batch_job_definition" "pipeline" {
depends_on = [aws_ecr_repository.pipeline]
name = "${var.product}-${var.application}-pipeline-batch-job-definition-${var.env}"
type = "container"
container_properties = <<CONTAINER_PROPERTIES
{
"image": "${aws_ecr_repository.pipeline.repository_url}:latest",
"command": [ "--s3_bucket", "${var.input_bucket}", "--s3_upload_bucket", "${var.upload_bucket}"],
"executionRoleArn": "${aws_iam_role.pipeline_batch_instance_role.arn}",
"memory": ${var.pipeline_memory},
"vcpus": ${var.pipeline_vcpus},
"mountPoints": [
{
"readOnly": null,
"containerPath": "/var/batch",
"sourceVolume": "YOUR-FILE-SYSTEM-NAME"
}
]
}
CONTAINER_PROPERTIES
tags = {
environment = var.env
}
}
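One thing to add: a mountPoints entry only takes effect if the job definition also declares a matching volume. A sketch of the extra JSON you could merge into container_properties for an EFS-backed volume (untested; the authorizationConfig block is optional and assumes you use the access point defined further down):
"volumes": [
  {
    "name": "YOUR-FILE-SYSTEM-NAME",
    "efsVolumeConfiguration": {
      "fileSystemId": "${aws_efs_file_system.general.id}",
      "transitEncryption": "ENABLED",
      "authorizationConfig": {
        "accessPointId": "${aws_efs_access_point.general.id}"
      }
    }
  }
]
The sourceVolume value in mountPoints must match the name given here.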
resource "aws_efs_file_system" "general" {
creation_token = "YOUR-FILE-SYSTEM-NAME"
#kms_key_id = module.kms.arn
#encrypted = true
encrypted = false
performance_mode = "generalPurpose"
throughput_mode = "provisioned"
provisioned_throughput_in_mibps = 8
tags = {Name = "YOUR-FILE-SYSTEM-NAME"}
}
resource "aws_efs_access_point" "general" {
tags = {Name = "YOUR-FILE-SYSTEM-NAME"}
file_system_id = aws_efs_file_system.general.id
root_directory {
path = "/YOUR-FILE-SYSTEM-NAME"
creation_info {
owner_gid = "1000"
owner_uid = "1000"
permissions = "755"
}
}
posix_user {
uid = "1000"
gid = "1000"
}
}
## FOR REDUNDANCY
## It is a good idea to add a mount target per AZ you use
resource "aws_efs_mount_target" "a" {
source = "app.terraform.io/popreach/efs-mount-target/aws"
version = "1.0.0"
file_system_id = aws_efs_file_system.general.id
subnet_id = PUBLIC-SUBNET-A
security_groups = [aws_security_group.general.id]
}
resource "aws_efs_mount_target" "b" {
file_system_id = aws_efs_file_system.general.id
subnet_id = "PUBLIC-SUBNET-B"
security_groups = [aws_security_group.general.id]
}
resource "aws_security_group" "general" {
name = "YOUR-SECURITY-GROUP-NAME"
vpc_id = "YOUR-VPC-ID"
tags = {Name = "YOUR-SECURITY-GROUP-NAME"}
}
resource "aws_security_group_rule" "ingress" {
type = "ingress"
cidr_blocks = ["0.0.0.0/0"]
ipv6_cidr_blocks = ["::/0"]
from_port = "2049"
to_port = "2049"
protocol = "tcp"
security_group_id = aws_security_group.general.id
}
resource "aws_security_group_rule" "egress" {
type = "egress"
description = "egress"
cidr_blocks = ["0.0.0.0/0"]
ipv6_cidr_blocks = ["::/0"]
from_port = "0"
to_port = "0"
protocol = "all"
security_group_id = aws_security_group.general.id
}
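Opening port 2049 to 0.0.0.0/0 is broader than necessary; if the only NFS clients are the Batch instances, a tighter ingress rule (a sketch, reusing the pipeline_batch security group from the question) could be used instead:
resource "aws_security_group_rule" "ingress_from_batch" {
  type                     = "ingress"
  description              = "NFS from the Batch compute environment only"
  from_port                = 2049
  to_port                  = 2049
  protocol                 = "tcp"
  security_group_id        = aws_security_group.general.id
  source_security_group_id = aws_security_group.pipeline_batch.id
}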
You'll be able to mount your EFS drive on any default Amazon Linux EC2 instance like this: mkdir -p /data/efs && mount -t nfs4 -o nfsvers=4.1,rsize=1048576,wsize=1048576,hard,timeo=600,retrans=2,noresvport REPLACE_WITH_EFS_DNS:/ /data/efs
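Alternatively, if you only need more local disk on the Batch instances rather than a shared file system, the instance volume size comes from the AMI or from a launch template attached to the compute environment, not from the job definition. A minimal, untested sketch (the resource name, the 100 GiB size, and the /dev/xvda root device name are assumptions to adapt to your AMI):
resource "aws_launch_template" "pipeline_batch_storage" {
  name = "${var.product}-${var.application}-pipeline-batch-launch-template-${var.env}"

  block_device_mappings {
    device_name = "/dev/xvda" # assumed root device name of the ECS-optimized Amazon Linux 2 AMI

    ebs {
      volume_size = 100   # GiB; sized for the ~50GB download plus headroom
      volume_type = "gp3"
    }
  }
}
Then reference it from the compute environment's compute_resources block:
  launch_template {
    launch_template_id = aws_launch_template.pipeline_batch_storage.id
    version            = "$Latest"
  }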

Related

Terraform: How would I reference a variable in For_Each that is not included in a map, such as file_system_id?

Maybe this is possible, maybe it's not. I'm attempting to mount an EFS target using some of the values stored in a var.ec2_server map which includes subnets, EBS volumes, etc.
The issue I've run into is that I created the EFS File System using a for_each statement; since the efs_file_system was created with a for_each, I must reference the attributes within specified instances when referring to the resource in other variables.
The file_system_id is only known after creation so how would I reference it within a map or other variable inside other for_each statements, such as the aws_efs_mount_target resource defined below? Will what I'm doing even work?
I'm using the antiquated resource.tf > variables.tf > terraform.tfvars (config) style of code:
...the ec2.tf file:
###############################################################################
# EC2 Instance
resource "aws_instance" "ec2" {
for_each = var.ec2_servers
ami = data.aws_ami.ec2[each.key].id
disable_api_termination = var.disable_api_termination
iam_instance_profile = aws_iam_instance_profile.ec2[each.key].id
instance_type = each.value.instance_type
monitoring = true
vpc_security_group_ids = [aws_security_group.ec2[each.key].id]
subnet_id = each.value.subnet_name != null ? aws_subnet.private["${each.value.vpc_name}.${each.value.subnet_name}.${each.value.availability_zone}"].id : null
key_name = aws_key_pair.ec2.key_name
user_data = each.value.user_data == "" ? null : templatefile("./${each.value.user_data}", { region = data.aws_region.current.name })
private_ip = each.value.private_ip
metadata_options {
http_endpoint = "enabled"
http_tokens = "required"
}
root_block_device {
delete_on_termination = true
encrypted = true
volume_size = each.value.root_volume_size
volume_type = "gp2"
tags = {
Name = replace("${var.project_name}-${each.value.vpc_name}-${each.key}-EBS", " ", "")
}
}
dynamic "ebs_block_device" {
for_each = each.value.ebs_volumes
content {
volume_type = ebs_block_device.value.volume_type
volume_size = ebs_block_device.value.volume_size
device_name = ebs_block_device.value.device_name
tags = {
Name = replace("${var.project_name}-${each.value.vpc_name}-${each.key}-EBS", " ", "") }
}
}
tags = {
Name = replace("${var.project_name}-${each.value.vpc_name}-${each.key}-EC2", " ", "")
Backup = "true"
}
}
...the efs.tf file:
###############################################################################
# Create EFS File System
resource "aws_efs_file_system" "efs" {
for_each = {
for object, property in var.efs_config : object => property if var.efs_config.efs_enabled
}
creation_token = var.efs_config.efs_creation_token
encrypted = var.efs_config.efs_encrypt
kms_key_id = aws_kms_key.efs_kms.arn
tags = {
Name = replace("${var.project_name}-${var.efs_config.efs_vpc}-EFS", " ", "")
}
}
resource "aws_efs_backup_policy" "efs_backup_policy" {
file_system_id = "NEEDS TO BE DETERMINED"
backup_policy {
status = "ENABLED"
}
}
resource "aws_efs_mount_target" "efs_mount_target" {
for_each = var.ec2_servers
file_system_id = "NEEDS TO BE DETERMINED"
subnet_id = each.value.subnet_name == "app" ? aws_subnet.private["${each.value.vpc_name}.${each.value.subnet_name}.${each.value.availability_zone}"].id : null
ip_address = lookup(var.efs_config, "efs_private_ip")
security_groups = [aws_security_group.ec2[each.key].id]
}
...the variables.tf file:
variable "ec2_servers" {
description = "A configurable map of EC2 settings."
type = map(any)
}
...the terraform.tfvars file:
###############################################################################
# EFS Configurations
efs_config = {
efs_enabled = true
efs_creation_token = "Prod_EFS"
efs_encrypt = true
efs_vpc = "Prod"
efs_private_ip = "10.200.0.5"
}
# Server Configurations
ec2_servers = {
EC201 = {
ami_owner = "XXXXXXXXXXXX"
ami_name = "xxxxx-xxxxxx"
instance_type = "t2.micro"
root_volume_size = "10"
ebs_volumes = [
{
volume_size = "20"
volume_type = "gp3"
device_name = "/dev/xvdba"
},
{
volume_size = "20"
volume_type = "gp3"
device_name = "/dev/xvdbb"
}
]
vpc_name = "Prod"
subnet_name = "web"
set_ec2_hostname = false
ec2_hostname = "xxxxxxxxx"
availability_zone = "a"
public_dns = false
private_dns = true
policy_names = []
s3_storage = false
transfer_files = false
user_data = "setup_ssm_linux.tftpl"
private_ip = "10.200.0.132"
ingress = {
ssh = {
description = "Internal address"
from_port = 22
to_port = 22
protocol = "tcp"
cidr_blocks = [
"10.200.0.0/22"
]
}
}
}
}
I've tried a number of things, such as creating a data resource for aws_efs_mount_target, and nothing I do seems to work. If anyone could provide a little insight, it would be greatly appreciated by both my project leads and myself!
If I missed anything here, please let me know and I will update the question with the relevant information.
Your aws_efs_backup_policy needs a for_each also, since you need to create one for each EFS volume:
resource "aws_efs_backup_policy" "efs_backup_policy" {
for_each = aws_efs_file_system.efs
file_system_id = each.value.id
backup_policy {
status = "ENABLED"
}
}
For your EFS mount target, I would use the same for_each you use for the EFS volumes:
resource "aws_efs_mount_target" "efs_mount_target" {
for_each = {
for object, property in var.efs_config : object => property if var.efs_config.efs_enabled
}
file_system_id = aws_efs_file_system.efs[each.key].id
...
}
I think you need to clean up those other lookups in aws_efs_mount_target by moving those values into the efs_config var.
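For example, if you add hypothetical efs_subnet_id and efs_security_group_id keys to efs_config, the mount target can follow the same pattern as the backup policy above (an untested sketch):
resource "aws_efs_mount_target" "efs_mount_target" {
  for_each = aws_efs_file_system.efs

  file_system_id  = each.value.id
  subnet_id       = var.efs_config.efs_subnet_id           # hypothetical new key in efs_config
  security_groups = [var.efs_config.efs_security_group_id] # hypothetical new key in efs_config
}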

Inappropriate value for "role": string required (Terraform 0.12)

I have written a terraform configuration file for a bastion entry point on an application.
resource "aws_instance" "instance" {
ami = var.ami
ebs_optimized = var.ebs_optimized
iam_instance_profile = aws_iam_instance_profile.iam_instance_profile
instance_type = var.instance_type
key_name = "quadops"
subnet_id = var.subnet_id
user_data = var.user_data
tags = {
Name = "${var.name}"
Business = "Infrastracture"
app_name = "infra"
app_env = "${var.env}"
}
volume_tags = {
Name = "${var.name}"
Business = "Infrastracture"
app_name = "infra"
app_env = "${var.env}"
}
vpc_security_group_ids = [aws_security_group.security_group.id]
}
resource "aws_security_group" "security_group" {
name = "${var.name}-security-group"
vpc_id = var.vpc_id
ingress {
from_port = 22
to_port = 22
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
egress {
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
}
tags = {
Name = "${var.name}"
Business = "Infrastracture"
app_name = "infra"
app_env = "${var.env}"
}
}
resource "aws_iam_instance_profile" "iam_instance_profile" {
name = "${var.name}-iam-instance-profile"
role = aws_iam_role.iam_role
tags = {
Name = "${var.name}"
Business = "Infrastracture"
app_name = "infra"
app_env = "${var.env}"
}
}
resource "aws_iam_role" "iam_role" {
assume_role_policy = jsonencode({
Version = "2012-10-17"
Statement = [
{
Action = "sts:AssumeRole"
Effect = "Allow"
Sid = ""
Principal = {
Service = "ec2.amazonaws.com"
}
},
]
})
name = "${var.name}-iam-role"
tags = {
Name = "${var.name}-iam-role"
Business = "Infrastracture"
app_name = "infra"
app_env = "${var.env}"
}
}
resource "aws_eip" "eip" {
vpc = true
instance = aws_instance.instance.id
tags = {
Name = "${var.name}-eip"
Business = "Infrastracture"
app_name = "infra"
app_env = "${var.env}"
}
}
resource "cloudflare_record" "record" {
zone_id = var.zone_id
name = "bastion.${var.env}"
type = "A"
value = "aws_eip.eip.public_ip"
}
Upon running plan, I'm getting this error.
on .terraform/modules/bastion/main.tf line 49, in resource "aws_iam_instance_profile" "iam_instance_profile":
49: role = aws_iam_role.iam_role
|----------------
| aws_iam_role.iam_role is object with 15 attributes
Inappropriate value for attribute "role": string required.
I can't seem to get over this hurdle. I think I'm calling the resource correctly, but Terraform 0.12 says that it requires a string. Am I passing the values incorrectly? Thanks.
You are passing the entire aws_iam_role object to the role argument, which is causing the error. Instead, try passing the name of the role like so:
resource "aws_iam_instance_profile" "iam_instance_profile" {
role = aws_iam_role.iam_role.name
}
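The aws_instance block earlier in the question likely needs the same treatment, since iam_instance_profile also expects the profile name as a string rather than the whole object; a sketch:
resource "aws_instance" "instance" {
  # ...
  iam_instance_profile = aws_iam_instance_profile.iam_instance_profile.name
  # ...
}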

Terraform does not deploy correct resources

I'm new to Terraform, unable to deploy the configuration below, and banging my head against it. Terraform does not deploy the correct resources to my environment, and I'm unsure of what I've missed. Any help is appreciated; the code is below.
Thanks. (Any general advice on Terraform is also appreciated. I can tell it's going to be something very obvious, but a one-hour exercise has been bothering me all day!)
provider "aws" {
region="us-east-1"
}
data "aws_vpc" "vpc" {
tags = {
Name = "vpc"
}
}
data "aws_subnet_ids" "ecommerce" {
vpc_id = "${data.aws_vpc.vpc.id}"
tags = {
Name = "database"
}
}
resource "aws_db_subnet_group" "aurora" {
name = "aurora"
subnet_ids = ["${data.aws_subnet_ids.ecommerce.ids}"]
tags = {
Name = "database"
}
}
resource "aws_security_group" "database" {
name = "database"
description = "ecommerce database"
vpc_id = "${data.aws_vpc.vpc.id}"
tags = {
Name = "database"
}
}
resource "aws_security_group" "web" {
name = "web"
description = "ecommerce web"
vpc_id = "${data.aws_vpc.vpc.id}"
tags = {
Name = "web"
}
}
resource "aws_security_group_rule" "web_allow_https_in" {
description = "HTTPS access"
type = "ingress"
from_port = 443
to_port=443
protocol = "tcp"
cidr_blocks = ["8.8.8.8/32"]
security_group_id = "${aws_security_group.web.id}"
}
resource "aws_security_group_rule" "web_allow_mysql_out" {
description = "MySQL access"
type = "egress"
from_port= 3306
to_port = 3306
protocol = "tcp"
security_group_id = "${aws_security_group.web.id}"
}
resource "aws_security_group_rule" "db_allow_web_in" {
description = "Web access"
type = "ingress"
from_port = 3306
to_port = 3306
protocol="tcp"
security_group_id = "${aws_security_group.database.id}"
source_security_group_id = "${aws_security_group.web.id}"
}
data "aws_ami" "web" {
owners = ["self"]
most_recent = true
filter {
name = "name"
values = ["web*"]
}
}
resource "aws_instance" "web" {
ami = "${data.aws_ami.web.id}"
ebs_optimized = true
instance_type = "t3.micro"
associate_public_ip_address = false
vpc_security_group_ids = ["${aws_security_group.web.id}"]
subnet_id = "${data.aws_subnet_ids.ecommerce.ids[0]}"
ebs_block_device = {
device_name = "/dev/sdb"
volume_type = "gp2"
volume_size = "16"
delete_on_termination = true
}
tags = {
Name = "Web"
}
lifecycle {
ignore_changes = ["ami"]
}
}
resource "aws_rds_cluster" "aurora" {
apply_immediately = true
backup_retention_period = 7
cluster_identifier = "mydatabase"
engine = "aurora"
database_name = "main"
storage_encrypted=true
kms_key_id = "arn:aws**"
db_subnet_group_name = "${aws_db_subnet_group.aurora.id}"
final_snapshot_identifier = "final"
master_password = "*"
master_username = "*"
vpc_security_group_ids = ["${aws_security_group.database.id}"]
}
resource "aws_rds_cluster_instance" "aurora" {
count = 2
apply_immediately = true
identifier = "mydatabase-${count.index}"
cluster_identifier = "${aws_rds_cluster.aurora.id}"
db_subnet_group_name = "${aws_db_subnet_group.aurora.id}"
instance_class = "db.r12.large"
publicly_accessible = false
}
resource "aws_eip" "eip" {
vpc = true
}
resource "aws_eip_association" "assoc" {
instance_id = "${aws_instance.web.id}"
allocation_id = "${aws_eip.eip.id}"
}
data "aws_route53_zone" "zone" {
name = "ecommerce.com"
}
resource "aws_route53_record" "record" {
zone_id = "${data.aws_route53_zone.zone.zone_id}"
type="A"
name = "www.ecommerce.com"
ttl = 300
records = ["${aws_eip.eip.public_ip}"]
}

ECS and Application Load Balancer Ephemeral Ports using Terraform

I tried to build an ECS cluster with an ALB in front using Terraform. Since I use dynamic port mapping, the targets are not registered as healthy. I played with the health check and success codes; if I set the matcher to 301, everything is fine.
ECS
data "template_file" "mb_task_template" {
template = file("${path.module}/templates/marketplace-backend.json.tpl")
vars = {
name = "${var.mb_image_name}"
port = "${var.mb_port}"
image = "${aws_ecr_repository.mb.repository_url}"
log_group = "${aws_cloudwatch_log_group.mb.name}"
region = "${var.region}"
}
}
resource "aws_ecs_cluster" "mb" {
name = var.mb_image_name
}
resource "aws_ecs_task_definition" "mb" {
family = var.mb_image_name
container_definitions = data.template_file.mb_task_template.rendered
volume {
name = "mb-home"
host_path = "/ecs/mb-home"
}
}
resource "aws_ecs_service" "mb" {
name = var.mb_repository_url
cluster = aws_ecs_cluster.mb.id
task_definition = aws_ecs_task_definition.mb.arn
desired_count = 2
iam_role = var.aws_iam_role_ecs
depends_on = [aws_autoscaling_group.mb]
load_balancer {
target_group_arn = var.target_group_arn
container_name = var.mb_image_name
container_port = var.mb_port
}
}
resource "aws_autoscaling_group" "mb" {
name = var.mb_image_name
availability_zones = ["${var.availability_zone}"]
min_size = var.min_instance_size
max_size = var.max_instance_size
desired_capacity = var.desired_instance_capacity
health_check_type = "EC2"
health_check_grace_period = 300
launch_configuration = aws_launch_configuration.mb.name
vpc_zone_identifier = flatten([var.vpc_zone_identifier])
lifecycle {
create_before_destroy = true
}
}
data "template_file" "user_data" {
template = file("${path.module}/templates/user_data.tpl")
vars = {
ecs_cluster_name = "${var.mb_image_name}"
}
}
resource "aws_launch_configuration" "mb" {
name_prefix = var.mb_image_name
image_id = var.ami
instance_type = var.instance_type
security_groups = ["${var.aws_security_group}"]
iam_instance_profile = var.aws_iam_instance_profile
key_name = var.key_name
associate_public_ip_address = true
user_data = data.template_file.user_data.rendered
lifecycle {
create_before_destroy = true
}
}
resource "aws_cloudwatch_log_group" "mb" {
name = var.mb_image_name
retention_in_days = 14
}
ALB
locals {
target_groups = ["1", "2"]
}
resource "aws_alb" "mb" {
name = "${var.mb_image_name}-alb"
internal = false
load_balancer_type = "application"
security_groups = ["${aws_security_group.mb_alb.id}"]
subnets = var.subnets
tags = {
Name = var.mb_image_name
}
}
resource "aws_alb_target_group" "mb" {
count = length(local.target_groups)
name = "${var.mb_image_name}-tg-${element(local.target_groups, count.index)}"
port = var.mb_port
protocol = "HTTP"
vpc_id = var.vpc_id
target_type = "instance"
health_check {
path = "/health"
protocol = "HTTP"
timeout = "10"
interval = "15"
healthy_threshold = "3"
unhealthy_threshold = "3"
matcher = "200-299"
}
lifecycle {
create_before_destroy = true
}
tags = {
Name = var.mb_image_name
}
}
resource "aws_alb_listener" "mb_https" {
load_balancer_arn = aws_alb.mb.arn
port = 443
protocol = "HTTPS"
ssl_policy = "ELBSecurityPolicy-2016-08"
certificate_arn = module.dns.certificate_arn
default_action {
type = "forward"
target_group_arn = aws_alb_target_group.mb.0.arn
}
}
resource "aws_alb_listener_rule" "mb_https" {
listener_arn = aws_alb_listener.mb_https.arn
priority = 100
action {
type = "forward"
target_group_arn = aws_alb_target_group.mb.0.arn
}
condition {
field = "path-pattern"
values = ["/health/"]
}
}
Okay, it looks like the code above is working. I had a different issue with networking.
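For reference, with dynamic port mapping the ALB reaches the container instances on the ephemeral port range rather than on var.mb_port, so the instance security group has to allow that range from the ALB's security group. A minimal sketch (assuming var.aws_security_group is the instance security group ID and that 32768-65535 covers the ephemeral range used by the ECS-optimized AMI):
resource "aws_security_group_rule" "ecs_ingress_from_alb" {
  type                     = "ingress"
  description              = "ALB to container instances on the dynamic port range"
  from_port                = 32768
  to_port                  = 65535
  protocol                 = "tcp"
  security_group_id        = var.aws_security_group
  source_security_group_id = aws_security_group.mb_alb.id
}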

How do I update desired_count for existing ECS service with CODE_DEPLOY deployment controller?

When I update desired_count, the Terraform planner shows that the operation will be an update in-place. However, when Terraform tries to apply the changes, I get the following error:
Terraform v0.12.21
Initializing plugins and modules...
2020/03/05 22:10:52 [DEBUG] Using modified User-Agent: Terraform/0.12.21 TFC/8f5a579db5
module.web.aws_ecs_service.web[0]: Modifying... [id=arn:aws:ecs:us-east-1:55555:service/web/web]
Error: Error updating ECS Service (arn:aws:ecs:us-east-1:55555:service/web/web): InvalidParameterException: Unable to update network parameters on services with a CODE_DEPLOY deployment controller. Use AWS CodeDeploy to trigger a new deployment.
The terraform code used to reproduce this looks something like:
resource "aws_lb" "platform" {
name = "platform"
internal = false
load_balancer_type = "application"
ip_address_type = "ipv4"
security_groups = [aws_security_group.lb.id]
subnets = [for subnet in aws_subnet.lb : subnet.id]
enable_deletion_protection = true
tags = {
Name = "platform"
Type = "Public"
}
}
resource "aws_lb_target_group" "platform" {
count = 2
name = "platform-tg-${count.index + 1}"
vpc_id = var.vpc_id
protocol = "HTTP"
port = 80
target_type = "ip"
stickiness {
type = "lb_cookie"
enabled = false
}
health_check {
path = "/healthcheck"
port = var.container_port
protocol = "HTTP"
timeout = 5
healthy_threshold = 5
unhealthy_threshold = 3
matcher = "200"
}
tags = {
Name = "platform-tg-${count.index + 1}"
Type = "Public"
}
}
resource "aws_lb_listener" "platform-https" {
load_balancer_arn = aws_lb.platform.arn
port = 443
protocol = "HTTPS"
ssl_policy = "ELBSecurityPolicy-TLS-1-2-Ext-2018-06"
certificate_arn = var.certificate_arn
depends_on = [aws_lb_target_group.platform]
default_action {
type = "forward"
target_group_arn = aws_lb_target_group.platform[0].arn
}
lifecycle {
ignore_changes = [
default_action
]
}
}
locals {
family = "platform"
container_name = "web"
}
resource "aws_cloudwatch_log_group" "platform" {
name = "/aws/ecs/platform"
retention_in_days = 3653
tags = {
Name = "platform"
}
}
resource "aws_ecs_task_definition" "platform" {
family = local.family
requires_compatibilities = ["FARGATE"]
cpu = var.service.cpu
memory = var.service.memory
network_mode = "awsvpc"
execution_role_arn = aws_iam_role.ecs_task_execution.arn
task_role_arn = aws_iam_role.ecs_task_execution.arn
container_definitions = jsonencode(
jsondecode(
templatefile("${path.module}/taskdef.json", {
family = local.family
container_name = local.container_name
region = var.region
account_id = var.account_id
cpu = var.service.cpu
memory = var.service.memory
image = var.service.container_image
log_group = aws_cloudwatch_log_group.platform.name
node_env = var.node_env
port = var.container_port
platform_url = var.platform_url
short_url = var.short_url
cdn_url = var.cdn_url
})
).containerDefinitions
)
tags = {
Name = "platform"
Type = "Private"
}
}
resource "aws_ecs_cluster" "platform" {
name = "platform"
setting {
name = "containerInsights"
value = "enabled"
}
tags = {
Name = "platform"
Type = "Public"
}
}
data "aws_lb_listener" "current-platform" {
arn = aws_lb_listener.platform-https.arn
}
data "aws_ecs_task_definition" "current-platform" {
task_definition = local.family
}
resource "aws_ecs_service" "platform" {
count = var.delete_platform_ecs_service ? 0 : 1
name = "platform"
cluster = aws_ecs_cluster.platform.arn
launch_type = "FARGATE"
desired_count = var.service.container_count
enable_ecs_managed_tags = true
task_definition = "${aws_ecs_task_definition.platform.family}:${max(aws_ecs_task_definition.platform.revision, data.aws_ecs_task_definition.current-platform.revision)}"
depends_on = [aws_lb_target_group.platform]
load_balancer {
target_group_arn = data.aws_lb_listener.current-platform.default_action[0].target_group_arn
container_name = local.container_name
container_port = var.container_port
}
network_configuration {
subnets = sort([for subnet in aws_subnet.ecs : subnet.id])
security_groups = [aws_security_group.ecs.id]
}
deployment_controller {
type = "CODE_DEPLOY"
}
lifecycle {
// NOTE: Based on: https://docs.aws.amazon.com/cli/latest/reference/ecs/update-service.html
// If the network configuration, platform version, or task definition need to be updated, a new AWS CodeDeploy deployment should be created.
ignore_changes = [
load_balancer,
network_configuration,
task_definition
]
}
tags = {
Name = "platform"
Type = "Private"
}
}
This is using Terraform v0.12.21. Full debug output is available at: https://gist.github.com/jgeurts/f4d930608a119e9cd75a7a54b111ee7c
This is maybe not the best answer, but I wasn't able to get Terraform to adjust only the desired_count. Instead, I added auto scaling to the ECS service:
Ignore desired_count:
resource "aws_ecs_service" "platform" {
...
lifecycle {
// NOTE: Based on: https://docs.aws.amazon.com/cli/latest/reference/ecs/update-service.html
// If the network configuration, platform version, or task definition need to be updated, a new AWS CodeDeploy deployment should be created.
ignore_changes = [
desired_count, # Preserve desired count when updating an autoscaled ECS Service
load_balancer,
network_configuration,
task_definition,
]
}
}
Add auto-scaling:
resource "aws_appautoscaling_target" "platform" {
max_capacity = var.max_capacity
min_capacity = var.min_capacity
resource_id = "service/${aws_ecs_cluster.platform.name}/${aws_ecs_cluster.platform.name}"
scalable_dimension = "ecs:service:DesiredCount"
service_namespace = "ecs"
depends_on = [
aws_ecs_cluster.platform,
]
}
resource "aws_appautoscaling_policy" "platform" {
name = "platform-auto-scale"
service_namespace = aws_appautoscaling_target.platform.service_namespace
resource_id = aws_appautoscaling_target.platform.resource_id
scalable_dimension = aws_appautoscaling_target.platform.scalable_dimension
policy_type = "TargetTrackingScaling"
target_tracking_scaling_policy_configuration {
target_value = var.service.autoscale_target_cpu_percentage
scale_out_cooldown = 60
scale_in_cooldown = 300
predefined_metric_specification {
predefined_metric_type = "ECSServiceAverageCPUUtilization"
}
}
}
resource "aws_appautoscaling_scheduled_action" "platform_0430_increase_min_capacity" {
name = "platform-0430-increase-min-capacity"
schedule = "cron(30 4 * * ? *)"
service_namespace = aws_appautoscaling_target.platform.service_namespace
resource_id = aws_appautoscaling_target.platform.resource_id
scalable_dimension = aws_appautoscaling_target.platform.scalable_dimension
scalable_target_action {
min_capacity = var.min_capacity + 4
max_capacity = var.max_capacity
}
}
resource "aws_appautoscaling_scheduled_action" "platform_0615_restore_min_capacity" {
name = "platform-0615-restore-min-capacity"
schedule = "cron(15 06 * * ? *)"
service_namespace = aws_appautoscaling_target.platform.service_namespace
resource_id = aws_appautoscaling_target.platform.resource_id
scalable_dimension = aws_appautoscaling_target.platform.scalable_dimension
scalable_target_action {
min_capacity = var.min_capacity
max_capacity = var.max_capacity
}
}
resource "aws_appautoscaling_scheduled_action" "platform_weekday_0945_increase_min_capacity" {
name = "platform-weekday-0945-increase-min-capacity"
schedule = "cron(45 9 ? * MON-FRI *)"
service_namespace = aws_appautoscaling_target.platform.service_namespace
resource_id = aws_appautoscaling_target.platform.resource_id
scalable_dimension = aws_appautoscaling_target.platform.scalable_dimension
scalable_target_action {
min_capacity = var.min_capacity + 4
max_capacity = var.max_capacity
}
}
resource "aws_appautoscaling_scheduled_action" "platform_weekday_2100_restore_min_capacity" {
name = "platform-weekday-2100-restore-min-capacity"
schedule = "cron(0 2100 ? * MON-FRI *)"
service_namespace = aws_appautoscaling_target.platform.service_namespace
resource_id = aws_appautoscaling_target.platform.resource_id
scalable_dimension = aws_appautoscaling_target.platform.scalable_dimension
scalable_target_action {
min_capacity = var.min_capacity
max_capacity = var.max_capacity
}
}
