I created an ECS cluster, along with a load balancer, to expose a basic hello-world Node app on Fargate using Terraform. Terraform creates my AWS resources just fine and deploys the correct image to ECS Fargate, but the task never passes the initial health check and restarts indefinitely. I suspect a port-forwarding problem, but as far as I can tell my Dockerfile, load balancer, and task definition all expose the correct ports.
Below is the error I see when looking at my service's "events" tab on the ECS dashboard:
service my-first-service (port 2021) is unhealthy in target-group target-group due to (reason Request timed out).
Below are my application code, the Dockerfile, and the Terraform files I am using to deploy to Fargate:
index.js
const express = require('express')
const app = express()
const port = 2021
app.get('/', (req, res) => res.send('Hello World!'))
app.listen(port, () => console.log(`Example app listening on port ${port}!`))
Dockerfile
# Use an official Node runtime as a parent image
FROM node:12.7.0-alpine
# Set the working directory to /app
WORKDIR '/app'
# Copy package.json to the working directory
COPY package.json .
# Install any needed packages specified in package.json
RUN yarn
# Copying the rest of the code to the working directory
COPY . .
# Make port 2021 available to the world outside this container
EXPOSE 2021
# Run index.js when the container launches
CMD ["node", "index.js"]
application_load_balancer_target_group.tf
resource "aws_lb_target_group" "target_group" {
name = "target-group"
port = 80
protocol = "HTTP"
target_type = "ip"
vpc_id = "${aws_default_vpc.default_vpc.id}" # Referencing the default VPC
health_check {
matcher = "200,301,302"
path = "/"
}
}
resource "aws_lb_listener" "listener" {
load_balancer_arn = "${aws_alb.application_load_balancer.arn}" # Referencing our load balancer
port = "80"
protocol = "HTTP"
default_action {
type = "forward"
target_group_arn = "${aws_lb_target_group.target_group.arn}" # Referencing our target group
}
}
application_load_balancer.tf
resource "aws_alb" "application_load_balancer" {
name = "test-lb-tf" # Naming our load balancer
load_balancer_type = "application"
subnets = [ # Referencing the default subnets
"${aws_default_subnet.default_subnet_a.id}",
"${aws_default_subnet.default_subnet_b.id}",
"${aws_default_subnet.default_subnet_c.id}"
]
# Referencing the security group
security_groups = ["${aws_security_group.load_balancer_security_group.id}"]
}
# Creating a security group for the load balancer:
resource "aws_security_group" "load_balancer_security_group" {
ingress {
from_port = 80 # Allowing traffic in from port 80
to_port = 80
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"] # Allowing traffic in from all sources
}
egress {
from_port = 0 # Allowing any port
to_port = 0 # Allowing any port
protocol = "-1" # Allowing any outgoing protocol
cidr_blocks = ["0.0.0.0/0"] # Allowing traffic out to all IP addresses
}
}
ecs_cluster.tf
resource "aws_ecs_cluster" "my_cluster" {
name = "my-cluster" # Naming the cluster
}
ecs_service.tf
# Providing a reference to our default VPC (these are needed by the aws_ecs_service at the bottom of this file)
resource "aws_default_vpc" "default_vpc" {
}
# Providing a reference to our default subnets (NOTE: Make sure the availability zones match your zone)
resource "aws_default_subnet" "default_subnet_a" {
availability_zone = "us-east-2a"
}
resource "aws_default_subnet" "default_subnet_b" {
availability_zone = "us-east-2b"
}
resource "aws_default_subnet" "default_subnet_c" {
availability_zone = "us-east-2c"
}
resource "aws_ecs_service" "my_first_service" {
name = "my-first-service" # Naming our first service
cluster = "${aws_ecs_cluster.my_cluster.id}" # Referencing our created Cluster
task_definition = "${aws_ecs_task_definition.my_first_task.arn}" # Referencing the task our service will spin up
launch_type = "FARGATE"
desired_count = 1 # Setting the number of containers we want deployed to 1
# NOTE: The following 'load_balancer' snippet was added here after the creation of the application_load_balancer files.
load_balancer {
target_group_arn = "${aws_lb_target_group.target_group.arn}" # Referencing our target group
container_name = "${aws_ecs_task_definition.my_first_task.family}"
container_port = 2021 # Specifying the container port
}
network_configuration {
subnets = ["${aws_default_subnet.default_subnet_a.id}", "${aws_default_subnet.default_subnet_b.id}", "${aws_default_subnet.default_subnet_c.id}"]
assign_public_ip = true # Providing our containers with public IPs
}
}
resource "aws_security_group" "service_security_group" {
ingress {
from_port = 0
to_port = 0
protocol = "-1"
# Only allowing traffic in from the load balancer security group
security_groups = ["${aws_security_group.load_balancer_security_group.id}"]
}
egress {
from_port = 0 # Allowing any port
to_port = 0 # Allowing any port
protocol = "-1" # Allowing any outgoing protocol
cidr_blocks = ["0.0.0.0/0"] # Allowing traffic out to all IP addresses
}
}
ecs_task_definition.tf
resource "aws_ecs_task_definition" "my_first_task" {
family = "my-first-task" # Naming our first task
container_definitions = <<DEFINITION
[
{
"name": "my-first-task",
"image": "${var.ECR_IMAGE_URL}",
"essential": true,
"portMappings": [
{
"containerPort": 2021,
"hostPort": 2021
}
],
"memory": 512,
"cpu": 256
}
]
DEFINITION
requires_compatibilities = ["FARGATE"] # Stating that we are using ECS Fargate
network_mode = "awsvpc" # Using awsvpc as our network mode as this is required for Fargate
memory = 512 # Specifying the memory our container requires
cpu = 256 # Specifying the CPU our container requires
execution_role_arn = "${aws_iam_role.ecsTaskExecutionRole.arn}"
}
resource "aws_iam_role" "ecsTaskExecutionRole" {
name = "ecsTaskExecutionRole"
assume_role_policy = "${data.aws_iam_policy_document.assume_role_policy.json}"
}
data "aws_iam_policy_document" "assume_role_policy" {
statement {
actions = ["sts:AssumeRole"]
principals {
type = "Service"
identifiers = ["ecs-tasks.amazonaws.com"]
}
}
}
resource "aws_iam_role_policy_attachment" "ecsTaskExecutionRole_policy" {
role = "${aws_iam_role.ecsTaskExecutionRole.name}"
policy_arn = "arn:aws:iam::aws:policy/service-role/AmazonECSTaskExecutionRolePolicy"
}
Where am I going wrong here?
I had a similar issue when I was migrating from k8s to ECS Fargate.
My task could not start; it was a nightmare.
The same image worked great in k8s with the same health checks.
I can see that you are missing a healthCheck in your task_definition; at least, that was the issue for me.
Here is my containerDefinition:
container_definitions = jsonencode([{
name = "${var.app_name}-container-${var.environment}"
image = "${var.container_repository}:${var.container_image_version}"
essential = true
environment: concat(
var.custom_env_variables,
[
{
name = "JAVA_TOOL_OPTIONS"
value = "-Xmx${var.container_memory_max_ram}m -XX:MaxRAM=${var.container_memory_max_ram}m -XX:+UseParallelGC -XX:MinHeapFreeRatio=5 -XX:MaxHeapFreeRatio=10 -XX:GCTimeRatio=4"
},
{
name = "SPRING_PROFILES_ACTIVE"
value = var.spring_profile
},
{
name = "APP_NAME"
value = var.spring_app_name
}
]
)
portMappings = [
{
protocol = "tcp"
containerPort = var.container_port
},
{
protocol = "tcp"
containerPort = var.container_actuator_port
}
]
healthCheck = {
retries = 10
command = [ "CMD-SHELL", "curl -f http://localhost:8081/actuator/liveness || exit 1" ]
timeout: 5
interval: 10
startPeriod: var.health_start_period
}
logConfiguration = {
logDriver = "awslogs"
options = {
awslogs-group = aws_cloudwatch_log_group.main.name
awslogs-stream-prefix = "ecs"
awslogs-region = var.aws_region
}
}
mountPoints = [{
sourceVolume = "backend_efs",
containerPath = "/data",
readOnly = false
}]
}])
Here is the healthCheck part:
healthCheck = {
retries = 10
command = [ "CMD-SHELL", "curl -f http://localhost:8081/actuator/liveness || exit 1" ]
timeout: 5
interval: 10
startPeriod: var.health_start_period
}
In order to start, the container needs a way to check that the task is running OK.
I could only get that via curl: I have one endpoint that tells me whether the app is live. You need to specify your own endpoint; it just has to return a 200.
Also, there is no curl command in the image by default; you need to add it in your Dockerfile. That was the next issue where I spent a few hours, as there was no clear error on ECS.
I added this line:
RUN apt-get update && apt-get install -y --no-install-recommends curl
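For the question's setup specifically, note that node:12.7.0-alpine ships neither apt-get nor curl; on Alpine you would install curl with apk add --no-cache curl, or simply use the wget that BusyBox already provides. As a hedged sketch (the values are illustrative, and this is not a fix for the load-balancer timeout itself), a container-level health check for the hello-world app on port 2021 could be added inside the question's container_definitions JSON roughly like this:
"healthCheck": {
  "command": ["CMD-SHELL", "wget -q --spider http://localhost:2021/ || exit 1"],
  "interval": 10,
  "timeout": 5,
  "retries": 10,
  "startPeriod": 30
}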
By the looks of it, you are creating a new VPC with subnets, but there are no route tables defined and no internet gateway created and attached to the VPC. So your VPC is effectively private: it is not accessible from the internet, nor can it reach ECR to pull your Docker image.
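For reference, if the VPC really does lack an internet gateway and a public route table, the missing pieces would look roughly like the sketch below. The resource names here are illustrative and not taken from the question, and note that a VPC can only have one internet gateway; the default VPC that aws_default_vpc adopts usually already has one.
# Illustrative only: an internet gateway and a public route table for the VPC
resource "aws_internet_gateway" "igw" {
  vpc_id = aws_default_vpc.default_vpc.id
}

resource "aws_route_table" "public" {
  vpc_id = aws_default_vpc.default_vpc.id

  # Default route sending internet-bound traffic to the internet gateway
  route {
    cidr_block = "0.0.0.0/0"
    gateway_id = aws_internet_gateway.igw.id
  }
}

# Associate the route table with each subnet the ALB and tasks use
resource "aws_route_table_association" "public_a" {
  subnet_id      = aws_default_subnet.default_subnet_a.id
  route_table_id = aws_route_table.public.id
}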
Maybe instead of creating a new VPC called default_vpc, you want to use the existing default VPC. If so, you have to use a data source:
data "aws_vpc" "default_vpc" {
default = true
}
to get subnets:
data "aws_subnet_ids" "default" {
vpc_id = data.aws_vpc.default_vpc.id
}
and modify the rest of the code to reference these data sources.
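For example, the target group and ALB from the question could be pointed at those data sources roughly as follows; everything except the vpc_id and subnets arguments stays as in the original code:
resource "aws_lb_target_group" "target_group" {
  name        = "target-group"
  port        = 80
  protocol    = "HTTP"
  target_type = "ip"
  vpc_id      = data.aws_vpc.default_vpc.id   # was aws_default_vpc.default_vpc.id

  health_check {
    matcher = "200,301,302"
    path    = "/"
  }
}

resource "aws_alb" "application_load_balancer" {
  name               = "test-lb-tf"
  load_balancer_type = "application"
  subnets            = data.aws_subnet_ids.default.ids   # was the three aws_default_subnet references
  security_groups    = [aws_security_group.load_balancer_security_group.id]
}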
Also, for Fargate you should remove the host port mapping (on Fargate the host port must either be omitted or match the container port):
"hostPort": 2021
You also forgot to attach a security group to your ECS service's network configuration. It should be:
network_configuration {
subnets = data.aws_subnet_ids.default.ids
assign_public_ip = true # Providing our containers with public IPs
security_groups = [aws_security_group.service_security_group.id]
}
Related
I am trying to implement the AKS baseline with Terraform, but I can't get my Application Gateway to connect to the internal load balancer created by AKS.
My AKS config consists of a Solr instance and a service with the azure-load-balancer-internal annotation. AKS and the created LB are in the same subnet, while the Application Gateway has its own subnet, but they are all in the same VNET.
Kubernetes.tf
resource "kubernetes_service" "solr-service" {
metadata {
name = local.solr.name
annotations = {
"service.beta.kubernetes.io/azure-load-balancer-internal" : "true"
"service.beta.kubernetes.io/azure-load-balancer-internal-subnet" : "aks-subnet"
}
}
spec {
external_traffic_policy = "Local"
selector = {
app = kubernetes_deployment.solr.metadata.0.labels.app
}
port {
name = "http"
port = 80
target_port = 8983
}
type = "LoadBalancer"
load_balancer_ip = "192.168.1.200"
}
}
This config creates an internal load balancer in the MC_* resource group with frontend IP 192.168.1.200. The health check in the metrics blade returns 100, so it looks like the created internal load balancer is working as expected.
Now I am trying to add this load balancer as a backend_pool target in my Application Gateway.
application-gateway.tf
resource "azurerm_application_gateway" "agw" {
name = local.naming.agw_name
resource_group_name = azurerm_resource_group.this.name
location = azurerm_resource_group.this.location
sku {
name = "Standard_Medium"
tier = "Standard"
capacity = 1
}
gateway_ip_configuration {
name = "Gateway-IP-Config"
subnet_id = azurerm_subnet.agw_snet.id
}
frontend_port {
name = "http-port"
port = 80
}
frontend_ip_configuration {
name = "public-ip"
public_ip_address_id = azurerm_public_ip.agw_ip.id
}
backend_address_pool {
name = "lb"
ip_addresses = ["192.168.1.200"]
}
backend_http_settings {
name = "settings"
cookie_based_affinity = "Disabled"
port = 80
protocol = "Http"
request_timeout = 60
}
http_listener {
name = "http-listener"
frontend_ip_configuration_name = "public-ip"
frontend_port_name = "http-port"
protocol = "Http"
}
request_routing_rule {
name = local.request_routing_rule_name
rule_type = "Basic"
http_listener_name = "http-listener"
backend_address_pool_name = "lb"
backend_http_settings_name = "settings"
}
}
I would expect the Application Gateway to now be connected to the internal load balancer and to forward all requests to it. But I get the message that all backend pools are unhealthy, so it looks like the Gateway can't reach the provided IP.
I took a look at the Azure Git baseline, but as far as I can see they are using FQDNs instead of IPs. I am pretty sure it's just some minor configuration issue, but I just can't find it.
I already tried using the Application Gateway as an ingress controller (or HTTP routing), and that worked, but I would like to implement it with an internal load balancer. I also tried adding a health check to the backend node pool, which did not work.
EDIT: I changed the LB to public and added the public IP to the Application Gateway and everything worked, so it looks like this is the issue. But I don't understand why the Application Gateway can't access the sibling subnet; I don't have any restrictions in place, and by default Azure allows communication between subnets.
My mistake was to place the internal load balancer in the same subnet as my Kubernetes nodes. When I changed the code and gave it its own subnet, everything worked out fine. My final service config:
resource "kubernetes_service" "solr-service" {
metadata {
name = local.solr.name
annotations = {
"service.beta.kubernetes.io/azure-load-balancer-internal" : "true"
"service.beta.kubernetes.io/azure-load-balancer-internal-subnet" : "lb-subnet"
}
}
spec {
external_traffic_policy = "Local"
selector = {
app = kubernetes_deployment.solr.metadata.0.labels.app
}
port {
name = "http"
port = 80
target_port = 8983
}
type = "LoadBalancer"
load_balancer_ip = "192.168.3.200"
}
}
I'm using Terraform to deploy EKS, RDS, and Kubernetes resources and trying to run Moodle. The Moodle image is bitnami/moodle:latest.
Both EKS and RDS run in the same VPC and are created without problems. However, when I try to deploy Moodle on Kubernetes with RDS, it cannot reach the database. What is wrong with the database connection?
root@master:~/moodle_deploy# kubectl get pod
NAME READY STATUS RESTARTS AGE
moodle-54f477b7f8-nh98h 0/1 CrashLoopBackOff 15 70m
moodle-54f477b7f8-rhzzj 0/1 CrashLoopBackOff 15 70m
root@master:~/moodle_deploy# kubectl logs moodle-54f477b7f8-rhzzj
moodle 06:33:12.43
moodle 06:33:12.43 Welcome to the Bitnami moodle container
moodle 06:33:12.43 Subscribe to project updates by watching https://github.com/bitnami/containers
moodle 06:33:12.43 Submit issues and feature requests at https://github.com/bitnami/containers/issues
moodle 06:33:12.43
moodle 06:33:12.44 INFO ==> ** Starting Moodle setup **
realpath: /bitnami/apache/conf: No such file or directory
moodle 06:33:12.46 INFO ==> Configuring Apache ServerTokens directive
moodle 06:33:12.49 INFO ==> Configuring PHP options
moodle 06:33:12.49 INFO ==> Setting PHP expose_php option
moodle 06:33:12.51 INFO ==> Validating settings in MYSQL_CLIENT_* env vars
moodle 06:33:12.53 INFO ==> Validating settings in POSTGRESQL_CLIENT_* env vars
moodle 06:33:12.69 INFO ==> Ensuring Moodle directories exist
moodle 06:33:12.78 INFO ==> Trying to connect to the database server
moodle 06:34:12.90 ERROR ==> Could not connect to the database
root@master:~/moodle_deploy# kubectl get pvc
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
moodle-pv-claim Bound pvc-eefbb1d9-a6fd-4cf5-b044-159145798114 1Gi RWO gp2 59m
root@master:~/moodle_deploy# kubectl get pv
NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE
pvc-eefbb1d9-a6fd-4cf5-b044-159145798114 1Gi RWO Delete Bound default/moodle-pv-claim gp2 59m
Below is the code that defines RDS and the Kubernetes resources among the Terraform files I deployed:
//SECURITY GROUP
resource "aws_security_group" "secgrp-rds" {
name = "secgrp-rdis"
description = "Allow MySQL Port"
vpc_id = module.vpc.vpc_id
ingress {
description = "Allowing Connection for mysql"
from_port = 3306
to_port = 3306
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
ingress {
description = "Allowing Connection for SSH"
from_port = 22
to_port = 22
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
egress {
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
}
}
resource "aws_db_subnet_group" "sub_ids" {
name = "main"
subnet_ids = module.vpc.private_subnets
tags = {
Name = "DB subnet group"
}
}
//RDS INSTANCE
resource "aws_db_instance" "rds" {
depends_on=[aws_security_group.secgrp-rds]
engine = "mysql"
engine_version = "5.7"
instance_class = "db.t2.micro"
allocated_storage = 20
storage_type = "gp2"
name = "moodledb"
identifier = "moodledb"
username = "root"
password = "test12345"
parameter_group_name = "default.mysql5.7"
#publicly_accessible = true
skip_final_snapshot = true
apply_immediately = true
iam_database_authentication_enabled = true
db_subnet_group_name = aws_db_subnet_group.sub_ids.id
vpc_security_group_ids = [aws_security_group.secgrp-rds.id]
}
//CREATE DEPLOYMENT - provide rds environment variables
resource "kubernetes_deployment" "moodle" {
depends_on = [aws_db_instance.rds]
metadata {
name = "moodle"
labels = {
App = "moodle"
}
}
spec {
replicas = 2
selector {
match_labels = {
App = "moodle"
}
}
template {
metadata {
labels = {
App = "moodle"
}
}
spec {
container {
image = "bitnami/moodle:latest"
name = "moodle"
env{
name = "MOODLE_DATABASE_TYPE"
value = "mysqli"
}
env{
name = "MOODLE_DATABASE_HOST"
value = aws_db_instance.rds.endpoint
}
env{
name = "MOODLE_DATABASE_USER"
value = aws_db_instance.rds.username
}
env{
name = "MOODLE_DATABASE_PASSWORD"
value = aws_db_instance.rds.password
}
env{
name = "MOODLE_DATABASE_NAME"
value = aws_db_instance.rds.db_name
}
port {
container_port = 8080
}
volume_mount {
name = "moodle-ps"
mount_path = "/opt/bitnami/apache2/htdocs/"
}
resources {
limits= {
cpu = "0.5"
memory = "512Mi"
}
requests= {
cpu = "250m"
memory = "50Mi"
}
}
}
volume {
name = "moodle-ps"
persistent_volume_claim {
claim_name = "moodle-pv-claim"
}
}
}
}
}
}
resource "kubernetes_service" "moodle" {
depends_on = [kubernetes_deployment.moodle]
metadata {
name = "moodle"
}
spec {
port {
port = 80
target_port = 8080
}
type = "LoadBalancer"
}
}
resource "kubernetes_storage_class" "kubeSC" {
metadata {
name = "kubese"
}
storage_provisioner = "kubernetes.io/aws-ebs"
reclaim_policy = "Retain"
parameters = {
type = "gp2"
}
}
resource "kubernetes_persistent_volume_claim" "pvc" {
depends_on=[kubernetes_storage_class.kubeSC]
metadata {
name = "moodle-pv-claim"
labels = {
"app" = "moodle"
}
}
spec {
access_modes = ["ReadWriteOnce"]
resources {
requests = {
storage = "1Gi"
}
}
}
}
I'm deploying infrastructure on Azure using Terraform.
I'm using modules for a Linux scale set and a load balancer, and azurerm_lb_nat_pool in order to have SSH access to the VMs.
I now need to retrieve the ports of the NAT rules for other purposes.
For the life of me I cannot find a way to retrieve them; I went through all the Terraform documentation and cannot find them under any data source or attribute reference.
Here is my LB code:
resource "azurerm_lb" "front-load-balancer" {
name = "front-load-balancer"
location = var.def-location
resource_group_name = var.rg-name
sku = "Standard"
frontend_ip_configuration {
name = "frontend-IP-configuration"
public_ip_address_id = var.public-ip-id
}
}
resource "azurerm_lb_nat_pool" "lb-nat-pool" {
resource_group_name = var.rg-name
loadbalancer_id = azurerm_lb.front-load-balancer.id
name = "lb-nat-pool"
protocol = "Tcp"
frontend_port_start = var.frontend-port-start
frontend_port_end = var.frontend-port-end
backend_port = 22
frontend_ip_configuration_name = "frontend-IP-configuration"
}
Any assistance would be very appreciated.
EDIT:
I tried exporting the inbound_nat_rules attribute of the azurerm_lb frontend IP configuration. It gives a list of resource IDs, from which I do not currently know how to extract the ports:
output "frontend-ip-confguration-inbound-nat-rules" {
value = azurerm_lb.front-load-balancer.frontend_ip_configuration[*].inbound_nat_rules
}
Which results in this:
Changes to Outputs:
+ LB-frontend-IP-confguration-Inbound-nat-rules = [
+ [
+ "/subscriptions/XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/resourceGroups/weight-tracker-stage-rg/providers/Microsoft.Network/loadBalancers/front-load-balancer/inboundNatRules/lb-nat-pool.3",
+ "/subscriptions/XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/resourceGroups/weight-tracker-stage-rg/providers/Microsoft.Network/loadBalancers/front-load-balancer/inboundNatRules/lb-nat-pool.4",
+ "/subscriptions/XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/resourceGroups/weight-tracker-stage-rg/providers/Microsoft.Network/loadBalancers/front-load-balancer/inboundNatRules/lb-nat-pool.6",
],
]
The first run of terraform apply gives "Error: Unable to start container: Error response from daemon: network vaccine-net not found".
On the second run everything works fine.
I thought Terraform manages the dependencies itself, but obviously it fails here. Do I have a mistake in my file? Or can I declare the dependency myself?
.tf File:
# Setting up docker network
resource "docker_network" "private_network" {
name = "vaccine-net"
attachable = true
ipam_config {
subnet = "10.0.1.0/24"
gateway = "10.0.1.254"
}
}
# create db container
resource "docker_container" "bootstrapper" {
name = "bootstrapper"
image = "vaccine/bootstrapper"
networks_advanced {
name = "vaccine-net"
ipv4_address ="10.0.1.1"
}
}
Are you sure your network is created before the container? There is no reference linking your container to your network, so Terraform cannot infer the ordering. Try referencing the docker_network resource directly (preferred):
# Setting up docker network
resource "docker_network" "private_network" {
name = "vaccine-net"
attachable = true
ipam_config {
subnet = "10.0.1.0/24"
gateway = "10.0.1.254"
}
}
# create db container
resource "docker_container" "bootstrapper" {
name = "bootstrapper"
image = "vaccine/bootstrapper"
networks_advanced {
name = "${docker_network.private_network.name}"
ipv4_address ="10.0.1.1"
}
}
Or explicitly with depends_on:
# Setting up docker network
resource "docker_network" "private_network" {
name = "vaccine-net"
attachable = true
ipam_config {
subnet = "10.0.1.0/24"
gateway = "10.0.1.254"
}
}
# create db container
resource "docker_container" "bootstrapper" {
name = "bootstrapper"
image = "vaccine/bootstrapper"
networks_advanced {
name = "vaccine-net"
ipv4_address ="10.0.1.1"
}
depends_on = [docker_network.private_network]
}
I am looking for a way to attach two target groups to a single ECS service; in other words, my container exposes two ports, but I am only able to map one port from my service to the LB.
So far I am able to create a new listener and target group, and after the target group is created everything looks as expected, but the target group shows "There are no targets registered to this target group".
Here are my target group and listener configurations.
target_group:
resource "aws_lb_target_group" "e_admin" {
name = "${var.env_prefix_name}-admin"
port = 5280
protocol = "HTTP"
vpc_id = "${aws_vpc.VPC.id}"
health_check {
path = "/admin"
healthy_threshold = 2
unhealthy_threshold = 10
port = 5280
timeout = 90
interval = 100
matcher = "401,200"
}
}
Listener:
resource "aws_lb_listener" "admin" {
load_balancer_arn = "${aws_lb.admin_lb.arn}"
port = "5280"
protocol = "HTTP"
default_action {
target_group_arn = "${aws_lb_target_group.e_admin.id}"
type = "forward"
}
}
My question is: how can I add the ECS cluster's Auto Scaling group, or all the instances running in the ECS cluster, to this target group?
AWS recently announced support for multiple target groups for an ECS service.
The currently unreleased 2.22.0 version of the AWS provider contains support for this by allowing multiple load_balancer blocks in the aws_ecs_service resource. Example from the acceptance tests:
resource "aws_ecs_service" "with_alb" {
name = "example"
cluster = "${aws_ecs_cluster.main.id}"
task_definition = "${aws_ecs_task_definition.with_lb_changes.arn}"
desired_count = 1
iam_role = "${aws_iam_role.ecs_service.name}"
load_balancer {
target_group_arn = "${aws_lb_target_group.test.id}"
container_name = "ghost"
container_port = "2368"
}
load_balancer {
target_group_arn = "${aws_lb_target_group.static.id}"
container_name = "ghost"
container_port = "4501"
}
depends_on = [
"aws_iam_role_policy.ecs_service",
]
}
According to https://docs.aws.amazon.com/AmazonECS/latest/developerguide/service-load-balancing.html,
There is a limit of one load balancer or target group per service.
If you want to attach an Auto Scaling group to the target group, use aws_autoscaling_attachment:
https://www.terraform.io/docs/providers/aws/r/autoscaling_attachment.html
resource "aws_autoscaling_attachment" "asg_attachment_bar" {
autoscaling_group_name = "${aws_autoscaling_group.your_asg.id}"
alb_target_group_arn = "${aws_alb_target_group.e_admin.arn}"
}
You can define multiple target groups for the same ECS service using multiple load_balancer blocks.
resource "aws_ecs_service" "ecs_service_1" {
name = "service-1"
cluster = aws_ecs_cluster.ecs_cluster_prod.id
task_definition = aws_ecs_task_definition.ecs_task_definition_1.arn
desired_count = 1
launch_type = "FARGATE"
enable_execute_command = true
# Target group 1
load_balancer {
target_group_arn = aws_lb_target_group.lb_tg_1.arn
container_name = "app"
container_port = 8080
}
# Target group 2
load_balancer {
target_group_arn = aws_lb_target_group.lb_tg_2.arn
container_name = "app"
container_port = 8080
}
network_configuration {
subnets = [aws_subnet.subnet_a.id, aws_subnet.subnet_b.id]
security_groups = [aws_security_group.sg_internal.id]
assign_public_ip = true
}
tags = {
Name = "service-1"
ManagedBy = "terraform"
Environment = "prod"
}
}
You can map the same container and port to both target groups, for example when you have both an external and an internal load balancer.
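As a hedged sketch of that pattern, the two target groups referenced above and their listeners could look roughly like this. The names lb_tg_1, lb_tg_2, aws_vpc.main, aws_lb.external, and aws_lb.internal are assumptions for this example, not resources from the answer itself:
# External-facing target group (assumed names; adjust VPC/LB references to your setup)
resource "aws_lb_target_group" "lb_tg_1" {
  name        = "service-1-external"
  port        = 8080
  protocol    = "HTTP"
  target_type = "ip"              # required for Fargate tasks in awsvpc mode
  vpc_id      = aws_vpc.main.id   # assumed VPC

  health_check {
    path    = "/"
    matcher = "200"
  }
}

# Internal target group pointing at the same container port
resource "aws_lb_target_group" "lb_tg_2" {
  name        = "service-1-internal"
  port        = 8080
  protocol    = "HTTP"
  target_type = "ip"
  vpc_id      = aws_vpc.main.id

  health_check {
    path    = "/"
    matcher = "200"
  }
}

resource "aws_lb_listener" "external" {
  load_balancer_arn = aws_lb.external.arn   # assumed external ALB
  port              = 80
  protocol          = "HTTP"

  default_action {
    type             = "forward"
    target_group_arn = aws_lb_target_group.lb_tg_1.arn
  }
}

resource "aws_lb_listener" "internal" {
  load_balancer_arn = aws_lb.internal.arn   # assumed internal ALB
  port              = 80
  protocol          = "HTTP"

  default_action {
    type             = "forward"
    target_group_arn = aws_lb_target_group.lb_tg_2.arn
  }
}
Both target groups forward to the same container port (8080), so the ECS service registers each task with both of them.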