Azure Frontdoor Dynamic Block not working in Terraform

I'm having some issues coding a dynamic block for Front Door in Terraform. I have found a good working example of one here: https://github.com/spy86/terraform-azure-front-door/blob/main/front_door.tf
However, my Front Door setup is not as complex as that person's, and I do not need everything they have done.
What I am trying to achieve is to put two backend_pools on my front door to enable multiple regions. The only way to do this is to bring in dynamic blocks. Yet when I do this I am getting an error:
│ Error: Unsupported attribute
│
│ on frontdoor.tf line 96, in resource "azurerm_frontdoor" "jctestingfrontdoor":
│ 96: for_each = var.backend_pool_settings.value.backend[*]
│ ├────────────────
│ │ var.backend_pool_settings is a list of object, known only after apply
│
│ Can't access attributes on a list of objects. Did you mean to access an attribute for a specific element of the list, or across all elements of the list?
Here is my Front Door code:
main.tf
resource "azurerm_frontdoor" "jctestingfrontdoor" {
depends_on = [
azurerm_key_vault.jctestingenv_keyvault,
]
name = "testingfrontdoor"
resource_group_name = azurerm_resource_group.Terraform.name
routing_rule {
name = "projroutingrule"
accepted_protocols = ["Http", "Https"]
patterns_to_match = ["/*"]
frontend_endpoints = ["projfrontendendpoint", "${local.frontendendpoint2}"]
forwarding_configuration {
forwarding_protocol = "MatchRequest"
backend_pool_name = "projbackendpool"
}
}
backend_pool_load_balancing {
name = "projloadbalancesettings"
sample_size = 255
successful_samples_required = 1
}
backend_pool_health_probe {
name = "projhealthprobesettings"
path = "/health/probe"
protocol = "Https"
interval_in_seconds = 240
}
dynamic "backend_pool" {
for_each = var.backend_pool_settings[*]
content {
name = var.backend_pool_settings.name
load_balancing_name = var.backend_pool_settings.load_balancing_name
health_probe_name = var.backend_pool_settings.health_probe_name
dynamic "backend" {
for_each = var.backend_pool_settings.backend
content {
address = var.backend_pool_settings.address
host_header = var.backend_pool_settings.host_header
http_port = var.backend_pool_settings.http_port
https_port = var.backend_pool_settings.https_port
priority = var.backend_pool_settings.priority
weight = var.backend_pool_settings.weight
enabled = var.backend_pool_settings.enabled
}
}
}
}
frontend_endpoint {
name = "projfrontendendpoint"
host_name = format("testingfrontdoor.azurefd.net")
}
frontend_endpoint {
name = local.frontendendpoint2
host_name = format("portal-staging.terraform.example")
}
}
resource "azurerm_frontdoor_custom_https_configuration" "portal_staging_https_config" {
depends_on = [
azurerm_frontdoor.jctestingfrontdoor
]
frontend_endpoint_id = "${azurerm_frontdoor.jctestingfrontdoor.id}/frontendEndpoints/${local.frontendendpoint2}"
custom_https_provisioning_enabled = true
custom_https_configuration {
certificate_source = "AzureKeyVault"
azure_key_vault_certificate_secret_name = "imported-cert"
azure_key_vault_certificate_vault_id = azurerm_key_vault.jctestingenv_keyvault.id
}
}
variables.tf
variable "backend_pool_settings" {
description = "backend pool stettings for frontdoor"
type = object({
name = string
backend = list(object({
address = string
host_header = string
http_port = number
https_port = number
weight = number
priority = number
enabled = bool
}))
load_balancing_name = string
health_probe_name = string
})
}
locals.tf
locals {
frontendendpoint2 = "projfrondoordnsname"
backendpool1 = "uksouth"
backendpool2 = "westeurope"
}
inputvariables.tfvars
backend_pool_settings = (
{
name = "uksouth"
backend = {
address = "portal-staging-testing1.terraform.example"
host_header = "portal-staging-testing1.terraform.example"
http_port = 80
https_port = 443
priority = 1
weight = 50
enabled = true
}
load_balancing_name = "projloadbalancesettings"
health_probe_name = "projloadbalancesettings"
},
{
name = "westeurope"
backend = {
address = "portal-staging-testing2.terraform.example"
host_header = "portal-staging-testing2.terraform.example"
http_port = 80
https_port = 443
priority = 1
weight = 50
enabled = true
}
load_balancing_name = "projloadbalancesettings"
health_probe_name = "projloadbalancesettings"
})
I have coded the variables as a list of objects, but I'm not sure that's the right thing to do, and I'm not sure whether I should split the backend_pool into two dynamic blocks like in the example.
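For reference, when iterating a list of objects with nested dynamic blocks, the content block normally refers to each block's iterator (backend_pool.value, backend.value) rather than back to the variable itself. A minimal sketch of that shape, assuming the list(object) variable declared in variables.tf above and a tfvars value written as a list ([ ... ]) rather than in parentheses:
dynamic "backend_pool" {
  for_each = var.backend_pool_settings
  content {
    name                = backend_pool.value.name
    load_balancing_name = backend_pool.value.load_balancing_name
    health_probe_name   = backend_pool.value.health_probe_name

    dynamic "backend" {
      # iterate the backends of the pool currently being generated
      for_each = backend_pool.value.backend
      content {
        address     = backend.value.address
        host_header = backend.value.host_header
        http_port   = backend.value.http_port
        https_port  = backend.value.https_port
        priority    = backend.value.priority
        weight      = backend.value.weight
        enabled     = backend.value.enabled
      }
    }
  }
}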
UPDATE:
After working through my code I have simplified it a bit more:
resource "azurerm_frontdoor" "jctestingfrontdoor" {
depends_on = [
azurerm_key_vault.jctestingenv_keyvault,
]
name = "testingfrontdoor"
resource_group_name = azurerm_resource_group.Terraform.name
routing_rule {
name = "projroutingrule"
accepted_protocols = ["Http", "Https"]
patterns_to_match = ["/*"]
frontend_endpoints = ["projfrontendendpoint", "${local.frontendendpoint2}"]
forwarding_configuration {
forwarding_protocol = "MatchRequest"
backend_pool_name = "projbackendpool"
}
}
backend_pool_load_balancing {
name = "projloadbalancesettings"
sample_size = 255
successful_samples_required = 1
}
backend_pool_health_probe {
name = "projhealthprobesettings"
path = "/health/probe"
protocol = "Https"
interval_in_seconds = 240
}
backend_pool {
name = "projbackendpool"
dynamic "backend" {
for_each = var.backend_pool_settings.value.backend[*]
content {
address = backend.address
host_header = backend.host_header
http_port = backend.http_port
https_port = backend.https_port
priority = backend.priority
weight = backend.weight
enabled = backend.enabled
}
}
load_balancing_name = "projloadbalancesettings"
health_probe_name = "projhealthprobesettings"
}
frontend_endpoint {
name = "projfrontendendpoint"
host_name = format("testingfrontdoor.azurefd.net")
}
frontend_endpoint {
name = local.frontendendpoint2
host_name = format("portal-staging.terraform.example")
}
}
Now the error I'm getting is:
│ Error: Unsupported attribute
│
│ on frontdoor.tf line 96, in resource "azurerm_frontdoor" "jctestingfrontdoor":
│ 96: for_each = var.backend_pool_settings.value.backend[*]
│ ├────────────────
│ │ var.backend_pool_settings is a list of object, known only after apply
│
│ Can't access attributes on a list of objects. Did you mean to access an attribute for a specific element of the list, or across all elements of the list?

I have managed to fix this by playing about with the map variable. Basically, Front Door does not require the backend object to be specified explicitly, as it already knows it is building a backend. I also played around with a few other bits of code and got this working; see my code below:
main.tf
resource "azurerm_frontdoor" "jctestingfrontdoor" {
depends_on = [
azurerm_key_vault.jctestingenv_keyvault,
]
name = "testingfrontdoor"
resource_group_name = azurerm_resource_group.terraform.name
routing_rule {
name = "projroutingrule"
accepted_protocols = ["Http", "Https"]
patterns_to_match = ["/*"]
frontend_endpoints = ["projfrontendendpoint", "${local.frontendendpoint2}"]
forwarding_configuration {
forwarding_protocol = "MatchRequest"
backend_pool_name = "projbackendpool"
}
}
backend_pool_load_balancing {
name = "projloadbalancesettings"
sample_size = 255
successful_samples_required = 1
}
backend_pool_health_probe {
name = "projhealthprobesettings"
path = "/health/probe"
protocol = "Https"
interval_in_seconds = 240
}
backend_pool {
name = "projbackendpool"
dynamic "backend" {
for_each = var.backend_pool_settings
content {
address = backend.value.address
host_header = backend.value.host_header
http_port = backend.value.http_port
https_port = backend.value.https_port
priority = backend.value.priority
weight = backend.value.weight
enabled = backend.value.enabled
}
}
load_balancing_name = "projloadbalancesettings"
health_probe_name = "projhealthprobesettings"
}
frontend_endpoint {
name = "projfrontendendpoint"
host_name = format("testingfrontdoor.azurefd.net")
}
frontend_endpoint {
name = local.frontendendpoint2
host_name = format("portal-staging.terraform.example")
}
}
resource "azurerm_frontdoor_custom_https_configuration" "portal_staging_https_config" {
depends_on = [
azurerm_frontdoor.jctestingfrontdoor
]
frontend_endpoint_id = "${azurerm_frontdoor.jctestingfrontdoor.id}/frontendEndpoints/${local.frontendendpoint2}"
custom_https_provisioning_enabled = true
custom_https_configuration {
certificate_source = "AzureKeyVault"
azure_key_vault_certificate_secret_name = "imported-cert"
azure_key_vault_certificate_vault_id = azurerm_key_vault.jctestingenv_keyvault.id
}
}
variables.tf
variable "backend_pool_settings" {
description = "backend pool stettings for frontdoor"
type = map(object({
address = string
host_header = string
http_port = number
https_port = number
weight = number
priority = number
enabled = bool
}))
}
inputvariables.tfvars
backend_pool_settings = {
backendone = {
address = "portal-staging-testing1.terraform.example"
host_header = "portal-staging-testing1.terraform.example"
http_port = 80
https_port = 443
priority = 1
weight = 50
enabled = true
},
backendtwo = {
address = "portal-staging-testing2.terraform.example"
host_header = "portal-staging-testing2.terraform.example"
http_port = 80
https_port = 443
priority = 1
weight = 50
enabled = true
}
}
This post also helped me figure out working with map objects in Terraform: https://serverfault.com/questions/1063395/terraform-values-from-tfvars-are-not-loading-when-using-multi-level-maps
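As an aside, once the settings live in a map of objects like the final variable above, individual values can be pulled out with for expressions; a small sketch (the local names are just for illustration):
locals {
  # every backend address defined in the map
  backend_addresses = [for key, backend in var.backend_pool_settings : backend.address]

  # only the enabled backends, keyed by the same map keys
  enabled_backends = { for key, backend in var.backend_pool_settings : key => backend if backend.enabled }
}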

Related

How to merge/combine two variables into one variable in Terraform?

I've been trying to merge/combine two variables into a single one (a different one, as in var1 + var2 = merged into var3).
I am trying to create a for_each loop in the code, and my variables are:
variable "apps" {
type = map(object({
app_name = string
labels = map(string)
annotations = map(string)
image = string
}))
default = {
"app1_name" = {
app_name = "app1_name"
labels = {
"name" = "stream-frontend"
"tier" = "web"
"owner" = "product"
}
annotations = {
"serviceClass" = "web-frontend"
"loadBalancer/class" = "external"
}
image = "nxinx"
}
"app2_name" = {
app_name = "app2_name"
labels = {
"name" = "stream-frontend"
"tier" = "web"
"owner" = "product"
}
annotations = {
"serviceClass" = "web-frontend"
"loadBalancer/class" = "external"
}
image = "nginx"
}
"app3_name" = {
app_name = "app3_name"
labels = {
"name" = "stream-database"
"tier" = "shared"
"owner" = "product"
}
annotations = {
"serviceClass" = "disabled"
"loadBalancer/class" = "disabled"
}
image = "Mongo"
}
}
}
variable "acl" {
type = map(object({
acl_name = string
ingress = string
egress = string
port = string
protocol = string
}))
default = {
"frontend" = {
acl_name = "acl_frontend"
ingress = "stream-frontend"
egress = "0.0.0.0/0"
port = "80"
protocol = "TCP"
},
"backend" = {
acl_name = "acl_backend"
ingress = "stream-backend"
egress = "0.0.0.0/0"
port = "80"
"protocol" = "TCP"
},
"database" = {
acl_name = "acl_database"
"ingress" = "stream-database"
"egress" = "172.17.0.0/24"
"port" = "27017"
"protocol" = "TCP"
}
}
}
I am making a for_each loop to access the values of the variables:
resource "kubernetes_network_policy" "acl" {
for_each = var.merged_vars
metadata {
name = format("%s-acl", each.value.acl_name)
namespace = each.value.acl_name
}
spec {
policy_types = ["Ingress", "Egress"]
pod_selector {
match_labels = {
tier = each.value.labels.tier
}
}
ingress {
from {
namespace_selector {
match_labels = {
name = each.value.ingress
}
}
}
ports {
port = each.value.port
protocol = each.value.protocol
}
}
egress {
to {
ip_block {
cidr = each.value.egress
}
}
}
}
}
Eventually I need to have a way to access the "apps" and "acl" parameters as var.apps.labels, var.acl.port, etc.
Thank you for your help!
I have been trying:
variable "merged_vars" {
default = merge(var.apps, var.acl)
}
and the result I got is:
│ Error: Function calls not allowed
│
│ on NewVars.tf line 96, in variable "merged_vars":
│ 96: default = merge(var.apps, var.acl)
│
│ Functions may not be called here.
I also tried with concat and got the same result.
As the error states, you can't create dynamic variables. But you can create local variables dynamically. So you can do:
locals {
merged_vars = merge(var.apps, var.acl)
}
And for the for_each you will use the local value:
for_each = local.merged_vars
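Putting it together: merge() returns a single map whose keys are the union of the input maps' keys (if the same key appeared in both, the value from the later argument would win), so the resource can iterate it directly. A minimal sketch reusing the variables above, with a hypothetical output added just to show the shape:
locals {
  merged_vars = merge(var.apps, var.acl)
}

output "merged_keys" {
  # e.g. ["app1_name", "app2_name", "app3_name", "backend", "database", "frontend"]
  value = keys(local.merged_vars)
}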

Multiple frontend_endpoint in Azure Front Door with Terraform

I am trying to build an Azure Front Door with Terraform, but I am having an issue when trying to configure two frontends and then bind one of them to a custom HTTPS configuration. I am getting the following error: The argument "frontend_endpoint_id" is required, but no definition was found.
I just can't work out how you would specify two Front Door endpoints and then reference one of them in a custom HTTPS config. Code below.
resource "azurerm_frontdoor" "jccroutingrule" {
depends_on = [
cloudflare_record.create_frontdoor_CNAME,
azurerm_key_vault.jctestingenv_keyvault,
azurerm_key_vault_certificate.jcimportedcert
]
name = "testingfrontdoor"
resource_group_name = azurerm_resource_group.Terraform.name
#enforce_backend_pools_certificate_name_check = false
routing_rule {
name = "jccroutingrule"
accepted_protocols = ["Http", "Https"]
patterns_to_match = ["/*"]
frontend_endpoints = ["jccfrontendendpoint","frontendendpoint2"]
forwarding_configuration {
forwarding_protocol = "MatchRequest"
backend_pool_name = "jccbackendpool"
}
}
backend_pool_load_balancing {
name = "jccloadbalancesettings"
sample_size = 255
successful_samples_required = 1
}
backend_pool_health_probe {
name = "jcchealthprobesettings"
path = "/health/probe"
protocol = "Https"
interval_in_seconds = 240
}
backend_pool {
name = "jccbackendpool"
backend {
host_header = format("portal-staging-westeurope.jason.website")
address = format("portal-staging-westeurope.jason.website")
http_port = 80
https_port = 443
weight = 50
priority = 1
enabled = true
}
load_balancing_name = "jccloadbalancesettings"
health_probe_name = "jcchealthprobesettings"
}
frontend_endpoint {
name = "jccfrontendendpoint"
host_name = format("testingfrontdoor.azurefd.net")
}
frontend_endpoint {
name = "frontendendpoint2"
host_name = format("portal-staging.jason.website")
}
}
resource "azurerm_frontdoor_custom_https_configuration" "portal_staging_https_config" {
frontend_endpoint_id = azurerm_frontdoor.jccroutingrule.frontend_endpoint[1].id
custom_https_provisioning_enabled = true
custom_https_configuration {
certificate_source = "AzureKeyVault"
azure_key_vault_certificate_secret_name = "imported-cert"
azure_key_vault_certificate_vault_id = azurerm_key_vault_certificate.jcimportedcert.id
}
}
From the documentation of azurerm_frontdoor here, I see they export the below field, which I think is of interest to you:
frontend_endpoints - A map/dictionary of Frontend Endpoint Names (key)
to the Frontend Endpoint ID (value)
frontend_endpoints is a map containing the endpoint name as the key and the ID as the value, so you can use the lookup function to extract the value for a given key.
In the end, your azurerm_frontdoor_custom_https_configuration looks like the below:
resource "azurerm_frontdoor_custom_https_configuration" "portal_staging_https_config" {
frontend_endpoint_id = lookup(azurerm_frontdoor.jccroutingrule.frontend_endpoints, "frontendendpoint2", "what?")
custom_https_provisioning_enabled = true
custom_https_configuration {
certificate_source = "AzureKeyVault"
azure_key_vault_certificate_secret_name = "imported-cert"
azure_key_vault_certificate_vault_id = azurerm_key_vault_certificate.jcimportedcert.id
}
}
In case you change your mind and want to use the jccfrontendendpoint endpoint, feel free to put that key into the lookup function :-)
From the Terraform docs:
resource "azurerm_frontdoor_custom_https_configuration" "portal_staging_https_config" {
frontend_endpoint_id = azurerm_frontdoor.jccroutingrule.frontend_endpoint["frontendendpoint2"]
custom_https_provisioning_enabled = true
custom_https_configuration {
certificate_source = "AzureKeyVault"
azure_key_vault_certificate_secret_name = "imported-cert"
azure_key_vault_certificate_vault_id = azurerm_key_vault_certificate.jcimportedcert.id
}
}
I fixed this in the end, following this post on GitHub: https://github.com/hashicorp/terraform-provider-azurerm/pull/11456
What I had to do in the end was change a couple of things. First, I had to change the frontend_endpoint_id to "${azurerm_frontdoor.jccroutingrule.id}/frontendEndpoints/${local.frontendendpoint2}"; for some reason you need to make the frontend_endpoint name value a local variable. So your code will look like this:
frontend_endpoint {
name = local.frontendendpoint2
host_name = format("portal-staging.jason.website")
}
resource "azurerm_frontdoor_custom_https_configuration" "portal_staging_https_config" {
frontend_endpoint_id = "${azurerm_frontdoor.jccroutingrule.id}/frontendEndpoints/${local.frontendendpoint2}"
custom_https_provisioning_enabled = true
custom_https_configuration {
certificate_source = "AzureKeyVault"
azure_key_vault_certificate_secret_name = "imported-cert"
azure_key_vault_certificate_vault_id = azurerm_key_vault_certificate.jcimportedcert.id
}
}
Now, if you build Front Door before doing the https_configuration, you literally have to destroy your state file, let Front Door build, and then apply the custom HTTPS config. I could not get this to build without destroying the state file, and someone else on the link I shared said the same.
Also, the docs are wrong for frontend_endpoint_id. If you choose not to use the format I have given and want to do something like azurerm_frontdoor.jccroutingrule.frontend_endpoint["frontendendpoint2"], you must make sure you append .id on the end, otherwise it won't look up the key values correctly and you will just get an error. Example: azurerm_frontdoor.jccroutingrule.frontend_endpoint["frontendendpoint2"].id
One last point to note: you need to change frontend_endpoints under the routing rule to include your local value, like this: frontend_endpoints = ["jccfrontendendpoint", "${local.frontendendpoint2}"], otherwise when you come to the custom HTTPS config the lookup will fail again.
To be honest, this Front Door config is buggy at best, and the docs on it are vague and in some places just wrong.
My full config to make it easy to follow:
resource "azurerm_frontdoor" "jccroutingrule" {
depends_on = [
cloudflare_record.create_frontdoor_CNAME,
azurerm_key_vault.jctestingenv_keyvault,
azurerm_key_vault_certificate.jcimportedcert
]
name = "testingfrontdoor"
resource_group_name = azurerm_resource_group.Terraform.name
#enforce_backend_pools_certificate_name_check = false
routing_rule {
name = "jccroutingrule"
accepted_protocols = ["Http", "Https"]
patterns_to_match = ["/*"]
frontend_endpoints = ["jccfrontendendpoint","${local.frontendendpoint2}"]
forwarding_configuration {
forwarding_protocol = "MatchRequest"
backend_pool_name = "jccbackendpool"
}
}
backend_pool_load_balancing {
name = "jccloadbalancesettings"
sample_size = 255
successful_samples_required = 1
}
backend_pool_health_probe {
name = "jcchealthprobesettings"
path = "/health/probe"
protocol = "Https"
interval_in_seconds = 240
}
backend_pool {
name = "jccbackendpool"
backend {
host_header = format("portal-staging-westeurope.jason.website")
address = format("portal-staging-westeurope.jason.website")
http_port = 80
https_port = 443
weight = 50
priority = 1
enabled = true
}
load_balancing_name = "jccloadbalancesettings"
health_probe_name = "jcchealthprobesettings"
}
frontend_endpoint {
name = "jccfrontendendpoint"
host_name = format("testingfrontdoor.azurefd.net")
}
frontend_endpoint {
name = local.frontendendpoint2
host_name = format("portal-staging.jason.website")
}
}
resource "azurerm_frontdoor_custom_https_configuration" "portal_staging_https_config" {
frontend_endpoint_id = "${azurerm_frontdoor.jccroutingrule.id}/frontendEndpoints/${local.frontendendpoint2}"
custom_https_provisioning_enabled = true
custom_https_configuration {
certificate_source = "AzureKeyVault"
azure_key_vault_certificate_secret_name = "imported-cert"
azure_key_vault_certificate_vault_id = azurerm_key_vault.jctestingenv_keyvault.id
}
}

terraform for loop list for target_groups with a combine variable

Is there a way to use the below list in a for loop and add in the target_groups? I am trying to use the prefix with the target_groups variable in a for loop. I have also tested for_each. target_groups expects the list format, but for_each does not give the expected result.
variable "prefix" {
description = "NLB Prefix"
type = any
default = "test-target"
}
variable "target_groups" {
description = "NLB"
type = any
default = {
tg1 = {
name_prefix = "test"
backend_protocol = "TCP"
backend_port = 443
target_type = "ip"
deregistration_delay = 10
preserve_client_ip = true
stickiness = {
enabled = true
type = "source_ip"
}
targets = {
appl1 = {
target_id = "191.11.11.11"
port = 443
}
}
},
}
}
I tried the below with for_each:
module "g-appl_nlb" {
source = "../../modules/compute/lb"
name = format("%s-g-appl-nlb", var.name_prefix)
load_balancer_type = "network"
vpc_id = data.aws_vpc.target_vpc.id
...
target_groups = [
for_each = var.target_groups
name_previs = var.prefix
backend_protocol = each.value["backend_protocol"]
backend_port = each.value["backend_port"]
target_type = each.value["target_type"]
deregistration_delay = each.value["deregistration_delay"]
preserve_client_ip = each.value["preserve_client_ip"]
stickiness = each.value["stickiness"]
]
....
Basically, I managed to solve this with the below approach.
locals {
target_groups = flatten([
for tg_data in var.target_groups: {
name_prefix = "var.name_prefix"
backend_protocol = tg_data.backend_protocol
backend_port = tg_data.backend_port
target_type = tg_data.target_type
deregistration_delay = tg_data.deregistration_delay
preserve_client_ip = tg_data.preserve_client_ip
....
])
}
module "g-appl_nlb" {
source = "../../modules/compute/lb"
name = format("%s-g-appl-nlb", var.name_prefix)
load_balancer_type = "network"
vpc_id = data.aws_vpc.target_vpc.id
...
target_groups = local.target_groups

Terraform Nested maps of different sizes

I am deploying a number of AWS application load balancers by feeding a nested map from locals.tf to a module configuring the load-balancers.
locals {
lb_vars = {
alb1 = {
load_balancer_type = "application"
listener_port = 443
listener_protocol = "https"
internal = false
subnets = var.subnet1
backends = {
backend1 = {
port = "8080"
path = ["/endpoint1/backend1*"]
protocol = "http"
protocol_version = "http1"
health_check_enabled = true
health_check_interval = 10
health_check_port = 19808
health_check_path = "/health"
health_check_protocol = "http"
},
backend2 = {
port = "8081"
path = ["/endpoint1/backend2*"]
protocol = "http"
protocol_version = "http1"
health_check_enabled = true
health_check_interval = 10
health_check_port = 19809
health_check_path = "/health"
health_check_protocol = "http"
},
}
},
alb2 = {
load_balancer_type = "application"
listener_port = 443
listener_protocol = "https"
internal = false
subnets = var.subnet1
backends = {
backend1 = {
port = "8082"
path = ["/endpoint2/backend1*"]
protocol = "http"
protocol_version = "http1"
health_check_enabled = true
health_check_interval = 10
health_check_port = 19810
health_check_path = "/health"
health_check_protocol = "http"
},
backend2 = {
port = "8083"
path = ["/endpoint2/backend2*"]
protocol = "http"
protocol_version = "http1"
health_check_enabled = true
health_check_interval = 10
health_check_port = 19811
health_check_path = "/health"
health_check_protocol = "http"
},
}
}
}
}
Resource in load-balancer module:
resource "aws_lb" "lb" {
for_each = var.lb_vars
name = "${each.key}-${var.env_name}"
internal = try(each.value.internal, "false")
load_balancer_type = try(each.value.load_balancer_type, "application")
security_groups = aws_security_group.lb_security_group[each.key]
subnets = each.value.subnets
enable_deletion_protection = false
tags = "Name" = "${each.key}-${var.env_name}"
}
As one can see, there are a number of parameters which I would like not to define for each AWS LB, because typically they are the defaults, but if I remove one of the parameters I get the following error:
Error: Invalid value for module argument
The given value is not suitable for child module variable "lb_vars" defined
at lb/variables.tf:41,1-21: all map elements must have the same type.
Load-balancer Module
module "lb" {
source = "./lb"
env_name = var.env_name
full_env_name = local.full_env_name
subnet_ids = local.subnet_ids
vpc_id = data.aws_vpc.vpc.id
external_zone_id = data.aws_route53_zone.external.zone_id
common_tags = local.common_tags
env_cert_arn = data.aws_acm_certificate.wildcard_cert.arn
lb_params = local.lb_params
}
variables.tf in the load-balancer module (line 41, as per the error)
variable "lb_params" {
type = map
description = "LB parameters"
}
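For reference, one way to relax the "all map elements must have the same type" constraint is to loosen the variable's type so that individual entries may omit attributes, relying on the try() defaults the aws_lb resource above already applies. A rough sketch only, assuming Terraform 1.3 or later and using the lb_vars name the resource reads (the post passes the value in as lb_params, so the names would need to be reconciled):
variable "lb_vars" {
  description = "LB parameters"
  # optional() lets individual ALB entries leave these attributes out;
  # alternatively, type = any skips the uniform-type check entirely
  type = map(object({
    load_balancer_type = optional(string, "application")
    listener_port      = optional(number, 443)
    listener_protocol  = optional(string, "https")
    internal           = optional(bool, false)
    subnets            = list(string)
    backends           = any
  }))
}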

ECS and Application Load Balancer Ephemeral Ports using Terraform

I tried to build an ECS cluster with an ALB in front using Terraform. As I used dynamic port mapping, the targets will not be registered as healthy. I played with the health check and success codes; if I set it to 301 everything is fine.
ECS
data "template_file" "mb_task_template" {
template = file("${path.module}/templates/marketplace-backend.json.tpl")
vars = {
name = "${var.mb_image_name}"
port = "${var.mb_port}"
image = "${aws_ecr_repository.mb.repository_url}"
log_group = "${aws_cloudwatch_log_group.mb.name}"
region = "${var.region}"
}
}
resource "aws_ecs_cluster" "mb" {
name = var.mb_image_name
}
resource "aws_ecs_task_definition" "mb" {
family = var.mb_image_name
container_definitions = data.template_file.mb_task_template.rendered
volume {
name = "mb-home"
host_path = "/ecs/mb-home"
}
}
resource "aws_ecs_service" "mb" {
name = var.mb_repository_url
cluster = aws_ecs_cluster.mb.id
task_definition = aws_ecs_task_definition.mb.arn
desired_count = 2
iam_role = var.aws_iam_role_ecs
depends_on = [aws_autoscaling_group.mb]
load_balancer {
target_group_arn = var.target_group_arn
container_name = var.mb_image_name
container_port = var.mb_port
}
}
resource "aws_autoscaling_group" "mb" {
name = var.mb_image_name
availability_zones = ["${var.availability_zone}"]
min_size = var.min_instance_size
max_size = var.max_instance_size
desired_capacity = var.desired_instance_capacity
health_check_type = "EC2"
health_check_grace_period = 300
launch_configuration = aws_launch_configuration.mb.name
vpc_zone_identifier = flatten([var.vpc_zone_identifier])
lifecycle {
create_before_destroy = true
}
}
data "template_file" "user_data" {
template = file("${path.module}/templates/user_data.tpl")
vars = {
ecs_cluster_name = "${var.mb_image_name}"
}
}
resource "aws_launch_configuration" "mb" {
name_prefix = var.mb_image_name
image_id = var.ami
instance_type = var.instance_type
security_groups = ["${var.aws_security_group}"]
iam_instance_profile = var.aws_iam_instance_profile
key_name = var.key_name
associate_public_ip_address = true
user_data = data.template_file.user_data.rendered
lifecycle {
create_before_destroy = true
}
}
resource "aws_cloudwatch_log_group" "mb" {
name = var.mb_image_name
retention_in_days = 14
}
ALB
locals {
target_groups = ["1", "2"]
}
resource "aws_alb" "mb" {
name = "${var.mb_image_name}-alb"
internal = false
load_balancer_type = "application"
security_groups = ["${aws_security_group.mb_alb.id}"]
subnets = var.subnets
tags = {
Name = var.mb_image_name
}
}
resource "aws_alb_target_group" "mb" {
count = length(local.target_groups)
name = "${var.mb_image_name}-tg-${element(local.target_groups, count.index)}"
port = var.mb_port
protocol = "HTTP"
vpc_id = var.vpc_id
target_type = "instance"
health_check {
path = "/health"
protocol = "HTTP"
timeout = "10"
interval = "15"
healthy_threshold = "3"
unhealthy_threshold = "3"
matcher = "200-299"
}
lifecycle {
create_before_destroy = true
}
tags = {
Name = var.mb_image_name
}
}
resource "aws_alb_listener" "mb_https" {
load_balancer_arn = aws_alb.mb.arn
port = 443
protocol = "HTTPS"
ssl_policy = "ELBSecurityPolicy-2016-08"
certificate_arn = module.dns.certificate_arn
default_action {
type = "forward"
target_group_arn = aws_alb_target_group.mb.0.arn
}
}
resource "aws_alb_listener_rule" "mb_https" {
listener_arn = aws_alb_listener.mb_https.arn
priority = 100
action {
type = "forward"
target_group_arn = aws_alb_target_group.mb.0.arn
}
condition {
field = "path-pattern"
values = ["/health/"]
}
}
Okay, looks like the code above is working. I had a different issue with networking.
