I am deploying a number of AWS application load balancers by feeding a nested map from locals.tf to a module configuring the load-balancers.
locals {
lb_vars = {
alb1 = {
load_balancer_type = "application"
listener_port = 443
listener_protocol = "https"
internal = false
subnets = var.subnet1
backends = {
backend1 = {
port = "8080"
path = ["/endpoint1/backend1*"]
protocol = "http"
protocol_version = "http1"
health_check_enabled = true
health_check_interval = 10
health_check_port = 19808
health_check_path = "/health"
health_check_protocol = "http"
},
backend2 = {
port = "8081"
path = ["/endpoint1/backend2*"]
protocol = "http"
protocol_version = "http1"
health_check_enabled = true
health_check_interval = 10
health_check_port = 19809
health_check_path = "/health"
health_check_protocol = "http"
},
}
},
alb2 = {
load_balancer_type = "application"
listener_port = 443
listener_protocol = "https"
internal = false
subnets = var.subnet1
backends = {
backend1 = {
port = "8082"
path = ["/endpoint2/backend1*"]
protocol = "http"
protocol_version = "http1"
health_check_enabled = true
health_check_interval = 10
health_check_port = 19810
health_check_path = "/health"
health_check_protocol = "http"
},
backend2 = {
port = "8083"
path = ["/endpoint2/backend2*"]
protocol = "http"
protocol_version = "http1"
health_check_enabled = true
health_check_interval = 10
health_check_port = 19811
health_check_path = "/health"
health_check_protocol = "http"
},
}
}
}
}
Resource in load-balancer module:
resource "aws_lb" "lb" {
for_each = var.lb_vars
name = "${each.key}-${var.env_name}"
internal = try(each.value.internal, false)
load_balancer_type = try(each.value.load_balancer_type, "application")
security_groups = [aws_security_group.lb_security_group[each.key].id]
subnets = each.value.subnets
enable_deletion_protection = false
tags = "Name" = "${each.key}-${var.env_name}"
}
As you can see, there are a number of parameters I would rather not define for every load balancer because they are typically the defaults, but if I remove one of them from any element I get the following error:
Error: Invalid value for module argument
The given value is not suitable for child module variable "lb_vars" defined
at lb/variables.tf:41,1-21: all map elements must have the same type.
Load-balancer module:
module "lb" {
source = "./lb"
env_name = var.env_name
full_env_name = local.full_env_name
subnet_ids = local.subnet_ids
vpc_id = data.aws_vpc.vpc.id
external_zone_id = data.aws_route53_zone.external.zone_id
common_tags = local.common_tags
env_cert_arn = data.aws_acm_certificate.wildcard_cert.arn
lb_params = local.lb_params
}
variables.tf in the load-balancer module (line 41 as per the error):
variable "lb_params" {
type = map
description = "LB parameters"
}
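For context, the error happens because a bare map type constraint requires every element of the map to have exactly the same structure, so omitting an attribute from just one ALB changes that element's type. A minimal sketch (not from the original post) of two ways to relax the constraint, using the lb_vars name from the error message (the post mixes lb_vars and lb_params) and guessing the attribute types from the locals above; the optional() form needs Terraform 1.3 or newer:
variable "lb_vars" {
  type        = any
  description = "LB parameters"
}
# or, declare the shape once and mark the tunable attributes optional so that
# omitted values fall back to defaults instead of changing the element type:
variable "lb_vars" {
  type = map(object({
    load_balancer_type = optional(string, "application")
    listener_port      = number
    listener_protocol  = string
    internal           = optional(bool, false)
    subnets            = list(string)
    backends           = map(any)
  }))
}
With the optional() form, the try() calls in aws_lb become unnecessary, since Terraform fills in the defaults before the module sees the value.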
Related
I'm getting stuck with a problem and could use a Terraform expert's help. I want to create VPSes in GCP with count using a module. How do I correctly create and attach a google_compute_address and a google_compute_disk to each VPS with different names? Any help is appreciated.
Module code:
resource "google_compute_instance" "vps" {
count = var.server_count
name = var.server_count > 1 ? "${var.server_name}-${count.index}" : var.server_name
description = var.server_description
machine_type = var.server_type
zone = var.server_datacenter
deletion_protection = var.server_delete_protection
labels = var.server_labels
metadata = var.server_metadata
tags = var.server_tags
boot_disk {
auto_delete = false
initialize_params {
size = var.boot_volume_size
type = var.boot_volume_type
image = var.boot_volume_image
labels = var.boot_volume_labels
}
}
dynamic "attached_disk" {
for_each = var.volumes
content {
source = attached_disk.value["volume_name"]
}
}
dynamic "network_interface" {
for_each = var.server_network
content {
subnetwork = network_interface.value["subnetwork_name"]
network_ip = network_interface.value["subnetwork_ip"]
dynamic "access_config" {
for_each = network_interface.value.nat_ip ? [1] : []
content {
nat_ip = google_compute_address.static_ip.address
}
}
}
}
}
resource "google_compute_disk" "volume" {
for_each = var.volumes
name = each.value["volume_name"]
type = each.value["volume_type"]
size = each.value["volume_size"]
zone = var.server_datacenter
labels = each.value["volume_labels"]
}
resource "google_compute_address" "static_ip" {
count = var.server_count
name = var.server_count > 1 ? "${var.server_name}-${count.index}" : var.server_name
region = var.server_region
}
Usage example:
module "vps-test" {
source = "../module"
credentials_file = "../../../../main/vault/prod/.tf/terraform-bb-prod-ground.json"
server_count = 2
server_name = "example-vps"
server_description = "simple vps for module testing"
server_type = "e2-small"
server_region = "europe-west4"
server_datacenter = "europe-west4-c"
server_labels = {
project = "terraform"
environment = "test"
}
server_metadata = {
groups = "parent_group.child_group"
}
boot_volume_image = "debian-cloud/debian-11"
boot_volume_size = 30
boot_volume_labels = {
environment = "production"
project = "v3"
type = "system"
}
server_tags = ["postgres", "production", "disable-gce-firewall"]
server_delete_protection = true
server_network = {
common_network = {
subnetwork_name = "${data.terraform_remote_state.network.outputs.subnetwork_vpc_production_common_name}"
subnetwork_ip = ""
nat_ip = true
} # },
# custom_network = {
# subnetwork_name = (data.terraform_remote_state.network.outputs.subnetwork_vpc_production_k8s_name)
# subnetwork_ip = ""
# nat_ip = false
# }
}
volumes = {
volume_data1 = {
volume_name = "v3-postgres-saga-import-test-storage"
volume_size = "40"
volume_type = "pd-ssd"
volume_labels = {
environment = "production"
project = "v3"
type = "storage"
}
},
volume_data2 = {
volume_name = "volume-vpstest2"
volume_size = "20"
volume_type = "pd-ssd"
volume_labels = {
environment = "production"
project = "v2"
type = "storage"
}
}
}
}
The error now is: Because google_compute_address.static_ip has "count" set, its attributes must be accessed on specific instances. And I know an error about the duplicate disk names will come next.
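A minimal sketch (not from the original module) of the two usual fixes: index the count-based address with count.index, and key the disks by instance index plus volume name so every VPS gets its own uniquely named disk. The instance_volumes local and the disk name suffix are made up for the illustration:
resource "google_compute_address" "static_ip" {
  count  = var.server_count
  name   = var.server_count > 1 ? "${var.server_name}-${count.index}" : var.server_name
  region = var.server_region
}
locals {
  # one entry per (instance index, volume key) pair, e.g. "0.volume_data1"
  instance_volumes = {
    for pair in setproduct(range(var.server_count), keys(var.volumes)) :
    "${pair[0]}.${pair[1]}" => {
      instance_index = pair[0]
      volume         = var.volumes[pair[1]]
    }
  }
}
resource "google_compute_disk" "volume" {
  for_each = local.instance_volumes
  name     = "${each.value.volume.volume_name}-${each.value.instance_index}"
  type     = each.value.volume.volume_type
  size     = each.value.volume.volume_size
  zone     = var.server_datacenter
  labels   = each.value.volume.volume_labels
}
resource "google_compute_instance" "vps" {
  count = var.server_count
  # ... other arguments as in the original module ...
  dynamic "attached_disk" {
    # attach only the disks that belong to this instance
    for_each = { for k, v in local.instance_volumes : k => v if v.instance_index == count.index }
    content {
      source = google_compute_disk.volume[attached_disk.key].self_link
    }
  }
  dynamic "network_interface" {
    for_each = var.server_network
    content {
      subnetwork = network_interface.value["subnetwork_name"]
      network_ip = network_interface.value["subnetwork_ip"]
      dynamic "access_config" {
        for_each = network_interface.value.nat_ip ? [1] : []
        content {
          # a count-based resource must be indexed per instance
          nat_ip = google_compute_address.static_ip[count.index].address
        }
      }
    }
  }
}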
I'm having some issues coding a dynamic block for Front Door in Terraform. I have found a good working example of one here: https://github.com/spy86/terraform-azure-front-door/blob/main/front_door.tf
Yet my Front Door setup is not as complex as this person's, and I do not need everything they have done.
What I am trying to achieve is to put two backend_pools on my front door to enable multiple regions. The only way to do this is to bring in dynamic blocks. Yet when I do this I am getting an error:
│ Error: Unsupported attribute
│
│   on frontdoor.tf line 96, in resource "azurerm_frontdoor" "jctestingfrontdoor":
│   96: for_each = var.backend_pool_settings.value.backend[*]
│     ├────────────────
│     │ var.backend_pool_settings is a list of object, known only after apply
│
│ Can't access attributes on a list of objects. Did you mean to access an attribute for a specific element of the list, or across all elements of the list?
Here is my Frontdoor code:
Main.tf
resource "azurerm_frontdoor" "jctestingfrontdoor" {
depends_on = [
azurerm_key_vault.jctestingenv_keyvault,
]
name = "testingfrontdoor"
resource_group_name = azurerm_resource_group.Terraform.name
routing_rule {
name = "projroutingrule"
accepted_protocols = ["Http", "Https"]
patterns_to_match = ["/*"]
frontend_endpoints = ["projfrontendendpoint", "${local.frontendendpoint2}"]
forwarding_configuration {
forwarding_protocol = "MatchRequest"
backend_pool_name = "projbackendpool"
}
}
backend_pool_load_balancing {
name = "projloadbalancesettings"
sample_size = 255
successful_samples_required = 1
}
backend_pool_health_probe {
name = "projhealthprobesettings"
path = "/health/probe"
protocol = "Https"
interval_in_seconds = 240
}
dynamic "backend_pool" {
for_each = var.backend_pool_settings[*]
content {
name = var.backend_pool_settings.name
load_balancing_name = var.backend_pool_settings.load_balancing_name
health_probe_name = var.backend_pool_settings.health_probe_name
dynamic "backend" {
for_each = var.backend_pool_settings.backend
content {
address = var.backend_pool_settings.address
host_header = var.backend_pool_settings.host_header
http_port = var.backend_pool_settings.http_port
https_port = var.backend_pool_settings.https_port
priority = var.backend_pool_settings.priority
weight = var.backend_pool_settings.weight
enabled = var.backend_pool_settings.enabled
}
}
}
}
frontend_endpoint {
name = "projfrontendendpoint"
host_name = format("testingfrontdoor.azurefd.net")
}
frontend_endpoint {
name = local.frontendendpoint2
host_name = format("portal-staging.terraform.example")
}
}
resource "azurerm_frontdoor_custom_https_configuration" "portal_staging_https_config" {
depends_on = [
azurerm_frontdoor.jctestingfrontdoor
]
frontend_endpoint_id = "${azurerm_frontdoor.jctestingfrontdoor.id}/frontendEndpoints/${local.frontendendpoint2}"
custom_https_provisioning_enabled = true
custom_https_configuration {
certificate_source = "AzureKeyVault"
azure_key_vault_certificate_secret_name = "imported-cert"
azure_key_vault_certificate_vault_id = azurerm_key_vault.jctestingenv_keyvault.id
}
}
variables.tf
variable "backend_pool_settings" {
description = "backend pool stettings for frontdoor"
type = object({
name = string
backend = list(object({
address = string
host_header = string
http_port = number
https_port = number
weight = number
priority = number
enabled = bool
}))
load_balancing_name = string
health_probe_name = string
})
}
locals.tf
locals {
frontendendpoint2 = "projfrondoordnsname"
backendpool1 = "uksouth"
backendpool2 = "westeurope"
}
inputvariables.tfvars
backend_pool_settings = (
{
name = "uksouth"
backend = {
address = "portal-staging-testing1.terraform.example"
host_header = "portal-staging-testing1.terraform.example"
http_port = 80
https_port = 443
priority = 1
weight = 50
enabled = true
}
load_balancing_name = "projloadbalancesettings"
health_probe_name = "projloadbalancesettings"
},
{
name = "westeurope"
backend = {
address = "portal-staging-testing2.terraform.example"
host_header = "portal-staging-testing2.terraform.example"
http_port = 80
https_port = 443
priority = 1
weight = 50
enabled = true
}
load_balancing_name = "projloadbalancesettings"
health_probe_name = "projloadbalancesettings"
})
I have coded the variables as a list of objects, but I'm not sure if that's the right thing to do, and I'm not sure whether I should be splitting the backend_pool into two dynamic blocks like in the example.
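As an aside (not from the original post), two details matter if you stick with the list-of-objects design: a list literal in a .tfvars file is written with square brackets rather than parentheses, and inside a dynamic block you read attributes from the block's iterator (backend_pool.value, backend.value) rather than from var.backend_pool_settings directly. A rough sketch, assuming the variable type is changed to list(object({...})):
backend_pool_settings = [
  {
    name                = "uksouth"
    load_balancing_name = "projloadbalancesettings"
    health_probe_name   = "projhealthprobesettings"
    backend = [{
      address     = "portal-staging-testing1.terraform.example"
      host_header = "portal-staging-testing1.terraform.example"
      http_port   = 80
      https_port  = 443
      priority    = 1
      weight      = 50
      enabled     = true
    }]
  },
]
dynamic "backend_pool" {
  for_each = var.backend_pool_settings
  content {
    name                = backend_pool.value.name
    load_balancing_name = backend_pool.value.load_balancing_name
    health_probe_name   = backend_pool.value.health_probe_name
    dynamic "backend" {
      for_each = backend_pool.value.backend
      content {
        address     = backend.value.address
        host_header = backend.value.host_header
        http_port   = backend.value.http_port
        https_port  = backend.value.https_port
        priority    = backend.value.priority
        weight      = backend.value.weight
        enabled     = backend.value.enabled
      }
    }
  }
}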
UPDATE:
After working through my code I have simplified it a bit more,
resource "azurerm_frontdoor" "jctestingfrontdoor" {
depends_on = [
azurerm_key_vault.jctestingenv_keyvault,
]
name = "testingfrontdoor"
resource_group_name = azurerm_resource_group.Terraform.name
routing_rule {
name = "projroutingrule"
accepted_protocols = ["Http", "Https"]
patterns_to_match = ["/*"]
frontend_endpoints = ["projfrontendendpoint", "${local.frontendendpoint2}"]
forwarding_configuration {
forwarding_protocol = "MatchRequest"
backend_pool_name = "projbackendpool"
}
}
backend_pool_load_balancing {
name = "projloadbalancesettings"
sample_size = 255
successful_samples_required = 1
}
backend_pool_health_probe {
name = "projhealthprobesettings"
path = "/health/probe"
protocol = "Https"
interval_in_seconds = 240
}
backend_pool {
name = "projbackendpool"
dynamic "backend" {
for_each = var.backend_pool_settings.value.backend[*]
content {
address = backend.address
host_header = backend.host_header
http_port = backend.http_port
https_port = backend.https_port
priority = backend.priority
weight = backend.weight
enabled = backend.enabled
}
}
load_balancing_name = "projloadbalancesettings"
health_probe_name = "projhealthprobesettings"
}
frontend_endpoint {
name = "projfrontendendpoint"
host_name = format("testingfrontdoor.azurefd.net")
}
frontend_endpoint {
name = local.frontendendpoint2
host_name = format("portal-staging.terraform.example")
}
}
Now the error I'm getting is:
│ Error: Unsupported attribute
│
│   on frontdoor.tf line 96, in resource "azurerm_frontdoor" "jctestingfrontdoor":
│   96: for_each = var.backend_pool_settings.value.backend[*]
│     ├────────────────
│     │ var.backend_pool_settings is a list of object, known only after apply
│
│ Can't access attributes on a list of objects. Did you mean to access an attribute for a specific element of the list, or across all elements of the list?
I have managed to fix this by playing about with the map variable. Basically, Front Door does not require the backend to be wrapped in its own object, as it already knows it is building a backend. I also played around with a few other bits of code and got this working; see my code below for an example:
main.tf
resource "azurerm_frontdoor" "jctestingfrontdoor" {
depends_on = [
azurerm_key_vault.jctestingenv_keyvault,
]
name = "testingfrontdoor"
resource_group_name = azurerm_resource_group.terraform.name
routing_rule {
name = "projroutingrule"
accepted_protocols = ["Http", "Https"]
patterns_to_match = ["/*"]
frontend_endpoints = ["projfrontendendpoint", "${local.frontendendpoint2}"]
forwarding_configuration {
forwarding_protocol = "MatchRequest"
backend_pool_name = "projbackendpool"
}
}
backend_pool_load_balancing {
name = "projloadbalancesettings"
sample_size = 255
successful_samples_required = 1
}
backend_pool_health_probe {
name = "projhealthprobesettings"
path = "/health/probe"
protocol = "Https"
interval_in_seconds = 240
}
backend_pool {
name = "projbackendpool"
dynamic "backend" {
for_each = var.backend_pool_settings
content {
address = backend.value.address
host_header = backend.value.host_header
http_port = backend.value.http_port
https_port = backend.value.https_port
priority = backend.value.priority
weight = backend.value.weight
enabled = backend.value.enabled
}
}
load_balancing_name = "projloadbalancesettings"
health_probe_name = "projhealthprobesettings"
}
frontend_endpoint {
name = "projfrontendendpoint"
host_name = format("testingfrontdoor.azurefd.net")
}
frontend_endpoint {
name = local.frontendendpoint2
host_name = format("portal-staging.terraform.example")
}
}
resource "azurerm_frontdoor_custom_https_configuration" "portal_staging_https_config" {
depends_on = [
azurerm_frontdoor.jctestingfrontdoor
]
frontend_endpoint_id = "${azurerm_frontdoor.jctestingfrontdoor.id}/frontendEndpoints/${local.frontendendpoint2}"
custom_https_provisioning_enabled = true
custom_https_configuration {
certificate_source = "AzureKeyVault"
azure_key_vault_certificate_secret_name = "imported-cert"
azure_key_vault_certificate_vault_id = azurerm_key_vault.jctestingenv_keyvault.id
}
}
variables.tf
variable "backend_pool_settings" {
description = "backend pool stettings for frontdoor"
type = map(object({
address = string
host_header = string
http_port = number
https_port = number
weight = number
priority = number
enabled = bool
}))
}
inputvariables.tfvars
backend_pool_settings = {
backendone = {
address = "portal-staging-testing1.terraform.example"
host_header = "portal-staging-testing1.terraform.example"
http_port = 80
https_port = 443
priority = 1
weight = 50
enabled = true
},
backendtwo = {
address = "portal-staging-testing2.terraform.example"
host_header = "portal-staging-testing2.terraform.example"
http_port = 80
https_port = 443
priority = 1
weight = 50
enabled = true
}
}
This post also helped me figure out how to work with map objects in Terraform: https://serverfault.com/questions/1063395/terraform-values-from-tfvars-are-not-loading-when-using-multi-level-maps
I am trying to build an Azure Front Door with Terraform, but I am having an issue when I try to configure two front ends and then bind one of them to a custom HTTPS configuration. I am getting the following error: The argument "frontend_endpoint_id" is required, but no definition was found.
I just can't work out how you would specify two Front Door endpoints and then reference one of them in a custom HTTPS config. Code below.
resource "azurerm_frontdoor" "jccroutingrule" {
depends_on = [
cloudflare_record.create_frontdoor_CNAME,
azurerm_key_vault.jctestingenv_keyvault,
azurerm_key_vault_certificate.jcimportedcert
]
name = "testingfrontdoor"
resource_group_name = azurerm_resource_group.Terraform.name
#enforce_backend_pools_certificate_name_check = false
routing_rule {
name = "jccroutingrule"
accepted_protocols = ["Http", "Https"]
patterns_to_match = ["/*"]
frontend_endpoints = ["jccfrontendendpoint","frontendendpoint2"]
forwarding_configuration {
forwarding_protocol = "MatchRequest"
backend_pool_name = "jccbackendpool"
}
}
backend_pool_load_balancing {
name = "jccloadbalancesettings"
sample_size = 255
successful_samples_required = 1
}
backend_pool_health_probe {
name = "jcchealthprobesettings"
path = "/health/probe"
protocol = "Https"
interval_in_seconds = 240
}
backend_pool {
name = "jccbackendpool"
backend {
host_header = format("portal-staging-westeurope.jason.website")
address = format("portal-staging-westeurope.jason.website")
http_port = 80
https_port = 443
weight = 50
priority = 1
enabled = true
}
load_balancing_name = "jccloadbalancesettings"
health_probe_name = "jcchealthprobesettings"
}
frontend_endpoint {
name = "jccfrontendendpoint"
host_name = format("testingfrontdoor.azurefd.net")
}
frontend_endpoint {
name = "frontendendpoint2"
host_name = format("portal-staging.jason.website")
}
}
resource "azurerm_frontdoor_custom_https_configuration" "portal_staging_https_config" {
frontend_endpoint_id = azurerm_frontdoor.jccroutingrule.frontend_endpoint[1].id
custom_https_provisioning_enabled = true
custom_https_configuration {
certificate_source = "AzureKeyVault"
azure_key_vault_certificate_secret_name = "imported-cert"
azure_key_vault_certificate_vault_id = azurerm_key_vault_certificate.jcimportedcert.id
}
}
From the documentation of azurerm_frontdoor here, I see they export the field below, which I think is what you're after:
frontend_endpoints - A map/dictionary of Frontend Endpoint Names (key)
to the Frontend Endpoint ID (value)
frontend_endpoints is a map containing the endpoint name as the key and the endpoint ID as the value, so you can use the lookup function to extract the value for a given key.
In the end your azurerm_frontdoor_custom_https_configuration looks like this:
resource "azurerm_frontdoor_custom_https_configuration" "portal_staging_https_config" {
frontend_endpoint_id = lookup(azurerm_frontdoor.jccroutingrule.frontend_endpoints, "frontendendpoint2", "what?")
custom_https_provisioning_enabled = true
custom_https_configuration {
certificate_source = "AzureKeyVault"
azure_key_vault_certificate_secret_name = "imported-cert"
azure_key_vault_certificate_vault_id = azurerm_key_vault_certificate.jcimportedcert.id
}
}
In case you change your mind and want to use the jccfrontendendpoint endpoint instead, feel free to put that key into the lookup function :-)
From the Terraform docs:
resource "azurerm_frontdoor_custom_https_configuration" "portal_staging_https_config" {
frontend_endpoint_id = azurerm_frontdoor.jccroutingrule.frontend_endpoint["frontendendpoint2"]
custom_https_provisioning_enabled = true
custom_https_configuration {
certificate_source = "AzureKeyVault"
azure_key_vault_certificate_secret_name = "imported-cert"
azure_key_vault_certificate_vault_id = azurerm_key_vault_certificate.jcimportedcert.id
}
}
I fixed this in the end, following this post on github: https://github.com/hashicorp/terraform-provider-azurerm/pull/11456
What I had to do in the end was change a couple of things. First, I had to change frontend_endpoint_id to "${azurerm_frontdoor.jccroutingrule.id}/frontendEndpoints/${local.frontendendpoint2}"; for some reason you need to turn the frontend_endpoint name value into a local variable. So your code will look like this:
frontend_endpoint {
name = local.frontendendpoint2
host_name = format("portal-staging.jason.website")
}
resource "azurerm_frontdoor_custom_https_configuration" "portal_staging_https_config" {
frontend_endpoint_id = "${azurerm_frontdoor.jccroutingrule.id}/frontendEndpoints/${local.frontendendpoint2}"
custom_https_provisioning_enabled = true
custom_https_configuration {
certificate_source = "AzureKeyVault"
azure_key_vault_certificate_secret_name = "imported-cert"
azure_key_vault_certificate_vault_id = azurerm_key_vault_certificate.jcimportedcert.id
}
}
Now, if you build Front Door before applying the https_configuration, you literally have to destroy your state file for Front Door to build and then apply the custom HTTPS config. I could not get this to build without destroying the state file, and someone else on the link I shared said the same.
Also, the docs are wrong about frontend_endpoint_id if you choose not to use the format I have given: if you want to do something like azurerm_frontdoor.jccroutingrule.frontend_endpoint["frontendendpoint2"], you must make sure you append .id on the end, otherwise it won't look up the key values correctly and you will just get an error. Example: azurerm_frontdoor.jccroutingrule.frontend_endpoint["frontendendpoint2"].id
One last point to note: you need to change frontend_endpoints under the routing rule to include your local value, like this: frontend_endpoints = ["jccfrontendendpoint", "${local.frontendendpoint2}"], otherwise the lookup will fail again when you get to the custom HTTPS config.
To be honest, this Front Door config is buggy at best, and the docs on it are vague and in some places just wrong.
My full config to make it easy to follow:
resource "azurerm_frontdoor" "jccroutingrule" {
depends_on = [
cloudflare_record.create_frontdoor_CNAME,
azurerm_key_vault.jctestingenv_keyvault,
azurerm_key_vault_certificate.jcimportedcert
]
name = "testingfrontdoor"
resource_group_name = azurerm_resource_group.Terraform.name
#enforce_backend_pools_certificate_name_check = false
routing_rule {
name = "jccroutingrule"
accepted_protocols = ["Http", "Https"]
patterns_to_match = ["/*"]
frontend_endpoints = ["jccfrontendendpoint","${local.frontendendpoint2}"]
forwarding_configuration {
forwarding_protocol = "MatchRequest"
backend_pool_name = "jccbackendpool"
}
}
backend_pool_load_balancing {
name = "jccloadbalancesettings"
sample_size = 255
successful_samples_required = 1
}
backend_pool_health_probe {
name = "jcchealthprobesettings"
path = "/health/probe"
protocol = "Https"
interval_in_seconds = 240
}
backend_pool {
name = "jccbackendpool"
backend {
host_header = format("portal-staging-westeurope.jason.website")
address = format("portal-staging-westeurope.jason.website")
http_port = 80
https_port = 443
weight = 50
priority = 1
enabled = true
}
load_balancing_name = "jccloadbalancesettings"
health_probe_name = "jcchealthprobesettings"
}
frontend_endpoint {
name = "jccfrontendendpoint"
host_name = format("testingfrontdoor.azurefd.net")
}
frontend_endpoint {
name = local.frontendendpoint2
host_name = format("portal-staging.jason.website")
}
}
resource "azurerm_frontdoor_custom_https_configuration" "portal_staging_https_config" {
frontend_endpoint_id = "${azurerm_frontdoor.jccroutingrule.id}/frontendEndpoints/${local.frontendendpoint2}"
custom_https_provisioning_enabled = true
custom_https_configuration {
certificate_source = "AzureKeyVault"
azure_key_vault_certificate_secret_name = "imported-cert"
azure_key_vault_certificate_vault_id = azurerm_key_vault.jctestingenv_keyvault.id
}
}
Is there a way to use the list below in a for loop and add it to target_groups? I am trying to use the prefix with the target_groups variable in a for loop. I have also tested for_each. target_groups expects a list, but for_each does not give that result.
variable "prefix" {
description = "NLB Prefix"
type = any
default = "test-target"
}
variable "target_groups" {
description = "NLB"
type = any
default = {
tg1 = {
name_prefix = "test"
backend_protocol = "TCP"
backend_port = 443
target_type = "ip"
deregistration_delay = 10
preserve_client_ip = true
stickiness = {
enabled = true
type = "source_ip"
}
targets = {
appl1 = {
target_id = "191.11.11.11"
port = 443
}
}
},
}
}
I tried for_each inside the list, as below:
module "g-appl_nlb" {
source = "../../modules/compute/lb"
name = format("%s-g-appl-nlb", var.name_prefix)
load_balancer_type = "network"
vpc_id = data.aws_vpc.target_vpc.id
...
target_groups = [
for_each = var.target_groups
name_previs = var.prefix
backend_protocol = each.value["backend_protocol"]
backend_port = each.value["backend_port"]
target_type = each.value["target_type"]
deregistration_delay = each.value["deregistration_delay"]
preserve_client_ip = each.value["preserve_client_ip"]
stickiness = each.value["stickiness"]
]
....
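For comparison (not part of the original attempt), the expression that produces the list the module expects is a for expression rather than for_each; the names reuse the variables from the question, and this is essentially what the locals-based solution below does:
target_groups = [
  for tg in var.target_groups : {
    name_prefix          = var.prefix
    backend_protocol     = tg.backend_protocol
    backend_port         = tg.backend_port
    target_type          = tg.target_type
    deregistration_delay = tg.deregistration_delay
    preserve_client_ip   = tg.preserve_client_ip
    stickiness           = tg.stickiness
    targets              = tg.targets
  }
]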
Basically, I managed to solve this with the approach below.
locals {
target_groups = flatten([
for tg_data in var.target_groups: {
name_prefix = "var.name_prefix"
backend_protocol = tg_data.backend_protocol
backend_port = tg_data.backend_port
target_type = tg_data.target_type
deregistration_delay = tg_data.deregistration_delay
preserve_client_ip = tg_data.preserve_client_ip
....
])
}
module "g-appl_nlb" {
source = "../../modules/compute/lb"
name = format("%s-g-appl-nlb", var.name_prefix)
load_balancer_type = "network"
vpc_id = data.aws_vpc.target_vpc.id
...
target_groups = local.target_groups
I have 3 services called valid, jsc, and test, each having 3 instances in 3 zones. Now I have to create a target group for each service and attach the instances to it. I don't know how to combine the ports 80 and 443 with the service names to create the target groups.
variable "service-names" {
type = list
default = ["valid","jsc","test"]
}
variable "net-lb-ports" {
type = map(number)
default = {
TCP = 80
TCP = 443
}
}
Now I have to combine the service-names variable (a list) and net-lb-ports (a map) to create the target groups.
I was able to do the below, but only for one port, and only by hardcoding the values:
resource "aws_lb_target_group" "ecom-nlb-tgp" {
for_each = toset(var.service-names)
name = "${each.value}-nlbtgp"
port = 80
protocol = "TCP"
vpc_id = aws_vpc.ecom-vpc.id
target_type = "instance"
deregistration_delay = 90
health_check {
interval = 30
port = 80
protocol = "TCP"
healthy_threshold = 3
unhealthy_threshold = 3
}
tags = {
"Name" = "${each.value}-nlb-tgp"
}
}
So in total I need 6 target groups: 3 service names * 2 ports (80, 443).
Please guide me.
I changed the variable as below and was able to create the target groups:
variable "net-lb-ports" {
type = map
default = {
port1 = {
port = 80
protocol = "TCP"
}
port2 = {
port = 443
protocol = "TCP"
}
}
}
locals {
merged_lbport_svc = flatten([
for s in var.service-names : [
for v in var.net-lb-ports : {
service = s
port = v.port
protocol = v.protocol
}
]
])
}
resource "aws_lb_target_group" "ecom-nlb-tgp" {
for_each = {for idx,svc in local.merged_lbport_svc : "${svc.service}-${svc.port}-${svc.protocol}" => svc}
#for_each = {for idx,svc in local.merged_lbport_svc : idx => svc}
name = "ecom-${each.value.service}-${each.value.port}-${each.value.protocol}-nlbtgp"
port = each.value.port
protocol = each.value.protocol
vpc_id = aws_vpc.ecom-vpc.id
target_type = "instance"
deregistration_delay = 90
health_check {
interval = 30
port = each.value.port
protocol = each.value.protocol
healthy_threshold = 3
unhealthy_threshold = 3
}
tags = {
"Name" = "ecom-${each.value.service}-${each.value.port}-${each.value.protocol}-nlbtgp"
}
}
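An equivalent way (not from the original answer) to build the same service/port combinations is setproduct(), which avoids the nested for expressions:
locals {
  svc_port_pairs = {
    for pair in setproduct(var.service-names, values(var.net-lb-ports)) :
    "${pair[0]}-${pair[1].port}-${pair[1].protocol}" => {
      service  = pair[0]
      port     = pair[1].port
      protocol = pair[1].protocol
    }
  }
}
local.svc_port_pairs can then be used directly as the for_each of aws_lb_target_group.ecom-nlb-tgp in place of the flatten()-based map.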