Azure Firewall: Most common Azure Firewall Policy Rule Collection Rules

I have been asked to configure an Azure Firewall Policy rule collection with the most commonly used network rules and application rules.
I have put together the configuration below, capturing the rules I believe are most common. However, am I missing anything that is generally considered a common rule?
resource "azurerm_firewall_policy_rule_collection_group" "fwpolrcg" {
name = "fwpolicy-rcg"
firewall_policy_id = azurerm_firewall_policy.fwpol.id
priority = 100
network_rule_collection {
name = "network_rule_collection1"
priority = 100
action = "Allow"
rule {
name = "AllowHubToSpokeRDP"
protocols = ["TCP","UDP"]
source_addresses = var.hub_firewall_ip_range
destination_addresses = var.spoke_firewall_ip_range
destination_ports = ["3389"]
}
rule {
name = "AllowSpokeToHubRDP"
protocols = ["TCP","UDP"]
source_addresses = var.spoke_firewall_ip_range
destination_addresses = var.hub_firewall_ip_range
destination_ports = ["3389"]
}
rule {
name = "AllowHubToSpokeHTTPS"
protocols = ["TCP"]
source_addresses = var.hub_firewall_ip_range
destination_addresses = var.spoke_firewall_ip_range
destination_ports = ["443"]
}
rule {
name = "AllowSpokeToHubHTTPS"
protocols = ["TCP"]
source_addresses = var.spoke_firewall_ip_range
destination_addresses = var.hub_firewall_ip_range
destination_ports = ["443"]
}
rule {
name = "AllowHubToSpokeDNS"
protocols = ["TCP","UDP"]
source_addresses = var.hub_firewall_ip_range
destination_addresses = var.spoke_firewall_ip_range
destination_ports = ["53"]
}
rule {
name = "AllowSpokeToHubDNS"
protocols = ["TCP","UDP"]
source_addresses = var.spoke_firewall_ip_range
destination_addresses = var.hub_firewall_ip_range
destination_ports = ["53"]
}
}
application_rule_collection {
name = "application_rule_collection1"
priority = 100
action = "Allow"
rule {
name = "Windows Update"
source_addresses = ["*"]
fqdn_tags = [
"AppServiceEnvironment",
"AzureBackup",
"AzureKubernetesService",
"HDInsight",
"MicrosoftActiveProtectionService",
"WindowsDiagnostics",
"WindowsUpdate",
"WindowsVirtualDesktop"]
}
rule {
name = "AllowMicrosoftFqdns"
source_addresses = ["*"]
destination_fqdns = [
"*.cdn.mscr.io",
"mcr.microsoft.com",
"*.data.mcr.microsoft.com",
"management.azure.com",
"login.microsoftonline.com",
"acs-mirror.azureedge.net",
"dc.services.visualstudio.com",
"*.opinsights.azure.com",
"*.oms.opinsights.azure.com",
"*.microsoftonline.com",
"*.monitoring.azure.com",
]
protocols {
port = "80"
type = "Http"
}
protocols {
port = "443"
type = "Https"
}
}
rule {
name = "AllowFqdnsForOsUpdates"
source_addresses = ["*"]
destination_fqdns = [
"download.opensuse.org",
"security.ubuntu.com",
"ntp.ubuntu.com",
"packages.microsoft.com",
"snapcraft.io"
]
protocols {
port = "80"
type = "Http"
}
protocols {
port = "443"
type = "Https"
}
}
rule {
name = "AllowImagesFqdns"
source_addresses = ["*"]
destination_fqdns = [
"auth.docker.io",
"registry-1.docker.io",
"production.cloudflare.docker.com"
]
protocols {
port = "80"
type = "Http"
}
protocols {
port = "443"
type = "Https"
}
}
rule {
name = "AllowAzure"
source_addresses = ["*"]
destination_fqdns = [
"*.azure.*"
]
protocols {
port = "80"
type = "Http"
}
protocols {
port = "443"
type = "Https"
}
}
rule {
name = "AllowBing"
source_addresses = ["*"]
destination_fqdns = [
"*.bing.com"
]
protocols {
port = "80"
type = "Http"
}
protocols {
port = "443"
type = "Https"
}
}
rule {
name = "AllowGoogle"
source_addresses = ["*"]
destination_fqdns = [
"*.google.com"
]
protocols {
port = "80"
type = "Http"
}
protocols {
port = "443"
type = "Https"
}
}
}
depends_on = [azurerm_firewall_policy.fwpol]
}
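For comparison, two rules that often appear in similar hub-and-spoke baselines but are absent above are NTP and Windows activation against the Azure KMS endpoint. A sketch using the same variables, which would sit inside the network rule collection (note that destination_fqdns in a network rule requires DNS proxy to be enabled on the firewall policy):
rule {
  name                  = "AllowSpokeToNTP"
  protocols             = ["UDP"]
  source_addresses      = var.spoke_firewall_ip_range
  destination_addresses = ["*"]
  destination_ports     = ["123"]
}
rule {
  name              = "AllowSpokeToKMSActivation"
  protocols         = ["TCP"]
  source_addresses  = var.spoke_firewall_ip_range
  destination_fqdns = ["kms.core.windows.net"] # requires DNS proxy on the policy
  destination_ports = ["1688"]
}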

I tried to reproduce the same in my environment, creating Azure Firewall Policy rule collection rules with Terraform:
Note: Make sure every rule is defined inside the appropriate collection block so that the collection's allow or deny action applies to it.
See the documentation for creating an Azure Firewall rule collection group with Terraform.
Terraform code:
provider "azurerm" {
features {}
}
resource "azurerm_resource_group" "Thejesh" {
name = "Thejesh-resources"
location = "West Europe"
}
resource "azurerm_firewall_policy" "example" {
name = "example-fwpolicy"
resource_group_name = azurerm_resource_group.Thejesh.name
location = azurerm_resource_group.Thejesh.location
}
resource "azurerm_firewall_policy_rule_collection_group" "example" {
name = "example-fwpolicy-rcg"
firewall_policy_id = azurerm_firewall_policy.example.id
priority = 500
application_rule_collection {
name = "app_rule_collection1"
priority = 500
action = "Deny"
rule {
name = "app_rule_collection1_rule1"
protocols {
type = "Http"
port = 80
}
protocols {
type = "Https"
port = 443
}
source_addresses = ["10.0.0.1"]
destination_fqdns = ["*.microsoft.com","*.cdn.mscr.io",
"mcr.microsoft.com",
"*.data.mcr.microsoft.com",
"management.azure.com",
"login.microsoftonline.com",
"acs-mirror.azureedge.net",
"dc.services.visualstudio.com",
"*.opinsights.azure.com",
"*.oms.opinsights.azure.com",
"*.microsoftonline.com",
"*.monitoring.azure.com",]
}
}
network_rule_collection {
name = "network_rule_collection1"
priority = 400
action = "Deny"
rule {
name = "network_rule_collection1_rule1"
protocols = ["TCP", "UDP"]
source_addresses = ["10.0.0.1"]
destination_addresses = ["192.168.1.1", "192.168.1.2"]
destination_ports = ["80", "1000-2000"]
}
}
nat_rule_collection {
name = "nat_rule_collection1"
priority = 300
action = "Dnat"
rule {
name = "nat_rule_collection1_rule1"
protocols = ["TCP", "UDP"]
source_addresses = ["10.0.0.1", "10.0.0.2"]
destination_address = "192.168.1.1"
destination_ports = ["80"]
translated_address = "192.168.0.1"
translated_port = "8080"
}
}
}
Terraform plan:
Terraform apply:
Once the code ran, the resources were created along with the Azure Firewall Policy.
Rule collections inside the Azure Firewall policy:
Application rules in the Azure Firewall policy:
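To verify from Terraform itself rather than the portal, the resource IDs can also be exported as outputs (an optional sketch against the resources above):
output "firewall_policy_id" {
  value = azurerm_firewall_policy.example.id
}
output "rule_collection_group_id" {
  value = azurerm_firewall_policy_rule_collection_group.example.id
}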

Related

In Terraform, how to set an if condition for a data resource

How do I set an if condition for the data source aws_acm_certificate? I get the following error when I add count to it, but I do not get the same error for the resource "aws_lb_listener".
Because data.aws_acm_certificate.acm_certificate has "count" set, its
attributes must be accessed on specific instances.
data "aws_acm_certificate" "acm_certificate" {
count = var.protocol!="TCP" ? 1 : 0
domain = var.certificate_domain_name
most_recent = true
}
resource "aws_lb_listener" "listener_https" {
count = var.protocol!="TCP" ? 1 : 0
load_balancer_arn = var.load_balancer_arn
port = "443"
protocol = "HTTPS"
ssl_policy = "ELBSecurityPolicy-2016-08"
certificate_arn = data.aws_acm_certificate.acm_certificate.arn
default_action {
type = "forward"
target_group_arn = var.target_group_arn
}
}
Because you have count set on your data source, you need to access it as an array. Since the listener resource uses the same count condition, index 0 is guaranteed to exist whenever the listener is created:
certificate_arn = data.aws_acm_certificate.acm_certificate[0].arn
Alternatively, you could use for_each for your resource, something like:
resource "aws_lb_listener" "listener_https" {
for_each = { for i, cert in data.aws_acm_certificate.acm_certificate : i => cert } # for_each needs a map or set, not the list a count-based data source produces
load_balancer_arn = var.load_balancer_arn
port = "443"
protocol = "HTTPS"
ssl_policy = "ELBSecurityPolicy-2016-08"
certificate_arn = each.value.arn
default_action {
type = "forward"
target_group_arn = var.target_group_arn
}
}
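Either way, remember that other references to the listener must now be indexed too. For example, with the count variant, a hypothetical output that is null when the listener isn't created (one() requires Terraform >= 0.15):
output "https_listener_arn" {
  value = one(aws_lb_listener.listener_https[*].arn) # null when count is 0
}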

Multiple frontend_endpoint in Azure Front Door with Terraform

I am trying to build an Azure Front Door with Terraform, but I am having an issue when configuring two frontend endpoints and then binding one of them to a custom HTTPS configuration. I get the following error: The argument "frontend_endpoint_id" is required, but no definition was found.
I just can't work out how to specify two Front Door endpoints and then reference one of them in a custom HTTPS config. Code below.
resource "azurerm_frontdoor" "jccroutingrule" {
depends_on = [
cloudflare_record.create_frontdoor_CNAME,
azurerm_key_vault.jctestingenv_keyvault,
azurerm_key_vault_certificate.jcimportedcert
]
name = "testingfrontdoor"
resource_group_name = azurerm_resource_group.Terraform.name
#enforce_backend_pools_certificate_name_check = false
routing_rule {
name = "jccroutingrule"
accepted_protocols = ["Http", "Https"]
patterns_to_match = ["/*"]
frontend_endpoints = ["jccfrontendendpoint","frontendendpoint2"]
forwarding_configuration {
forwarding_protocol = "MatchRequest"
backend_pool_name = "jccbackendpool"
}
}
backend_pool_load_balancing {
name = "jccloadbalancesettings"
sample_size = 255
successful_samples_required = 1
}
backend_pool_health_probe {
name = "jcchealthprobesettings"
path = "/health/probe"
protocol = "Https"
interval_in_seconds = 240
}
backend_pool {
name = "jccbackendpool"
backend {
host_header = format("portal-staging-westeurope.jason.website")
address = format("portal-staging-westeurope.jason.website")
http_port = 80
https_port = 443
weight = 50
priority = 1
enabled = true
}
load_balancing_name = "jccloadbalancesettings"
health_probe_name = "jcchealthprobesettings"
}
frontend_endpoint {
name = "jccfrontendendpoint"
host_name = format("testingfrontdoor.azurefd.net")
}
frontend_endpoint {
name = "frontendendpoint2"
host_name = format("portal-staging.jason.website")
}
}
resource "azurerm_frontdoor_custom_https_configuration" "portal_staging_https_config" {
frontend_endpoint_id = azurerm_frontdoor.jccroutingrule.frontend_endpoint[1].id
custom_https_provisioning_enabled = true
custom_https_configuration {
certificate_source = "AzureKeyVault"
azure_key_vault_certificate_secret_name = "imported-cert"
azure_key_vault_certificate_vault_id = azurerm_key_vault_certificate.jcimportedcert.id
}
}
So, from the documentation of azurerm_frontdoor here, I see it exports the field below, which I think is what you're after:
frontend_endpoints - A map/dictionary of Frontend Endpoint Names (key)
to the Frontend Endpoint ID (value)
frontend_endpoints is a map containing the endpoint name as the key and the ID as the value, so you can use the lookup function to extract the value from the key.
In the end, your azurerm_frontdoor_custom_https_configuration looks like this:
resource "azurerm_frontdoor_custom_https_configuration" "portal_staging_https_config" {
frontend_endpoint_id = lookup(azurerm_frontdoor.jccroutingrule.frontend_endpoints, "frontendendpoint2", "what?")
custom_https_provisioning_enabled = true
custom_https_configuration {
certificate_source = "AzureKeyVault"
azure_key_vault_certificate_secret_name = "imported-cert"
azure_key_vault_certificate_vault_id = azurerm_key_vault_certificate.jcimportedcert.id
}
}
In case you change your mind and want to use the jccfrontendendpoint endpoint instead, just put that key into the lookup function.
from terraform docs:
resource "azurerm_frontdoor_custom_https_configuration" "portal_staging_https_config" {
frontend_endpoint_id = azurerm_frontdoor.jccroutingrule.frontend_endpoint["frontendendpoint2"]
custom_https_provisioning_enabled = true
custom_https_configuration {
certificate_source = "AzureKeyVault"
azure_key_vault_certificate_secret_name = "imported-cert"
azure_key_vault_certificate_vault_id = azurerm_key_vault_certificate.jcimportedcert.id
}
}
I fixed this in the end, following this post on GitHub: https://github.com/hashicorp/terraform-provider-azurerm/pull/11456
What I had to do in the end was change a couple of things. First, I changed frontend_endpoint_id to "${azurerm_frontdoor.jccroutingrule.id}/frontendEndpoints/${local.frontendendpoint2}"; for some reason you need to turn the frontend_endpoint name into a local variable. So your code will look like this:
frontend_endpoint {
name = local.frontendendpoint2
host_name = format("portal-staging.jason.website")
}
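The locals block itself isn't shown here; a minimal sketch, assuming the endpoint keeps the same name:
locals {
  frontendendpoint2 = "frontendendpoint2"
}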
resource "azurerm_frontdoor_custom_https_configuration" "portal_staging_https_config" {
frontend_endpoint_id = "${azurerm_frontdoor.jccroutingrule.id}/frontendEndpoints/${local.frontendendpoint2}"
custom_https_provisioning_enabled = true
custom_https_configuration {
certificate_source = "AzureKeyVault"
azure_key_vault_certificate_secret_name = "imported-cert"
azure_key_vault_certificate_vault_id = azurerm_key_vault_certificate.jcimportedcert.id
}
}
Now, if you build Front Door before adding the https configuration, you literally have to destroy your state file for Front Door to build and then apply the custom HTTPS config. I could not get this to build without destroying the state file, and someone else on the link I shared said the same.
Also, the docs are wrong about frontend_endpoint_id if you choose not to use the format I have given and want to do something like azurerm_frontdoor.jccroutingrule.frontend_endpoint["frontendendpoint2"]: you must make sure you append .id at the end, otherwise it won't look up the key values correctly and you will just get an error. Example: azurerm_frontdoor.jccroutingrule.frontend_endpoint["frontendendpoint2"].id
Also, one last point to note: you need to change frontend_endpoints under the routing rule to include your local value, like this: frontend_endpoints = ["jccfrontendendpoint","${local.frontendendpoint2}"], otherwise the lookup will fail again when you come to the custom HTTPS config.
To be honest, this Front Door config is buggy at best, and the docs on it are vague and in some places just wrong.
My full config to make it easy to follow:
resource "azurerm_frontdoor" "jccroutingrule" {
depends_on = [
cloudflare_record.create_frontdoor_CNAME,
azurerm_key_vault.jctestingenv_keyvault,
azurerm_key_vault_certificate.jcimportedcert
]
name = "testingfrontdoor"
resource_group_name = azurerm_resource_group.Terraform.name
#enforce_backend_pools_certificate_name_check = false
routing_rule {
name = "jccroutingrule"
accepted_protocols = ["Http", "Https"]
patterns_to_match = ["/*"]
frontend_endpoints = ["jccfrontendendpoint","${local.frontendendpoint2}"]
forwarding_configuration {
forwarding_protocol = "MatchRequest"
backend_pool_name = "jccbackendpool"
}
}
backend_pool_load_balancing {
name = "jccloadbalancesettings"
sample_size = 255
successful_samples_required = 1
}
backend_pool_health_probe {
name = "jcchealthprobesettings"
path = "/health/probe"
protocol = "Https"
interval_in_seconds = 240
}
backend_pool {
name = "jccbackendpool"
backend {
host_header = format("portal-staging-westeurope.jason.website")
address = format("portal-staging-westeurope.jason.website")
http_port = 80
https_port = 443
weight = 50
priority = 1
enabled = true
}
load_balancing_name = "jccloadbalancesettings"
health_probe_name = "jcchealthprobesettings"
}
frontend_endpoint {
name = "jccfrontendendpoint"
host_name = format("testingfrontdoor.azurefd.net")
}
frontend_endpoint {
name = local.frontendendpoint2
host_name = format("portal-staging.jason.website")
}
}
resource "azurerm_frontdoor_custom_https_configuration" "portal_staging_https_config" {
frontend_endpoint_id = "${azurerm_frontdoor.jccroutingrule.id}/frontendEndpoints/${local.frontendendpoint2}"
custom_https_provisioning_enabled = true
custom_https_configuration {
certificate_source = "AzureKeyVault"
azure_key_vault_certificate_secret_name = "imported-cert"
azure_key_vault_certificate_vault_id = azurerm_key_vault.jctestingenv_keyvault.id
}
}

Application gateway request_routing_rules does not exist

I am trying to deploy an Azure application gateway. I set the configuration as follows:
resource "azurerm_application_gateway" "demo-app-gateway" {
location = var.location
resource_group_name = azurerm_resource_group.rg-hri-testing-env.name
name = "demo-app-gateway"
autoscale_configuration {
max_capacity = 10
min_capacity = 2
}
frontend_port {
name = "port_443"
port = 443
}
sku {
name = "Standard_v2"
tier = "Standard_v2"
}
frontend_ip_configuration {
name = "appGwPublicFrontendIp"
public_ip_address_id = azurerm_public_ip.demo-app-gateway-public-ip.id
private_ip_address_allocation = "Dynamic"
}
backend_http_settings {
cookie_based_affinity = "Disabled"
name = "demo-http-settings"
port = 443
protocol = "Https"
host_name = "apim.test.com"
pick_host_name_from_backend_address = false
path = "/external/"
request_timeout = 20
probe_name = "demo-apim-probe"
trusted_root_certificate_names = ["demo-trusted-root-ca-certificate"]
}
probe {
interval = 30
name = "demo-apim-probe"
path = "/status-0123456789abcdef"
protocol = "Https"
timeout = 30
unhealthy_threshold = 3
pick_host_name_from_backend_http_settings = true
match {
body = ""
status_code = [
"200-399"
]
}
}
gateway_ip_configuration {
name = "appGatewayIpConfig"
subnet_id = azurerm_subnet.GatewaSubnet.id
}
backend_address_pool {
name = "demo-backend-pool"
}
http_listener {
frontend_ip_configuration_name = "appGwPublicFrontendIp"
frontend_port_name = "port_443"
name = "demo-app-gateway-listener"
protocol = "Https"
require_sni = false
ssl_certificate_name = "demo-app-gateway-certificate"
}
ssl_certificate {
data = filebase64(var.ssl_certificate_path)
name = "demo-app-gateway-certificate"
password = var.ssl_certificate_password
}
trusted_root_certificate {
data = filebase64(var.ssl_certificate_path)
name = "demo-trusted-root-ca-certificate"
}
request_routing_rule {
http_listener_name = "demo-app-gateway-listener"
name = "demo-rule"
rule_type = "Basic"
backend_address_pool_name = "demo-backend-pool"
backend_http_settings_name = "demo-http-setting"
}
}
But when I run terraform apply I get this error.
Error: creating/updating Application Gateway: (Name "demo-app-gateway" / Resource Group "rg-hri-testing-apim"): network.ApplicationGatewaysClient#CreateOrUpdate: Failure sending request: StatusCode=0 -- Original Error: Code="InvalidResourceReference" Message="Resource /subscriptions/my-sub/resourceGroups/rg-hri-testing-apim/providers/Microsoft.Network/applicationGateways/demo-app-gateway/backendHttpSettingsCollection/demo-http-setting referenced by resource /subscriptions/mysub/resourceGroups/rg-hri-testing-apim/providers/Microsoft.Network/applicationGateways/demo-app-gateway/requestRoutingRules/demo-rule was not found. Please make sure that the referenced resource exists, and that both resources are in the same region." Details=[]
on app-gateway-main.tf line 1, in resource "azurerm_application_gateway" "demo-app-gateway":
1: resource "azurerm_application_gateway" "demo-app-gateway" {
The resource causing the error is the request_routing_rule not being found, but what confuses me is that it is looking for it before creating it?
Can anyone please help me understand what I am doing wrong here?
If you need more info, just let me know.
Thank you very much.
Please check the backend HTTP settings name referenced by the request_routing_rule block. You have to change it to demo-http-settings in request_routing_rule to resolve the error.
Issue:
You are using the following as the backend HTTP settings:
backend_http_settings {
cookie_based_affinity = "Disabled"
name = "demo-http-settings"
port = 443
protocol = "Https"
host_name = "apim.test.com"
pick_host_name_from_backend_address = false
path = "/external/"
request_timeout = 20
probe_name = "demo-apim-probe"
trusted_root_certificate_names = ["demo-trusted-root-ca-certificate"]
}
But while referencing it in the request routing rule you are using:
request_routing_rule {
http_listener_name = "demo-app-gateway-listener"
name = "demo-rule"
rule_type = "Basic"
backend_address_pool_name = "demo-backend-pool"
backend_http_settings_name = "demo-http-setting"
}
You have named the backend HTTP settings demo-http-settings but reference them as demo-http-setting in request_routing_rule, so the deployment errors out because it can't find the backend HTTP settings.
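The corrected block (only the last name changes):
request_routing_rule {
  http_listener_name         = "demo-app-gateway-listener"
  name                       = "demo-rule"
  rule_type                  = "Basic"
  backend_address_pool_name  = "demo-backend-pool"
  backend_http_settings_name = "demo-http-settings"
}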

Terraform aws_alb_listener: how to add multiple target_group_arn values in the default action for type = "forward"

My approach: I have to create an "aws_lb_listener" resource with type = "forward" in the default action, but I don't have just one target_group_arn; I have multiple target_group_arn values.
The snippet below shows the frontend-listener, but the default action should include more than one target group ARN.
resource "aws_alb_listener" "frontend-listener" {
load_balancer_arn = aws_alb.ss_alb.arn
port = "443" #"80"
protocol = "HTTPS"
depends_on = [aws_alb_target_group.aws_alb_target_group]
default_action {
#target_group_arn = aws_alb_target_group.aws_alb_target_group.arn
type = "forward"
}
}
The aws_alb_target_group resource shows multiple target groups being created.
resource "aws_alb_target_group" "aws_alb_target_group" {
for_each = local.ebapp_name
name = "${each.value.name}-tg"
port = 80
protocol = "HTTP"
vpc_id = var.vpc_id
}
I have looked at the Terraform documentation but couldn't find a solution. (https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/lb_listener)
resource "aws_alb_listener_rule" "aws_alb_listener_rule"{
for_each = local.ebapp_name
listener_arn = aws_alb_listener.frontend-listener.arn
action {
type = "forward"
target_group_arn = aws_alb_target_group.aws_alb_target_group[each.value.name].arn
}
condition {
path_pattern {
values = ["/${each.value.name}/*"]
}
}
}
I have also included the listener rules above.
The error appears when running terraform apply:
for actions of type 'forward', you must specify a 'forward' block or 'target_group_arn'
What changes should I make to solve this error?
I think you could achieve that using dynamic blocks. For example:
resource "aws_alb_listener" "frontend-listener" {
load_balancer_arn = aws_alb.ss_alb.arn
port = "443" #"80"
protocol = "HTTPS"
depends_on = [aws_alb_target_group.aws_alb_target_group]
default_action {
type = "forward"
forward {
dynamic "target_group" {
for_each = aws_alb_target_group.aws_alb_target_group
content {
arn = target_group.value["arn"]
}
}
}
}
}
The above is an example only, and some adjustment may still be required to make it work as expected.
This worked for me. It now adds all three target groups to the default action of the listener.
locals {
target_groups = ["1", "2", "3"]
}
resource "aws_lb_listener" "https_to_target_group" {
certificate_arn = aws_acm_certificate.cd.arn
load_balancer_arn = aws_lb.cd.arn
port = var.alb.port
protocol = var.alb.protocol
ssl_policy = var.alb.ssl_policy
default_action {
type = "forward"
forward {
dynamic "target_group" {
for_each = local.target_groups
content {
arn = aws_lb_target_group.cd[target_group.key].arn
}
}
}
}
}
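If the traffic split should be uneven, the target_group block inside forward also accepts a weight argument; a hedged sketch with hypothetical weights:
forward {
  target_group {
    arn    = aws_lb_target_group.cd[0].arn
    weight = 80 # hypothetical: 80% of traffic
  }
  target_group {
    arn    = aws_lb_target_group.cd[1].arn
    weight = 20 # hypothetical: 20% of traffic
  }
}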

ECS and Application Load Balancer Ephemeral Ports using Terraform

I tried to build an ECS cluster with an ALB in front using Terraform. As I used dynamic port mapping, the targets are not registered as healthy. I played with the health check and success codes; if I set it to 301, everything is fine.
ECS
data "template_file" "mb_task_template" {
template = file("${path.module}/templates/marketplace-backend.json.tpl")
vars = {
name = "${var.mb_image_name}"
port = "${var.mb_port}"
image = "${aws_ecr_repository.mb.repository_url}"
log_group = "${aws_cloudwatch_log_group.mb.name}"
region = "${var.region}"
}
}
resource "aws_ecs_cluster" "mb" {
name = var.mb_image_name
}
resource "aws_ecs_task_definition" "mb" {
family = var.mb_image_name
container_definitions = data.template_file.mb_task_template.rendered
volume {
name = "mb-home"
host_path = "/ecs/mb-home"
}
}
resource "aws_ecs_service" "mb" {
name = var.mb_repository_url
cluster = aws_ecs_cluster.mb.id
task_definition = aws_ecs_task_definition.mb.arn
desired_count = 2
iam_role = var.aws_iam_role_ecs
depends_on = [aws_autoscaling_group.mb]
load_balancer {
target_group_arn = var.target_group_arn
container_name = var.mb_image_name
container_port = var.mb_port
}
}
resource "aws_autoscaling_group" "mb" {
name = var.mb_image_name
availability_zones = ["${var.availability_zone}"]
min_size = var.min_instance_size
max_size = var.max_instance_size
desired_capacity = var.desired_instance_capacity
health_check_type = "EC2"
health_check_grace_period = 300
launch_configuration = aws_launch_configuration.mb.name
vpc_zone_identifier = flatten([var.vpc_zone_identifier])
lifecycle {
create_before_destroy = true
}
}
data "template_file" "user_data" {
template = file("${path.module}/templates/user_data.tpl")
vars = {
ecs_cluster_name = "${var.mb_image_name}"
}
}
resource "aws_launch_configuration" "mb" {
name_prefix = var.mb_image_name
image_id = var.ami
instance_type = var.instance_type
security_groups = ["${var.aws_security_group}"]
iam_instance_profile = var.aws_iam_instance_profile
key_name = var.key_name
associate_public_ip_address = true
user_data = data.template_file.user_data.rendered
lifecycle {
create_before_destroy = true
}
}
resource "aws_cloudwatch_log_group" "mb" {
name = var.mb_image_name
retention_in_days = 14
}
ALB
locals {
target_groups = ["1", "2"]
}
resource "aws_alb" "mb" {
name = "${var.mb_image_name}-alb"
internal = false
load_balancer_type = "application"
security_groups = ["${aws_security_group.mb_alb.id}"]
subnets = var.subnets
tags = {
Name = var.mb_image_name
}
}
resource "aws_alb_target_group" "mb" {
count = length(local.target_groups)
name = "${var.mb_image_name}-tg-${element(local.target_groups, count.index)}"
port = var.mb_port
protocol = "HTTP"
vpc_id = var.vpc_id
target_type = "instance"
health_check {
path = "/health"
protocol = "HTTP"
timeout = "10"
interval = "15"
healthy_threshold = "3"
unhealthy_threshold = "3"
matcher = "200-299"
}
lifecycle {
create_before_destroy = true
}
tags = {
Name = var.mb_image_name
}
}
resource "aws_alb_listener" "mb_https" {
load_balancer_arn = aws_alb.mb.arn
port = 443
protocol = "HTTPS"
ssl_policy = "ELBSecurityPolicy-2016-08"
certificate_arn = module.dns.certificate_arn
default_action {
type = "forward"
target_group_arn = aws_alb_target_group.mb.0.arn
}
}
resource "aws_alb_listener_rule" "mb_https" {
listener_arn = aws_alb_listener.mb_https.arn
priority = 100
action {
type = "forward"
target_group_arn = aws_alb_target_group.mb.0.arn
}
condition {
field = "path-pattern"
values = ["/health/"]
}
}
Okay, looks like the code above is working. I had a different issue with networking.
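For anyone hitting the same symptom: with dynamic port mapping, the container instances receive traffic on ephemeral host ports (typically 32768-65535), so the instance security group must allow that whole range from the ALB's security group. A hedged sketch, assuming var.aws_security_group holds the instance security group ID:
resource "aws_security_group_rule" "ecs_ephemeral_from_alb" {
  type                     = "ingress"
  from_port                = 32768
  to_port                  = 65535
  protocol                 = "tcp"
  security_group_id        = var.aws_security_group
  source_security_group_id = aws_security_group.mb_alb.id
}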
