Create Azure Application Gateway with static private IP address via Terraform

I can't find a way to create an application gateway with a private IP via Terraform without manually inserting a hard-coded private IP address.
I tried:
Creating a private IP in the application gateway subnet - failed because Azure blocks it (the error I attached is from the UI, but Terraform raises the same error).
Creating a dynamic private IP in the application gateway subnet - failed.
It only works when creating an application gateway with a hard-coded IP address.
This solution is not good enough for me because we handle many environments and we don't want to rely on developers to remember to add a private IP.
Is there a good solution?

The Application Gateway v2 SKU supports the static VIP type exclusively, whereas the v1 SKU can be configured with a static or dynamic internal IP address and a dynamic public IP address.
Refer: Application Gateway frontend-ip-addresses
Application Gateway v2 currently does not support a private-IP-only mode. The v2 SKU can be configured with either both a static internal IP address and a static public IP address, or only a static public IP address. It cannot be configured with only a static internal IP address.
Refer: Application gateway v2 with only private-ip
When deploying with Terraform, we should define two frontend_ip_configuration blocks: one for the public IP configuration and another for the private IP configuration.
Scenario 1: When trying to create a new application gateway with a dynamic private IP and a dynamic public IP using Terraform, it gets created for the Standard (v1) SKU only.
terraform {
  required_providers {
    azurerm = {
      source  = "hashicorp/azurerm"
      version = "~> 2.65"
    }
  }
  required_version = ">= 0.14.9"
}
provider "azurerm" {
  features {}
}
resource "azurerm_resource_group" "test" {
  name     = "Terraformtest"
  location = "West Europe"
}
resource "azurerm_virtual_network" "test" {
  name                = "terraformvnet"
  resource_group_name = azurerm_resource_group.test.name
  location            = azurerm_resource_group.test.location
  address_space       = ["10.254.0.0/16"]
}
resource "azurerm_subnet" "frontend" {
  name                 = "frontend"
  resource_group_name  = azurerm_resource_group.test.name
  virtual_network_name = azurerm_virtual_network.test.name
  address_prefixes     = ["10.254.0.0/24"]
}
resource "azurerm_subnet" "backend" {
  name                 = "backend"
  resource_group_name  = azurerm_resource_group.test.name
  virtual_network_name = azurerm_virtual_network.test.name
  address_prefixes     = ["10.254.2.0/24"]
}
resource "azurerm_public_ip" "test" {
  name                = "test-pip"
  resource_group_name = azurerm_resource_group.test.name
  location            = azurerm_resource_group.test.location
  allocation_method   = "Dynamic"
}
locals {
  backend_address_pool_name      = "${azurerm_virtual_network.test.name}-beap"
  frontend_port_name             = "${azurerm_virtual_network.test.name}-feport"
  frontend_ip_configuration_name = "${azurerm_virtual_network.test.name}-feip"
  http_setting_name              = "${azurerm_virtual_network.test.name}-be-htst"
  listener_name                  = "${azurerm_virtual_network.test.name}-httplstn"
  request_routing_rule_name      = "${azurerm_virtual_network.test.name}-rqrt"
  redirect_configuration_name    = "${azurerm_virtual_network.test.name}-rdrcfg"
}
resource "azurerm_application_gateway" "network" {
  name                = "test-appgateway"
  resource_group_name = "${azurerm_resource_group.test.name}"
  location            = "${azurerm_resource_group.test.location}"
  sku {
    name     = "Standard_Small"
    tier     = "Standard"
    capacity = 2
  }
  gateway_ip_configuration {
    name      = "my-gateway-ip-configuration"
    subnet_id = "${azurerm_subnet.frontend.id}"
  }
  frontend_port {
    name = "${local.frontend_port_name}"
    port = 80
  }
  frontend_ip_configuration {
    name                 = "${local.frontend_ip_configuration_name}"
    public_ip_address_id = "${azurerm_public_ip.test.id}"
  }
  frontend_ip_configuration {
    name                          = "${local.frontend_ip_configuration_name}-private"
    subnet_id                     = "${azurerm_subnet.frontend.id}"
    private_ip_address_allocation = "Dynamic"
  }
  backend_address_pool {
    name = "${local.backend_address_pool_name}"
  }
  backend_http_settings {
    name                  = "${local.http_setting_name}"
    cookie_based_affinity = "Disabled"
    path                  = "/path1/"
    port                  = 80
    protocol              = "Http"
    request_timeout       = 1
  }
  http_listener {
    name                           = "${local.listener_name}"
    frontend_ip_configuration_name = "${local.frontend_ip_configuration_name}-private"
    frontend_port_name             = "${local.frontend_port_name}"
    protocol                       = "Http"
  }
  request_routing_rule {
    name                       = "${local.request_routing_rule_name}"
    rule_type                  = "Basic"
    http_listener_name         = "${local.listener_name}"
    backend_address_pool_name  = "${local.backend_address_pool_name}"
    backend_http_settings_name = "${local.http_setting_name}"
  }
}
Scenario 2: When creating a Standard_v2 application gateway we can configure a private IP, but dynamic allocation is not supported yet, so the private IP must be static and you must specify the address you want to use. To do that, you must also select the Standard SKU and static allocation for the public IP as well.
So, after updating to private_ip_address_allocation = "Static" and private_ip_address = "10.254.0.10", it gets created successfully.
terraform {
  required_providers {
    azurerm = {
      source  = "hashicorp/azurerm"
      version = "~> 2.65"
    }
  }
  required_version = ">= 0.14.9"
}
provider "azurerm" {
  features {}
}
resource "azurerm_resource_group" "test" {
  name     = "Terraformtest"
  location = "West Europe"
}
resource "azurerm_virtual_network" "test" {
  name                = "terraformvnet"
  resource_group_name = azurerm_resource_group.test.name
  location            = azurerm_resource_group.test.location
  address_space       = ["10.254.0.0/16"]
}
resource "azurerm_subnet" "frontend" {
  name                 = "frontend"
  resource_group_name  = azurerm_resource_group.test.name
  virtual_network_name = azurerm_virtual_network.test.name
  address_prefixes     = ["10.254.0.0/24"]
}
resource "azurerm_subnet" "backend" {
  name                 = "backend"
  resource_group_name  = azurerm_resource_group.test.name
  virtual_network_name = azurerm_virtual_network.test.name
  address_prefixes     = ["10.254.2.0/24"]
}
resource "azurerm_public_ip" "test" {
  name                = "test-pip"
  resource_group_name = azurerm_resource_group.test.name
  location            = azurerm_resource_group.test.location
  allocation_method   = "Static"
  sku                 = "Standard"
}
locals {
  backend_address_pool_name      = "${azurerm_virtual_network.test.name}-beap"
  frontend_port_name             = "${azurerm_virtual_network.test.name}-feport"
  frontend_ip_configuration_name = "${azurerm_virtual_network.test.name}-feip"
  http_setting_name              = "${azurerm_virtual_network.test.name}-be-htst"
  listener_name                  = "${azurerm_virtual_network.test.name}-httplstn"
  request_routing_rule_name      = "${azurerm_virtual_network.test.name}-rqrt"
  redirect_configuration_name    = "${azurerm_virtual_network.test.name}-rdrcfg"
}
resource "azurerm_application_gateway" "network" {
  name                = "test-appgateway"
  resource_group_name = "${azurerm_resource_group.test.name}"
  location            = "${azurerm_resource_group.test.location}"
  sku {
    name     = "Standard_v2"
    tier     = "Standard_v2"
    capacity = 2
  }
  gateway_ip_configuration {
    name      = "my-gateway-ip-configuration"
    subnet_id = "${azurerm_subnet.frontend.id}"
  }
  frontend_port {
    name = "${local.frontend_port_name}"
    port = 80
  }
  frontend_ip_configuration {
    name                 = "${local.frontend_ip_configuration_name}"
    public_ip_address_id = "${azurerm_public_ip.test.id}"
  }
  frontend_ip_configuration {
    name                          = "${local.frontend_ip_configuration_name}-private"
    subnet_id                     = "${azurerm_subnet.frontend.id}"
    private_ip_address_allocation = "Static"
    private_ip_address            = "10.254.0.10"
  }
  backend_address_pool {
    name = "${local.backend_address_pool_name}"
  }
  backend_http_settings {
    name                  = "${local.http_setting_name}"
    cookie_based_affinity = "Disabled"
    path                  = "/path1/"
    port                  = 80
    protocol              = "Http"
    request_timeout       = 1
  }
  http_listener {
    name                           = "${local.listener_name}"
    frontend_ip_configuration_name = "${local.frontend_ip_configuration_name}"
    frontend_port_name             = "${local.frontend_port_name}"
    protocol                       = "Http"
  }
  request_routing_rule {
    name                       = "${local.request_routing_rule_name}"
    rule_type                  = "Basic"
    http_listener_name         = "${local.listener_name}"
    backend_address_pool_name  = "${local.backend_address_pool_name}"
    backend_http_settings_name = "${local.http_setting_name}"
  }
}
Note: two application gateways cannot use the same subnet, so if you are creating a new application gateway you have to create a new subnet for it.
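If the goal is to avoid hard-coding the private IP per environment (the original concern), one option is to compute it from the subnet's address space with Terraform's cidrhost() function. A minimal sketch, assuming the frontend subnet above and that host number 10 is free in every environment:
frontend_ip_configuration {
  name                          = "${local.frontend_ip_configuration_name}-private"
  subnet_id                     = azurerm_subnet.frontend.id
  private_ip_address_allocation = "Static"
  # cidrhost("10.254.0.0/24", 10) evaluates to "10.254.0.10"; the host number
  # is an arbitrary choice, but avoid the Azure-reserved addresses (the first
  # four and the last address of every subnet).
  private_ip_address = cidrhost(azurerm_subnet.frontend.address_prefixes[0], 10)
}
This keeps the allocation static, as the v2 SKU requires, while letting each environment derive the address from its own subnet CIDR.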

Can you paste your Terraform code?
For the latest Terraform version, the documentation says that the frontend_ip_configuration block supports the private_ip_address_allocation parameter, which can hold the value Dynamic.
Also remember that the application gateway has to have a dedicated subnet with only application gateways in it. I am not sure, but I suppose it is one gateway per subnet, so two gateways in one subnet is impossible.

Related

Azure Kubernetes Service: Setup an Internal load balancer with static IP address

I have an Azure Kubernetes cluster created using the following Terraform code:
# Required Provider
terraform {
  required_providers {
    azurerm = {
      source  = "hashicorp/azurerm"
      version = "~> 3.0.2"
    }
  }
  required_version = ">= 1.1.0"
}
data "azurerm_client_config" "current" {}
provider "azurerm" {
  subscription_id = local.subscription_id
  tenant_id       = local.tenant_id
  client_id       = local.client_id
  client_secret   = local.client_secret
  features {}
}
resource "random_pet" "rg-name" {
  prefix = var.resource_group_name_prefix
}
resource "azurerm_resource_group" "rg" {
  name     = random_pet.rg-name.id
  location = var.resource_group_location
}
resource "azurerm_virtual_network" "test" {
  name                = var.virtual_network_name
  location            = azurerm_resource_group.rg.location
  resource_group_name = azurerm_resource_group.rg.name
  address_space       = [var.virtual_network_address_prefix]
  subnet {
    name           = var.aks_subnet_name
    address_prefix = var.aks_subnet_address_prefix
  }
  tags = var.tags
}
data "azurerm_subnet" "kubesubnet" {
  name                 = var.aks_subnet_name
  virtual_network_name = azurerm_virtual_network.test.name
  resource_group_name  = azurerm_resource_group.rg.name
  depends_on           = [azurerm_virtual_network.test]
}
resource "azurerm_kubernetes_cluster" "k8s" {
  name                             = var.aks_name
  location                         = azurerm_resource_group.rg.location
  dns_prefix                       = var.aks_dns_prefix
  private_cluster_enabled          = var.private_cluster
  resource_group_name              = azurerm_resource_group.rg.name
  http_application_routing_enabled = false
  linux_profile {
    admin_username = var.vm_user_name
    ssh_key {
      key_data = file(var.public_ssh_key_path)
    }
  }
  default_node_pool {
    name            = "agentpool"
    node_count      = var.aks_agent_count
    vm_size         = var.aks_agent_vm_size
    os_disk_size_gb = var.aks_agent_os_disk_size
    vnet_subnet_id  = data.azurerm_subnet.kubesubnet.id
  }
  service_principal {
    client_id     = local.client_id
    client_secret = local.client_secret
  }
  network_profile {
    network_plugin     = "azure"
    dns_service_ip     = var.aks_dns_service_ip
    docker_bridge_cidr = var.aks_docker_bridge_cidr
    service_cidr       = var.aks_service_cidr
    load_balancer_sku  = "standard"
  }
  # Enable Azure AD role-based access control (RBAC) for the cluster
  azure_active_directory_role_based_access_control {
    managed                = var.azure_active_directory_role_based_access_control_managed
    admin_group_object_ids = var.active_directory_role_based_access_control_admin_group_object_ids
    azure_rbac_enabled     = var.azure_rbac_enabled
  }
  timeouts {
    create = "20m"
    delete = "20m"
  }
  depends_on = [data.azurerm_subnet.kubesubnet, module.log_analytics_workspace]
  tags       = var.tags
}
This creates the load balancer with a public IP.
However, I don't want the load balancer to have a public IP; instead it should have an internal private IP.
What should I do to give this load balancer an internal private IP, so that the service is not exposed over the Internet via a public IP?
Note: as per the Microsoft documentation, even if you annotate the service with service.beta.kubernetes.io/azure-load-balancer-internal: "true", an external IP is still assigned, which I am trying to avoid.
The load balancer that gets created with the AKS cluster (usually called kubernetes) is used for egress (not ingress) traffic and is a public LB; it cannot be private. This is part of the outbound type configuration.
The outbound type of the AKS cluster can be set to LoadBalancer, UserDefinedRouting or managedNatGateway. If you choose any option other than LB, you need to configure your network to route the traffic externally. Check this doc for more info.
For ingress traffic, you have the choice of a public or private LB. This is configured in the service resource (of type LoadBalancer) under Kubernetes, where you would use the annotation you mentioned to create a private LB. Public rules will use the same public LB created with the cluster.
You can set the private IP of the LB using annotations as well:
annotations:
  service.beta.kubernetes.io/azure-load-balancer-ipv4: 10.240.0.25
  service.beta.kubernetes.io/azure-load-balancer-internal: "true"
This is mentioned in the same doc that you shared.
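If the service itself is managed from Terraform, the same annotations can be set on a kubernetes_service resource. A minimal sketch, assuming the hashicorp/kubernetes provider is configured against the AKS cluster and that a workload labelled app = "myapp" exists (the name, selector, IP and ports here are placeholders):
resource "kubernetes_service" "internal" {
  metadata {
    name = "myapp-internal"
    annotations = {
      # Request an internal (private) Azure load balancer with a fixed private IP
      "service.beta.kubernetes.io/azure-load-balancer-internal" = "true"
      "service.beta.kubernetes.io/azure-load-balancer-ipv4"     = "10.240.0.25"
    }
  }
  spec {
    type = "LoadBalancer"
    selector = {
      app = "myapp"
    }
    port {
      port        = 80
      target_port = 8080
    }
  }
}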

Registering a private endpoint in Azure private DNS automatically using Terraform

I have an existing private DNS zone called privatelink.file.core.windows.net that is linked to a virtual network.
I have created a Terraform template that creates a storage account and a private endpoint for said storage account, which connects to the virtual network mentioned above. When the resources are created, I've noticed that the endpoint doesn't register in the private DNS zone automatically; instead I've had to manually create a private DNS A record. I would prefer this to happen automatically. How does one do this?
Storage Account Creation
resource "azurerm_storage_account" "st" {
name = var.st.name
resource_group_name = var.rg_shared_name
location = var.rg_shared_location
account_tier = var.st.tier
account_replication_type = var.st.replication
}
Private Endpoint Creation
# PRIVATE ENDPOINT FOR STORAGE ACCOUNT
resource "azurerm_private_endpoint" "pe" {
  name                = var.pe.name
  resource_group_name = var.rg_shared_name
  location            = var.rg_shared_location
  subnet_id           = var.subnet_id
  private_service_connection {
    name                           = "test"
    private_connection_resource_id = azurerm_storage_account.st.id
    is_manual_connection           = false
    subresource_names              = ["file"]
  }
}
Manual Creation of DNS Record
resource "azurerm_private_dns_a_record" "st_fqdn" {
name = azurerm_storage_account.st.name
zone_name = "privatelink.file.core.windows.net"
resource_group_name = "rg-hub-shared-core-dns-uks-001"
ttl = 300
records = ["172.17.208.4"]
}
I have resolved this issue; I was missing private_dns_zone_group within the azurerm_private_endpoint resource block. Once I added this code, it populated Azure private DNS automatically.
Source: https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/private_endpoint
The code below is what I changed, adding the private_dns_zone_group section.
# PRIVATE ENDPOINT FOR STORAGE ACCOUNT
resource "azurerm_private_endpoint" "pe" {
  name                = var.pe.name
  resource_group_name = var.rg_shared_name
  location            = var.rg_shared_location
  subnet_id           = var.subnet_id
  private_dns_zone_group {
    name                 = "add_to_azure_private_dns"
    private_dns_zone_ids = ["/subscriptions/d5f2dcf8-ab3f-47aa-9ec3-9c5aba4b909f/resourceGroups/rg-hub-shared-core-dns-uks-001/providers/Microsoft.Network/privateDnsZones/privatelink.file.core.windows.net"]
  }
  private_service_connection {
    name                           = "connect_to_storage_account"
    private_connection_resource_id = azurerm_storage_account.st.id
    is_manual_connection           = false
    subresource_names              = ["file"]
  }
}
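As a side note, the hard-coded zone ID can be avoided by looking the zone up with the azurerm_private_dns_zone data source. A minimal sketch, assuming the zone lives in the rg-hub-shared-core-dns-uks-001 resource group as above:
data "azurerm_private_dns_zone" "file" {
  name                = "privatelink.file.core.windows.net"
  resource_group_name = "rg-hub-shared-core-dns-uks-001"
}
The private_dns_zone_group block can then reference private_dns_zone_ids = [data.azurerm_private_dns_zone.file.id] instead of the full resource ID string.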

Failed to create aks using existing vnet

I'm trying to create AKS using Terraform; the catch is that I already have a VNet and subnet created, and I need the cluster created in that network.
When executing this code I'm getting an error:
locals {
  environment      = "prod"
  resource_group   = "hnk_rg_poc"
  vnet_subnet_cidr = ["10.3.1.0/24"]
}
#Existing vnet with address space "10.3.1.0/24"
data "azurerm_virtual_network" "existing-vnet" {
  name                = "${var.vnet}"
  resource_group_name = local.resource_group
}
#subnets
resource "azurerm_subnet" "vnet_subnet_id" {
  name                 = "${var.vnet_subnet_id}"
  resource_group_name  = local.resource_group
  address_prefixes     = local.vnet_subnet_cidr
  virtual_network_name = data.azurerm_virtual_network.existing-vnet.name
}
vnet_subnet_id = data.azurerm_subnet.vnet_subnet_id.id
As you already have an existing VNet and subnet to be used by the AKS cluster, you have to use a data block instead of a resource block for the subnet.
You can use the below to create a basic AKS cluster using your existing VNet and subnet:
provider "azurerm" {
features {}
}
#local vars
locals {
environment = "test"
resource_group = "resource_group_name"
name_prefix = "name-aks"
}
#Existing vnet with address space
data "azurerm_virtual_network" "base" {
name = "existing-vnet"
resource_group_name = local.resource_group
}
#existing subnet to be used by aks
data "azurerm_subnet" "aks" {
name = "existing-subnet"
resource_group_name = local.resource_group
virtual_network_name = data.azurerm_virtual_network.base.name
}
#kubernetes_cluster
resource "azurerm_kubernetes_cluster" "base" {
name = "${local.name_prefix}-${local.environment}"
location = data.azurerm_virtual_network.base.location
resource_group_name = data.azurerm_virtual_network.base.resource_group_name
dns_prefix = "dns-${local.name_prefix}-${local.environment}"
network_profile {
network_plugin = "azure"
}
default_node_pool {
name = "default"
node_count = 1
vm_size = "Standard_D2_v2"
vnet_subnet_id = data.azurerm_subnet.aks.id
}
identity {
type = "SystemAssigned"
}
}
Output: terraform plan completes successfully.

Unable to create Azure AKS Cluster using existing VNET and Subnets

I'm trying to build an AKS cluster in Azure using Terraform. However, I do not want AKS deployed into its own VNet and subnet; I already have a subnet within a VNet that I want it to use. When trying to just give it the subnet ID, I get an overlapping CIDR issue. My networking is:
VNET: 10.0.0.0/16
Subnets: 10.0.1.0/24, 10.0.2.0/24, and 10.0.3.0/24. I need AKS to use the 10.0.1.0/24 subnet within this VNET. However, my Terraform config is trying to use a CIDR of 10.0.0.0/16, which is an obvious conflict. I don't know how to fix this issue inside of Terraform; with the portal I can just choose the VNet/subnet for AKS. Below is my Terraform configuration, which generates the error:
terraform {
  required_providers {
    azurerm = {
      source  = "hashicorp/azurerm"
      version = "=2.46.0"
    }
  }
}
# Configure the Microsoft Azure Provider
provider "azurerm" {
  features {}
  subscription_id = "####"
  tenant_id       = "####"
}
locals {
  azure_location       = "East US"
  azure_location_short = "eastus"
}
resource "azurerm_resource_group" "primary_vnet_resource_group" {
  name     = "vnet-prod-002-eastus-001"
  location = local.azure_location
}
resource "azurerm_virtual_network" "primary_vnet_virtual_network" {
  name                = "vnet_primary_eastus-001"
  location            = local.azure_location
  resource_group_name = azurerm_resource_group.primary_vnet_resource_group.name
  address_space       = ["10.0.0.0/16"]
}
resource "azurerm_subnet" "aks-subnet" {
  name = "snet-aks-prod-002-eastus-001"
  # location = local.azure_location
  virtual_network_name = azurerm_virtual_network.primary_vnet_virtual_network.name
  resource_group_name  = azurerm_resource_group.primary_vnet_resource_group.name
  address_prefixes     = ["10.0.1.0/24"]
}
output "aks_subnet_id" {
  value = azurerm_subnet.aks-subnet.id
}
resource "azurerm_subnet" "application-subnet" {
  name = "snet-app-prod-002-eastus-001"
  # location = local.azure_location
  virtual_network_name = azurerm_virtual_network.primary_vnet_virtual_network.name
  resource_group_name  = azurerm_resource_group.primary_vnet_resource_group.name
  address_prefixes     = ["10.0.2.0/24"]
}
resource "azurerm_subnet" "postgres-subnet" {
  name = "snet-postgres-prod-002-eastus-001"
  # location = local.azure_location
  virtual_network_name = azurerm_virtual_network.primary_vnet_virtual_network.name
  resource_group_name  = azurerm_resource_group.primary_vnet_resource_group.name
  address_prefixes     = ["10.0.3.0/24"]
}
output "postgres_subnet_id" {
  value = azurerm_subnet.postgres-subnet.id
}
resource "azurerm_kubernetes_cluster" "aks-prod-002-eastus-001" {
  name                = "aks-prod-002-eastus-001"
  location            = local.azure_location
  resource_group_name = azurerm_resource_group.primary_vnet_resource_group.name
  dns_prefix          = "aks-prod-002-eastus-001"
  default_node_pool {
    name           = "default"
    node_count     = 1
    vm_size        = "Standard_DS2_v2"
    vnet_subnet_id = azurerm_subnet.aks-subnet.id
  }
  network_profile {
    network_plugin = "azure"
  }
  identity {
    type = "SystemAssigned"
  }
  addon_profile {
    aci_connector_linux {
      enabled = false
    }
    azure_policy {
      enabled = false
    }
    http_application_routing {
      enabled = false
    }
    oms_agent {
      enabled = false
    }
  }
}
I'm not a Terraform expert and really need a hand with this if anyone knows how to accomplish it. I've been up and down the documentation; I can find a way to specify the subnet ID, but that's about all I can do. If I don't specify the subnet ID then everything is built, but a new VNet is created, which is what I don't want.
Thanks in advance.
All the following properties need to be set under network_profile:
network_profile {
  network_plugin     = "azure"
  network_policy     = "azure"
  service_cidr       = "10.0.4.0/24"
  dns_service_ip     = "10.0.4.10"
  docker_bridge_cidr = "172.17.0.1/16"
}
These were missing; I hope this helps anyone who is having problems similar to mine.
More info about this block can be found here: https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/kubernetes_cluster#network_plugin

Terraform private azure load balancer issue

I am trying to deploy an infrastructure with a private load balancer:
.....
resource "azurerm_lb" "private" {
name = "${var.name}-${var.live}-private-lb"
location = data.azurerm_resource_group.rg.location
resource_group_name = data.azurerm_resource_group.rg.name
sku = var.sku
frontend_ip_configuration {
name = "frontend"
subnet_id = var.subnet_id != "" ? var.subnet_id : null
private_ip_address = (var.subnet_id != "" && var.private_ip != "") ? var.private_ip : null
private_ip_address_allocation = var.subnet_id != "" ? (var.subnet_id == "" ? "Static" : "Dynamic") : null
}
}
......
But I got the error message:
..../frontendIPConfigurations/frontend must reference either a Subnet, Public IP Address or Public IP Prefix." Details=[]
Why, and how can I tackle this issue? I don't know which configuration is missing.
Thanks.
An internal load balancer differs from a public load balancer: it is assigned to a subnet and does not have a public IP address. As the error says, the frontend must reference either a subnet, a public IP address or a public IP prefix, and the subnet must already exist when you reference it. You could use the subnet data source to access information about an existing resource, or create your own subnet and VNet for your load balancer.
For example, the following can work for me.
data "azurerm_resource_group" "rg" {
name = "mytestrg"
}
variable "sku" {
default = "basic"
}
variable "private_ip" {
default = "172.19.0.100"
}
variable "env" {
default="Static"
}
data "azurerm_subnet" "test" {
name = "default"
virtual_network_name = "vnet1"
resource_group_name = "${data.azurerm_resource_group.rg.name}"
}
resource "azurerm_lb" "test" {
name = "mytestlb"
location = "${data.azurerm_resource_group.rg.location}"
resource_group_name = "${data.azurerm_resource_group.rg.name}"
sku = "${var.sku}"
frontend_ip_configuration {
name = "frontend"
subnet_id = "${data.azurerm_subnet.test.id}"
private_ip_address = "${var.env=="Static"? var.private_ip: null}"
private_ip_address_allocation = "${var.env=="Static"? "Static": "Dynamic"}"
}
}
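As an aside, the conditional in the question always evaluates to "Dynamic": once the outer var.subnet_id != "" test passes, the inner var.subnet_id == "" test can never be true. A simpler formulation, assuming var.subnet_id and var.private_ip are empty strings when unset, is to key the allocation off whether a private IP was supplied:
frontend_ip_configuration {
  name               = "frontend"
  subnet_id          = var.subnet_id != "" ? var.subnet_id : null
  private_ip_address = var.private_ip != "" ? var.private_ip : null
  # Static only when an explicit IP was provided; otherwise let Azure allocate one
  private_ip_address_allocation = var.subnet_id != "" ? (var.private_ip != "" ? "Static" : "Dynamic") : null
}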
