I'm Setting up AKS cluster using userDefinedRouting with existing subnet and route table which are associated with network security group. Here is my code snippet.
provider "azurerm" {
version = "~> 2.25"
features {}
}
data "azurerm_resource_group" "aks" {
name = var.resource_group
}
#fetch existing subnet
data "azurerm_subnet" "aks" {
name = var.subnetname
virtual_network_name = var.virtual_network_name
resource_group_name = var.vnet_resource_group
}
resource "azurerm_network_interface" "k8svmnic" {
name = "k8svmnic"
resource_group_name = data.azurerm_resource_group.aks.name
location = data.azurerm_resource_group.aks.location
ip_configuration {
name = "internal"
subnet_id = data.azurerm_subnet.aks.id
private_ip_address_allocation = "Static"
private_ip_address = var.k8svmip #"10.9.56.10"
}
}
resource "azurerm_availability_set" "k8svmavset" {
name = "k8svmavset"
location = data.azurerm_resource_group.aks.location
resource_group_name = data.azurerm_resource_group.aks.name
platform_fault_domain_count = 3
platform_update_domain_count = 3
managed = true
}
resource "azurerm_network_security_group" "k8svmnsg" {
name = "k8vm-nsg"
resource_group_name = data.azurerm_resource_group.aks.name
location = data.azurerm_resource_group.aks.location
security_rule {
name = "allow_kube_tls"
protocol = "Tcp"
priority = 100
direction = "Inbound"
access = "Allow"
source_address_prefix = "VirtualNetwork"
destination_address_prefix = "*"
source_port_range = "*"
#destination_port_range = "443"
destination_port_ranges = ["443"]
description = "Allow kube-apiserver (tls) traffic to master"
}
security_rule {
name = "allow_ssh"
protocol = "Tcp"
priority = 101
direction = "Inbound"
access = "Allow"
source_address_prefix = "*"
destination_address_prefix = "*"
source_port_range = "*"
#destination_port_range = "22"
destination_port_ranges = ["22"]
description = "Allow SSH traffic to master"
}
}
resource "azurerm_network_interface_security_group_association" "k8svmnicnsg" {
network_interface_id = azurerm_network_interface.k8svmnic.id
network_security_group_id = azurerm_network_security_group.k8svmnsg.id
}
resource "azurerm_linux_virtual_machine" "k8svm" {
name = "k8svm"
resource_group_name = data.azurerm_resource_group.aks.name
location = data.azurerm_resource_group.aks.location
size = "Standard_D3_v2"
admin_username = var.admin_username
disable_password_authentication = true
availability_set_id = azurerm_availability_set.k8svmavset.id
network_interface_ids = [
azurerm_network_interface.k8svmnic.id,
]
admin_ssh_key {
username = var.admin_username
public_key = var.ssh_key
}
os_disk {
caching = "ReadWrite"
storage_account_type = "Standard_LRS"
disk_size_gb = 30
}
source_image_reference {
publisher = "microsoft-aks"
offer = "aks"
sku = "aks-engine-ubuntu-1804-202007"
version = "2020.07.24"
}
}
resource "azurerm_managed_disk" "k8svm-disk" {
name = "${azurerm_linux_virtual_machine.k8svm.name}-disk"
location = data.azurerm_resource_group.aks.location
resource_group_name = data.azurerm_resource_group.aks.name
storage_account_type = "Standard_LRS"
create_option = "Empty"
disk_size_gb = 512
}
resource "azurerm_virtual_machine_data_disk_attachment" "k8svm-disk-attachment" {
managed_disk_id = azurerm_managed_disk.k8svm-disk.id
virtual_machine_id = azurerm_linux_virtual_machine.k8svm.id
lun = 5
caching = "ReadWrite"
}
resource "azurerm_public_ip" "aks" {
name = "akspip"
resource_group_name = data.azurerm_resource_group.aks.name
location = data.azurerm_resource_group.aks.location
allocation_method = "Static"
sku = "Standard"
depends_on = [azurerm_virtual_machine_data_disk_attachment.k8svm-disk-attachment]
}
resource "azurerm_route_table" "aks"{
name = "aks" #var.subnetname
resource_group_name = data.azurerm_resource_group.aks.name
location = data.azurerm_resource_group.aks.location
disable_bgp_route_propagation = false
route {
name = "default_route"
address_prefix = "0.0.0.0/0"
next_hop_type = "VirtualAppliance"
next_hop_in_ip_address = var.k8svmip
}
route {
name = var.route_name
address_prefix = var.route_address_prefix
next_hop_type = var.route_next_hop_type
}
}
resource "azurerm_subnet_route_table_association" "aks" {
subnet_id = data.azurerm_subnet.aks.id
route_table_id = azurerm_route_table.aks.id
}
resource "azurerm_subnet_network_security_group_association" "aks" {
subnet_id = data.azurerm_subnet.aks.id
network_security_group_id = var.network_security_group
}
resource "null_resource" "previous" {}
resource "time_sleep" "wait_90_seconds" {
depends_on = [null_resource.previous]
create_duration = "90s"
}
# This resource will create (at least) 30 seconds after null_resource.previous
resource "null_resource" "next" {
depends_on = [time_sleep.wait_90_seconds]
}
resource "azurerm_kubernetes_cluster" "aks" {
name = data.azurerm_resource_group.aks.name
resource_group_name = data.azurerm_resource_group.aks.name
location = data.azurerm_resource_group.aks.location
dns_prefix = "akstfelk" #The dns_prefix must contain between 3 and 45 characters, and can contain only letters, numbers, and hyphens. It must start with a letter and must end with a letter or a number.
kubernetes_version = "1.18.8"
private_cluster_enabled = false
node_resource_group = var.node_resource_group
#api_server_authorized_ip_ranges = [] #var.api_server_authorized_ip_ranges
default_node_pool {
enable_node_public_ip = false
name = "agentpool"
node_count = var.node_count
orchestrator_version = "1.18.8"
vm_size = var.vm_size
os_disk_size_gb = var.os_disk_size_gb
vnet_subnet_id = data.azurerm_subnet.aks.id
type = "VirtualMachineScaleSets"
}
linux_profile {
admin_username = var.admin_username
ssh_key {
key_data = var.ssh_key
}
}
service_principal {
client_id = var.client_id
client_secret = var.client_secret
}
role_based_access_control {
enabled = true
}
network_profile {
network_plugin = "kubenet"
network_policy = "calico"
dns_service_ip = "172.16.1.10"
service_cidr = "172.16.0.0/16"
docker_bridge_cidr = "172.17.0.1/16"
pod_cidr = "172.40.0.0/16"
outbound_type = "userDefinedRouting"
load_balancer_sku = "Standard"
load_balancer_profile {
outbound_ip_address_ids = [ "${azurerm_public_ip.aks.id}" ]
}
# load_balancer_profile {
# managed_outbound_ip_count = 5
# #effective_outbound_ips = [ azurerm_public_ip.aks.id ]
# outbound_ip_address_ids = []
# outbound_ip_prefix_ids = []
# outbound_ports_allocated = 0
# }
}
addon_profile {
aci_connector_linux {
enabled = false
}
azure_policy {
enabled = false
}
http_application_routing {
enabled = false
}
kube_dashboard {
enabled = false
}
oms_agent {
enabled = false
}
}
depends_on = [azurerm_subnet_route_table_association.aks]
}
According to Azure doc it says: "By default, one public IP will automatically be created in the same resource group as the AKS cluster, if NO public IP, public IP prefix, or number of IPs is specified.
But in my case outbound connection not happening Hence cluster provision getting failed. I've even created another public Ip and trying through Loadbalancer profile but i'm getting below error.
Error: "network_profile.0.load_balancer_profile.0.managed_outbound_ip_count": conflicts with network_profile.0.load_balancer_profile.0.outbound_ip_address_ids
If i've removed loadbalancer_profile from script i'm getting below error
Error: creating Managed Kubernetes Cluster "aks-tf" (Resource Group "aks-tf"): containerservice.ManagedClustersClient#CreateOrUpdate: Failure sending request: StatusCode=400 -- Original Error: Code="InvalidUserDefinedRoutingWithLoadBalancerProfile" Message="UserDefinedRouting and load balancer profile are mutually exclusive. Please refer to http://aka.ms/aks/outboundtype for more details" Target="networkProfile.loadBalancerProfile"
Kinldy help me where i'm missing .
Any help would be appreciated.
When you use the UserDefineRouting, you need to set the network_plugin as azure and put the AKS cluster inside the subnet with the user-defined router, here is the description:
The AKS cluster must be deployed into an existing virtual network with
a subnet that has been previously configured.
And if the network_plugin is set to azure, then the vnet_subnet_id field in the default_node_pool block must be set and pod_cidr must not be set. You can find this note in azurerm_kubernetes_cluster.
Update:
It's a little more complex than you think, here is the Network Architecture of it and steps to create it via CLI. This architecture requires explicitly sending egress traffic to an appliance like a firewall, gateway, proxy or to allow the Network Address Translation (NAT) to be done by a public IP assigned to the standard load balancer or appliance.
For the outbound, instead of a Public Load Balancer you can use an internal Load Balancer for internal traffic.
In addition, some steps you cannot achieve via the Terraform, for example, the Azure Firewall. Take a look at the steps and prepare the resources which you cannot achieve via the CLI.
Related
I am trying to create an AKS cluster in azure using the terraform. My requirements are as follows:
Create a site-to-site VPN connection where the gateway in the subnet of range 172.30.0.0/16 - This is done
Install Azure AKS cluster with Azure CNI and pod's should be in the range of VPN CIDR (172.30.0.0/16).
Here's my terraform code. I read that if you use azure as your network_policy and network_plugin, you can't set the pod_cidr - source
Then how can I do this so my PODs can reach the on-premise network through the site-to-site vpn?
resource "azurerm_kubernetes_cluster" "k8s_cluster" {
lifecycle {
ignore_changes = [
default_node_pool[0].node_count
]
prevent_destroy = false
}
name = var.cluster_name
location = var.location
resource_group_name = var.rg_name
dns_prefix = var.dns_prefix
kubernetes_version = var.kubernetes_version
# node_resource_group = var.resource_group_name
default_node_pool {
name = var.default_node_pool.name
node_count = var.default_node_pool.node_count
max_count = var.default_node_pool.max_count
min_count = var.default_node_pool.min_count
vm_size = var.default_node_pool.vm_size
os_disk_size_gb = var.default_node_pool.os_disk_size_gb
# vnet_subnet_id = var.vnet_subnet_id
max_pods = var.default_node_pool.max_pods
type = var.default_node_pool.agent_pool_type
enable_node_public_ip = var.default_node_pool.enable_node_public_ip
enable_auto_scaling = var.default_node_pool.enable_auto_scaling
tags = merge(var.common_tags)
}
identity {
type = var.identity
}
network_profile {
network_plugin = var.network_plugin #azure
network_policy = var.network_policy #"azure"
load_balancer_sku = var.load_balancer_sku #"standard"
# pod_cidr = var.pod_cidr | When network_plugin is set to azure - the vnet_subnet_id field in the default_node_pool block must be set and pod_cidr must not be set.
}
tags = merge(var.common_tags)
}
# AKS cluster related variables
cluster_name = "test-cluster"
dns_prefix = "testjana"
kubernetes_version = "1.22.15"
default_node_pool = {
name = "masternp" # for system pods
node_count = 1
vm_size = "standard_e4bds_v5" # 4 vcpu and 32 Gb of memory
enable_auto_scaling = false
enable_node_public_ip = false
min_count = null
max_count = null
max_pods = 100
os_disk_size_gb = 80
agent_pool_type = "VirtualMachineScaleSets"
}
admin_username = "jananathadmin"
ssh_public_key = "public_key"
identity = "SystemAssigned"
network_plugin = "azure"
network_policy = "azure"
load_balancer_sku = "standard"
Default, all PODs in AKS will communicate each other, when we want to restrict the traffic, network policies can be used to allow or deny traffic between pods.
Here is the tutorial link
Reproduced the same via terraform using below code snippet to connect cluster with azure CNI and a vnet gateway which links our on-prem environment to azure via a site-to-site VPN.
Step1:
main tf file as follows
resource "azurerm_resource_group" "example" {
name = "*****-****"
location = "East US"
}
resource "azurerm_role_assignment" "role_acrpull" {
scope = azurerm_container_registry.acr.id
role_definition_name = "AcrPull"
principal_id = azurerm_kubernetes_cluster.demo.kubelet_identity.0.object_id
}
resource "azurerm_container_registry" "acr" {
name = "acrswarna"
resource_group_name = azurerm_resource_group.example.name
location = azurerm_resource_group.example.location
sku = "Standard"
admin_enabled = false
}
resource "azurerm_virtual_network" "puvnet" {
name = "Publics_VNET"
resource_group_name = azurerm_resource_group.example.name
location = azurerm_resource_group.example.location
address_space = ["10.19.0.0/16"]
}
resource "azurerm_subnet" "example" {
name = "GatewaySubnet"
resource_group_name = azurerm_resource_group.example.name
virtual_network_name = azurerm_virtual_network.puvnet.name
address_prefixes = ["10.19.3.0/24"]
}
resource "azurerm_subnet" "osubnet" {
name = "Outer_Subnet"
resource_group_name = azurerm_resource_group.example.name
address_prefixes = ["10.19.1.0/24"]
virtual_network_name = azurerm_virtual_network.puvnet.name
}
resource "azurerm_kubernetes_cluster" "demo" {
name = "demo-aksnew"
location = azurerm_resource_group.example.location
resource_group_name = azurerm_resource_group.example.name
dns_prefix = "demo-aks"
default_node_pool {
name = "default"
node_count = 2
vm_size = "standard_e4bds_v5"
type = "VirtualMachineScaleSets"
enable_auto_scaling = false
min_count = null
max_count = null
max_pods = 100
//vnet_subnet_id = azurerm_subnet.osubnet.id
}
identity {
type = "SystemAssigned"
}
network_profile {
network_plugin = "azure"
load_balancer_sku = "standard"
network_policy = "azure"
}
tags = {
Environment = "Development"
}
}
resource "azurerm_public_ip" "example" {
name = "pips-firewall"
resource_group_name = azurerm_resource_group.example.name
location = azurerm_resource_group.example.location
allocation_method = "Static"
sku = "Standard"
}
resource "azurerm_virtual_network_gateway" "example" {
name = "test"
location = azurerm_resource_group.example.location
resource_group_name = azurerm_resource_group.example.name
type = "Vpn"
vpn_type = "RouteBased"
active_active = false
enable_bgp = false
sku = "VpnGw1"
ip_configuration {
name = "vnetGatewayConfig"
public_ip_address_id = azurerm_public_ip.example.id
private_ip_address_allocation = "Dynamic"
subnet_id = azurerm_subnet.example.id
}
vpn_client_configuration {
address_space = ["172.30.0.0/16"]
root_certificate {
name = "******-****-ID-Root-CA"
public_cert_data = <<EOF
**Use certificate here**
EOF
}
revoked_certificate {
name = "*****-Global-Root-CA"
thumbprint = "****************"
}
}
}
NOTE: Update root certificate configuration by own on above code.
Provider tf file as follow
terraform {
required_providers {
azurerm = {
source = "hashicorp/azurerm"
version = "=3.0.0"
}
}
}
provider "azurerm" {
features {}
skip_provider_registration = true
}
upon running
terraform plan
terraform apply -auto-approve
Vnet and SubNet configurations
Virtual Network Gateway configuraiton as follows.
Deployed sample pods on cluster
I need to test my azure private-endpoint using the following scenario.
We have a virtual netwroks with two sub-nets (vm_subnet and storage_account_subnet)
The virtual-machine (vm) should be able to connect to the storage-account using a private-link.
So, the below image explains the scenario:
and then i need to test my endpoint using the below manual test-case:
Connect to the azure virtual-machine using ssh and putty username: adminuser and password: P#$$w0rd1234!
In the terminal ping formuleinsstorage.blob.core.windows.net (Expect to see the ip of storage account in the range of storage_account_subnet (10.0.2.0/24))
I deploy all the infrastructure using the below Terraform code:
provider "azurerm" {
features {
resource_group {
prevent_deletion_if_contains_resources = false
}
}
}
resource "azurerm_resource_group" "main_resource_group" {
name = "RG-Terraform-on-Azure"
location = "West Europe"
}
# Create Virtual-Network
resource "azurerm_virtual_network" "virtual_network" {
name = "Vnet"
address_space = ["10.0.0.0/16"]
location = azurerm_resource_group.main_resource_group.location
resource_group_name = azurerm_resource_group.main_resource_group.name
}
# Create subnet for virtual-machine
resource "azurerm_subnet" "virtual_network_subnet" {
name = "vm_subnet"
resource_group_name = azurerm_resource_group.main_resource_group.name
virtual_network_name = azurerm_virtual_network.virtual_network.name
address_prefixes = ["10.0.1.0/24"]
}
# Create subnet for storage account
resource "azurerm_subnet" "storage_account_subnet" {
name = "storage_account_subnet"
resource_group_name = azurerm_resource_group.main_resource_group.name
virtual_network_name = azurerm_virtual_network.virtual_network.name
address_prefixes = ["10.0.2.0/24"]
}
# Create Linux Virtual machine
resource "azurerm_linux_virtual_machine" "example" {
name = "example-machine"
location = azurerm_resource_group.main_resource_group.location
resource_group_name = azurerm_resource_group.main_resource_group.name
size = "Standard_F2"
admin_username = "adminuser"
admin_password = "14394Las?"
disable_password_authentication = false
network_interface_ids = [
azurerm_network_interface.virtual_machine_network_interface.id,
]
os_disk {
caching = "ReadWrite"
storage_account_type = "Standard_LRS"
}
source_image_reference {
publisher = "Canonical"
offer = "UbuntuServer"
sku = "16.04-LTS"
version = "latest"
}
}
resource "azurerm_network_interface" "virtual_machine_network_interface" {
name = "vm-nic"
location = azurerm_resource_group.main_resource_group.location
resource_group_name = azurerm_resource_group.main_resource_group.name
ip_configuration {
name = "internal"
subnet_id = azurerm_subnet.virtual_network_subnet.id
private_ip_address_allocation = "Dynamic"
public_ip_address_id = azurerm_public_ip.vm_public_ip.id
}
}
# Create Network-interface and public-ip for virtual-machien
resource "azurerm_public_ip" "vm_public_ip" {
name = "vm-public-ip-for-rdp"
location = azurerm_resource_group.main_resource_group.location
resource_group_name = azurerm_resource_group.main_resource_group.name
allocation_method = "Static"
sku = "Standard"
}
resource "azurerm_network_interface" "virtual_network_nic" {
name = "vm_nic"
location = azurerm_resource_group.main_resource_group.location
resource_group_name = azurerm_resource_group.main_resource_group.name
ip_configuration {
name = "testconfiguration1"
subnet_id = azurerm_subnet.virtual_network_subnet.id
private_ip_address_allocation = "Dynamic"
}
}
# Setup an Inbound rule because we need to connect to the virtual-machine using RDP (remote-desktop-protocol)
resource "azurerm_network_security_group" "traffic_rules" {
name = "vm_traffic_rules"
location = azurerm_resource_group.main_resource_group.location
resource_group_name = azurerm_resource_group.main_resource_group.name
security_rule {
name = "virtual_network_permission"
priority = 100
direction = "Inbound"
access = "Allow"
protocol = "*"
source_port_range = "*"
destination_port_range = "22"
source_address_prefix = "*"
destination_address_prefix = "*"
}
}
resource "azurerm_subnet_network_security_group_association" "private_nsg_asso" {
subnet_id = azurerm_subnet.virtual_network_subnet.id
network_security_group_id = azurerm_network_security_group.traffic_rules.id
}
# Setup storage_account and its container
resource "azurerm_storage_account" "storage_account" {
name = "storagaccountfortest"
location = azurerm_resource_group.main_resource_group.location
resource_group_name = azurerm_resource_group.main_resource_group.name
account_tier = "Standard"
account_replication_type = "LRS"
account_kind = "StorageV2"
is_hns_enabled = "true"
}
resource "azurerm_storage_data_lake_gen2_filesystem" "data_lake_storage" {
name = "rawdata"
storage_account_id = azurerm_storage_account.storage_account.id
lifecycle {
prevent_destroy = false
}
}
# Setup DNS zone
resource "azurerm_private_dns_zone" "dns_zone" {
name = "privatelink.blob.core.windows.net"
resource_group_name = azurerm_resource_group.main_resource_group.name
}
resource "azurerm_private_dns_zone_virtual_network_link" "network_link" {
name = "vnet_link"
resource_group_name = azurerm_resource_group.main_resource_group.name
private_dns_zone_name = azurerm_private_dns_zone.dns_zone.name
virtual_network_id = azurerm_virtual_network.virtual_network.id
}
# Setup private-link
resource "azurerm_private_endpoint" "endpoint" {
name = "storage-private-endpoint"
location = azurerm_resource_group.main_resource_group.location
resource_group_name = azurerm_resource_group.main_resource_group.name
subnet_id = azurerm_subnet.storage_account_subnet.id
private_service_connection {
name = "private-service-connection"
private_connection_resource_id = azurerm_storage_account.storage_account.id
is_manual_connection = false
subresource_names = ["blob"]
}
}
resource "azurerm_private_dns_a_record" "dns_a" {
name = "dns-record"
zone_name = azurerm_private_dns_zone.dns_zone.name
resource_group_name = azurerm_resource_group.main_resource_group.name
ttl = 10
records = [azurerm_private_endpoint.endpoint.private_service_connection.0.private_ip_address]
}
The problem is, my endpoint does not work ! But, if i try to add the service-endpoint manually then everything works well like a charm. So,i think, my DNS Zone is correct, also apparently the link to the storage-account is also working well. So, I think there should be something wrong with my private-link ! Any idea ?
Update:
Here are versions:
Terraform v1.2.5
on windows_386
+ provider registry.terraform.io/hashicorp/azurerm v3.30.0
I believe the issue lies in the name of the dns_a_record. This should be the name of the storage account you want to reach via the private link.
The following Terraform code is working for me:
terraform {
required_providers {
azurerm = {
source = "hashicorp/azurerm"
version = "=3.30.0"
}
}
}
provider "azurerm" {
features {
resource_group {
prevent_deletion_if_contains_resources = false
}
}
}
resource "azurerm_resource_group" "main_resource_group" {
name = "RG-Terraform-on-Azure"
location = "West Europe"
}
# Create Virtual-Network
resource "azurerm_virtual_network" "virtual_network" {
name = "Vnet"
address_space = ["10.0.0.0/16"]
location = azurerm_resource_group.main_resource_group.location
resource_group_name = azurerm_resource_group.main_resource_group.name
}
# Create subnet for virtual-machine
resource "azurerm_subnet" "virtual_network_subnet" {
name = "vm_subnet"
resource_group_name = azurerm_resource_group.main_resource_group.name
virtual_network_name = azurerm_virtual_network.virtual_network.name
address_prefixes = ["10.0.1.0/24"]
}
# Create subnet for storage account
resource "azurerm_subnet" "storage_account_subnet" {
name = "storage_account_subnet"
resource_group_name = azurerm_resource_group.main_resource_group.name
virtual_network_name = azurerm_virtual_network.virtual_network.name
address_prefixes = ["10.0.2.0/24"]
}
# Create Linux Virtual machine
resource "azurerm_linux_virtual_machine" "example" {
name = "example-machine"
location = azurerm_resource_group.main_resource_group.location
resource_group_name = azurerm_resource_group.main_resource_group.name
size = "Standard_F2"
admin_username = "adminuser"
admin_password = "14394Las?"
disable_password_authentication = false
network_interface_ids = [
azurerm_network_interface.virtual_machine_network_interface.id,
]
os_disk {
caching = "ReadWrite"
storage_account_type = "Standard_LRS"
}
source_image_reference {
publisher = "Canonical"
offer = "UbuntuServer"
sku = "16.04-LTS"
version = "latest"
}
}
resource "azurerm_network_interface" "virtual_machine_network_interface" {
name = "vm-nic"
location = azurerm_resource_group.main_resource_group.location
resource_group_name = azurerm_resource_group.main_resource_group.name
ip_configuration {
name = "internal"
subnet_id = azurerm_subnet.virtual_network_subnet.id
private_ip_address_allocation = "Dynamic"
public_ip_address_id = azurerm_public_ip.vm_public_ip.id
}
}
# Create Network-interface and public-ip for virtual-machien
resource "azurerm_public_ip" "vm_public_ip" {
name = "vm-public-ip-for-rdp"
location = azurerm_resource_group.main_resource_group.location
resource_group_name = azurerm_resource_group.main_resource_group.name
allocation_method = "Static"
sku = "Standard"
}
resource "azurerm_network_interface" "virtual_network_nic" {
name = "storage-private-endpoint-nic"
location = azurerm_resource_group.main_resource_group.location
resource_group_name = azurerm_resource_group.main_resource_group.name
ip_configuration {
name = "storage-private-endpoint-ip-config"
subnet_id = azurerm_subnet.virtual_network_subnet.id
private_ip_address_allocation = "Dynamic"
}
}
# Setup an Inbound rule because we need to connect to the virtual-machine using RDP (remote-desktop-protocol)
resource "azurerm_network_security_group" "traffic_rules" {
name = "vm_traffic_rules"
location = azurerm_resource_group.main_resource_group.location
resource_group_name = azurerm_resource_group.main_resource_group.name
security_rule {
name = "virtual_network_permission"
priority = 100
direction = "Inbound"
access = "Allow"
protocol = "*"
source_port_range = "*"
destination_port_range = "22"
source_address_prefix = "*"
destination_address_prefix = "*"
}
}
resource "azurerm_subnet_network_security_group_association" "private_nsg_asso" {
subnet_id = azurerm_subnet.virtual_network_subnet.id
network_security_group_id = azurerm_network_security_group.traffic_rules.id
}
# Setup storage_account and its container
resource "azurerm_storage_account" "storage_account" {
name = <STORAGE_ACCOUNT_NAME>
location = azurerm_resource_group.main_resource_group.location
resource_group_name = azurerm_resource_group.main_resource_group.name
account_tier = "Standard"
account_replication_type = "LRS"
account_kind = "StorageV2"
is_hns_enabled = "true"
}
resource "azurerm_storage_data_lake_gen2_filesystem" "data_lake_storage" {
name = "rawdata"
storage_account_id = azurerm_storage_account.storage_account.id
lifecycle {
prevent_destroy = false
}
}
# Setup DNS zone
resource "azurerm_private_dns_zone" "dns_zone" {
name = "privatelink.blob.core.windows.net"
resource_group_name = azurerm_resource_group.main_resource_group.name
}
resource "azurerm_private_dns_zone_virtual_network_link" "network_link" {
name = "vnet-link"
resource_group_name = azurerm_resource_group.main_resource_group.name
private_dns_zone_name = azurerm_private_dns_zone.dns_zone.name
virtual_network_id = azurerm_virtual_network.virtual_network.id
}
# Setup private-link
resource "azurerm_private_endpoint" "endpoint" {
name = "storage-private-endpoint"
location = azurerm_resource_group.main_resource_group.location
resource_group_name = azurerm_resource_group.main_resource_group.name
subnet_id = azurerm_subnet.storage_account_subnet.id
private_service_connection {
name = "storage-private-service-connection"
private_connection_resource_id = azurerm_storage_account.storage_account.id
is_manual_connection = false
subresource_names = ["blob"]
}
}
resource "azurerm_private_dns_a_record" "dns_a" {
name = azurerm_storage_account.storage_account.name
zone_name = azurerm_private_dns_zone.dns_zone.name
resource_group_name = azurerm_resource_group.main_resource_group.name
ttl = 10
records = [azurerm_private_endpoint.endpoint.private_service_connection.0.private_ip_address]
}
Additionally, I'm not sure whether it is possible to ping storage accounts. To test I ran nslookup <STORAGE_ACCOUNT_NAME>.blob.core.windows.net both from my local machine and from the Azure VM. In the former case, I got a public IP while in the latter I got a private IP in the range defined in the Terraform config, which seems to be the behaviour you are looking for.
Currently i'm working on Terraform AKS Cluster creation with kubenet and User defined route (UDR) based under corporate network(I'm using existing subnet) and associating route-table to subnet. i was able provisioned cluster Until kubernetes orchestration version 1.15. Azure now stopped supporting it. Whenever i've upgraded to latest version to 1.17 or 1.18 the cluster provision failing with below error information .
I've associated default route "0.0.0.0/0" and next_hop_type as Internet then i'm getting below error. May be i'm missing somewhere but not sure what i've done wrongly.
Original Error: Code="RouteTableInvalidNextHop" Message="Default route
0.0.0.0/0 has a next hop of Internet but only next hops of VirtualAppliance or VirtualNetworkGateway are allowed. Please see
http://aka.ms/aks/outboundtype for more details
Here is my code snippet
provider "azurerm" {
version = "~> 2.15"
features {}
}
resource "azurerm_resource_group" "aks" {
name = var.resource_group
location = var.location
}
#fetch existing subnet
data "azurerm_subnet" "aks" {
name = var.subnetname
virtual_network_name = var.virtual_network_name
resource_group_name = var.vnet_resource_group
}
resource "azurerm_route_table" "aks"{
name = "aks" #var.subnetname
resource_group_name = azurerm_resource_group.aks.name
location = azurerm_resource_group.aks.location
disable_bgp_route_propagation = false
route{
name = var.route_name
address_prefix = var.route_address_prefix
next_hop_type = var.route_next_hop_type
}
route{
name = "default_route"
address_prefix = "0.0.0.0/0"
next_hop_type = "Internet"
#next_hop_in_ip_address = "10.65.27.1"
}
}
resource "azurerm_subnet_route_table_association" "aks" {
subnet_id = data.azurerm_subnet.aks.id
route_table_id = azurerm_route_table.aks.id
}
resource "azurerm_subnet_network_security_group_association" "aks" {
subnet_id = data.azurerm_subnet.aks.id
network_security_group_id = var.network_security_group
}
resource "null_resource" "previous" {}
resource "time_sleep" "wait_90_seconds" {
depends_on = [null_resource.previous]
create_duration = "90s"
}
# This resource will create (at least) 30 seconds after null_resource.previous
resource "null_resource" "next" {
depends_on = [time_sleep.wait_90_seconds]
}
resource "azurerm_kubernetes_cluster" "aks" {
name = azurerm_resource_group.aks.name
resource_group_name = azurerm_resource_group.aks.name
location = azurerm_resource_group.aks.location
dns_prefix = "akstfe" #The dns_prefix must contain between 3 and 45 characters, and can contain only letters, numbers, and hyphens. It must start with a letter and must end with a letter or a number.
kubernetes_version = "1.18.8"
private_cluster_enabled = false
node_resource_group = var.node_resource_group
api_server_authorized_ip_ranges = [] #var.api_server_authorized_ip_ranges
default_node_pool {
enable_node_public_ip = false
name = "agentpool"
node_count = var.node_count
orchestrator_version = "1.18.8"
vm_size = var.vm_size
os_disk_size_gb = var.os_disk_size_gb
vnet_subnet_id = data.azurerm_subnet.aks.id
}
linux_profile {
admin_username = var.admin_username
ssh_key {
key_data = var.ssh_key
}
}
service_principal {
client_id = var.client_id
client_secret = var.client_secret
}
role_based_access_control {
enabled = true
}
network_profile {
network_plugin = "kubenet"
#dns_service_ip = "172.17.1.10"
#service_cidr = "172.16.0.0/14"
pod_cidr = "172.40.0.0/16"
network_policy = "calico"
outbound_type = "userDefinedRouting"
load_balancer_sku = "Standard"
}
addon_profile {
aci_connector_linux {
enabled = false
}
azure_policy {
enabled = false
}
http_application_routing {
enabled = false
}
kube_dashboard {
enabled = false
}
oms_agent {
enabled = false
}
}
depends_on = [azurerm_subnet_route_table_association.aks]
}
0.0.0.0/0 with next hop to internet doesn't make any sense. its a default anyway. just drop it.
thats pretty much what the error tells you anyway.
I have some code which makes multiple vms I'm trying dynamically add them to the load balancer address pool but I'm met with the following error which I have no idea what it means, any help will be appreciated as the error appears to be somewhat obscure
Error: Error: IP Configuration "azure_network_interface_address_pool_association" was not found on Network Interface "client_host_nic-0" (Resource Group "client_rg")
on vm2.tf line 99, in resource "azurerm_network_interface_backend_address_pool_association" "network_interface_backend_address_pool_association":
99: resource "azurerm_network_interface_backend_address_pool_association" "network_interface_backend_address_pool_association" {
vm2.tf file includes
# Create virtual machine
resource "azurerm_network_interface" "client_nics" {
count = var.node_count
name = "client_host_nic-${count.index}"
location = var.resource_group_location
resource_group_name = module.network.azurerm_resource_group_client_name
# network_security_group_id = module.network.bastion_host_network_security_group
ip_configuration {
name = "client_host_nic"
subnet_id = module.network.client_subnet_id
private_ip_address_allocation = "Dynamic"
# public_ip_address_id = module.network.bastion_host_puplic_ip_address #optional field we have a bastion host so no need for public IP also its vnet peered so this adds an extra layer of securit in a way
}
tags = {
environment = "Production"
}
}
# Generate random text for a unique storage account name
resource "random_id" "randomId_Generator" {
keepers = {
# Generate a new ID only when a new resource group is defined
resource_group = var.resource_group_location
}
byte_length = 8
}
# Create storage account for boot diagnostics
resource "azurerm_storage_account" "client_storageaccount" {
name = "diag${random_id.randomId_Generator.hex}"
resource_group_name = module.network.azurerm_resource_group_client_name
location = var.resource_group_location
account_tier = "Standard"
account_replication_type = "LRS"
tags = {
environment = "Production"
}
}
resource "azurerm_virtual_machine" "node" {
count = var.node_count
name = "client-host-${count.index}"
location = var.resource_group_location
resource_group_name = module.network.azurerm_resource_group_client_name
network_interface_ids = ["${element(azurerm_network_interface.client_nics.*.id, count.index)}"]
# Uncomment this line to delete the OS disk automatically when deleting the VM
delete_os_disk_on_termination = true
# Uncomment this line to delete the data disks automatically when deleting the VM
delete_data_disks_on_termination = true
# 1 vCPU, 3.5 Gb of RAM
vm_size = var.machine_type
storage_os_disk {
name = "myOsDisk-${count.index}"
caching = "ReadWrite"
create_option = "FromImage"
managed_disk_type = "Premium_LRS"
}
storage_image_reference {
publisher = "Canonical"
offer = "UbuntuServer"
sku = "18.04-LTS"
version = "latest"
}
os_profile {
computer_name = "Production"
admin_username = "azureuser"
}
os_profile_linux_config {
disable_password_authentication = true
ssh_keys {
path = "/home/azureuser/.ssh/authorized_keys" #This cannot be changed as mentioned in https://www.terraform.io/docs/providers/azurerm/r/virtual_machine.html
key_data = file("~/.ssh/client.pub")
}
}
boot_diagnostics {
enabled = "true"
storage_uri = azurerm_storage_account.client_storageaccount.primary_blob_endpoint
}
tags = {
environment = "Production"
}
}
resource "azurerm_network_interface_backend_address_pool_association" "network_interface_backend_address_pool_association" {
count = var.node_count
network_interface_id = element(azurerm_network_interface.client_nics.*.id, count.index) #fixes interpolation issues
ip_configuration_name = "azure_network_interface_address_pool_association"
backend_address_pool_id = module.loadbalancer.azure_backend_pool_id
}
load balancer module main.tf file
#rember when using this module to call the network module for the resource group name
############## load balancer section ##############
resource "azurerm_public_ip" "azure_load_balancer_IP" {
name = "azure_load_balancer_IP"
location = var.resource_group_location
resource_group_name = var.resource_group_name
allocation_method = "Static"
}
resource "azurerm_lb" "azure_load_balancer" {
name = "TestLoadBalancer"
location = var.resource_group_location
resource_group_name = var.resource_group_name
frontend_ip_configuration {
name = "front_end_IP_configuration_for_azure_load_balancer"
public_ip_address_id = azurerm_public_ip.azure_load_balancer_IP.id
}
}
resource "azurerm_lb_backend_address_pool" "backend_address_pool" {
resource_group_name = var.resource_group_name
loadbalancer_id = azurerm_lb.azure_load_balancer.id
name = "BackEndAddressPool"
}
resource "azurerm_lb_rule" "azure_lb_rule" {
resource_group_name = var.resource_group_name
loadbalancer_id = azurerm_lb.azure_load_balancer.id
name = "LBRule"
protocol = "Tcp"
frontend_port = 80
backend_port = 80
frontend_ip_configuration_name = "front_end_IP_configuration_for_azure_load_balancer"
}
output.tf
output "azure_load_balancer_ip" {
value = azurerm_public_ip.azure_load_balancer_IP.id
}
output "azure_backend_pool_id" {
value = azurerm_lb_backend_address_pool.backend_address_pool.id
}
additional information
* provider.azurerm: version = "~> 2.1"
main.tf
module "loadbalancer" {
source = "./azure_load_balancer_module" #this may need to be a different git repo as we are not referencing branches here only the master
resource_group_name = module.network.azurerm_resource_group_client_name
resource_group_location = var.resource_group_location
}
The error means the Ipconfiguration name you set for the network interface is not the same as you set for the resource azurerm_network_interface_backend_address_pool_association. You can take a look at the description for ip_configuration_name here. And as I see, you want to associate multiple interfaces with the load balancer.
So I recommend you change the network interface and the association like this:
resource "azurerm_network_interface" "client_nics" {
count = var.node_count
name = "client_host_nic-${count.index}"
location = var.resource_group_location
resource_group_name = module.network.azurerm_resource_group_client_name
# network_security_group_id = module.network.bastion_host_network_security_group
ip_configuration {
name = "client_host_nic-${count.index}"
subnet_id = module.network.client_subnet_id
private_ip_address_allocation = "Dynamic"
# public_ip_address_id = module.network.bastion_host_puplic_ip_address #optional field we have a bastion host so no need for public IP also its vnet peered so this adds an extra layer of securit in a way
}
tags = {
environment = "Production"
}
}
resource "azurerm_network_interface_backend_address_pool_association" "network_interface_backend_address_pool_association" {
count = var.node_count
network_interface_id = element(azurerm_network_interface.client_nics.*.id, count.index) #fixes interpolation issues
ip_configuration_name = "client_host_nic-${count.index}"
backend_address_pool_id = module.loadbalancer.azure_backend_pool_id
}
I believe this was a bug with the azure 2.1 terraform provider and seems to have been fixed in version 2.33 according to upstream https://github.com/terraform-providers/terraform-provider-azurerm/issues/3794
I am using an existing template from github and have modified it a little bit. When I terraform.exe plan it says it will deploy 4 resources (NIC, NSG, VM-SA, and Resource Group).
I am trying to deploy a VM and have it joined to an existing VNet.
I have removed the NIC to see if it would add the windows VM for deployment and it does not
main.tf
# Configure the Microsoft Azure Provider
provider "azurerm" {
subscription_id = "************************************"
tenant_id = "************************************"
client_id = "************************************"
client_secret = "************************************"
}
module "os" {
source = "./os"
vm_os_simple = "${var.vm_os_simple}"
}
resource "azurerm_resource_group" "vm" {
name = "${var.resource_group_name}"
location = "${var.location}"
tags = "${var.tags}"
}
resource "random_id" "vm-sa" {
keepers = {
vm_hostname = "${var.vm_hostname}"
}
byte_length = 6
}
resource "azurerm_network_security_group" "nsg" {
name = "${var.network_security_group}"
location = "${var.location}"
resource_group_name = "${var.resource_group_name}"
}
resource "azurerm_storage_account" "vm-sa" {
count = "${var.boot_diagnostics == "true" ? 1 : 0}"
name = "bootdiag${lower(random_id.vm-sa.hex)}"
resource_group_name = "${azurerm_resource_group.vm.name}"
location = "${var.location}"
account_tier = "${element(split("_", var.boot_diagnostics_sa_type),0)}"
account_replication_type = "${element(split("_", var.boot_diagnostics_sa_type),1)}"
tags = "${var.tags}"
}
resource "azurerm_virtual_machine" "vm-windows" {
count = "${((var.is_windows_image == "true" || contains(list("${var.vm_os_simple}","${var.vm_os_offer}"), "Windows")) && var.data_disk == "false") ? var.nb_instances : 0}"
name = "${var.vm_hostname}${count.index}"
location = "${var.location}"
resource_group_name = "${azurerm_resource_group.vm.name}"
vm_size = "${var.vm_size}"
network_interface_ids = ["${element(azurerm_network_interface.vm.*.id, count.index)}"]
delete_os_disk_on_termination = "${var.delete_os_disk_on_termination}"
storage_image_reference {
id = "${var.vm_os_id}"
publisher = "${var.vm_os_id == "" ? coalesce(var.vm_os_publisher, module.os.calculated_value_os_publisher) : ""}"
offer = "${var.vm_os_id == "" ? coalesce(var.vm_os_offer, module.os.calculated_value_os_offer) : ""}"
sku = "${var.vm_os_id == "" ? coalesce(var.vm_os_sku, module.os.calculated_value_os_sku) : ""}"
version = "${var.vm_os_id == "" ? var.vm_os_version : ""}"
}
storage_os_disk {
name = "osdisk-${var.vm_hostname}-${count.index}"
create_option = "FromImage"
caching = "ReadWrite"
managed_disk_type = "${var.storage_account_type}"
}
os_profile {
computer_name = "${var.vm_hostname}${count.index}"
admin_username = "${var.admin_username}"
admin_password = "${var.admin_password}"
}
tags = "${var.tags}"
os_profile_windows_config {
provision_vm_agent = true
}
boot_diagnostics {
enabled = "${var.boot_diagnostics}"
storage_uri = "${var.boot_diagnostics == "true" ? join(",", azurerm_storage_account.vm-sa.*.primary_blob_endpoint) : "" }"
}
}
#refer to a subnet
data "azurerm_subnet" "test" {
name = "SubnetName"
virtual_network_name = "VNetName"
resource_group_name = "VNetresourceGroupName"
}
resource "azurerm_network_interface" "vm" {
count = "${var.nb_instances}"
name = "nic-${var.vm_hostname}-${count.index}"
location = "${azurerm_resource_group.vm.location}"
resource_group_name = "${azurerm_resource_group.vm.name}"
network_security_group_id = "${azurerm_network_security_group.nsg.id}"
ip_configuration {
name = "ipconfig${count.index}"
subnet_id = "${data.azurerm_subnet.test.id}"
private_ip_address_allocation = "Dynamic"
}
tags = "${var.tags}"
}
Expected results should be Deploy a VM, Storage Account, Network Security Group and Nic that is joined to an existing VNet
To create an Azure VM through Terraform, you can see the whole steps in Create a complete Linux virtual machine infrastructure in Azure with Terraform, it's a Linux VM, but you can change the image into windows and the os_profile_linux_config into os_profile_windows_config.
In an existing Vnet, you can use the Terraform data to quote the Vnet as you provide:
data "azurerm_subnet" "existing" {
name = "SubnetName"
virtual_network_name = "VNetName"
resource_group_name = "VNetresourceGroupName"
}
resource "azurerm_network_interface" "myterraformnic" {
name = "myNIC"
location = "eastus"
resource_group_name = "${azurerm_resource_group.myterraformgroup.name}"
network_security_group_id = "${azurerm_network_security_group.myterraformnsg.id}"
ip_configuration {
name = "myNicConfiguration"
subnet_id = "${data.azurerm_subnet.existing.id}"
private_ip_address_allocation = "Dynamic"
public_ip_address_id = "${azurerm_public_ip.myterraformpublicip.id}"
}
tags {
environment = "Terraform Demo"
}
}
The whole Terraform code here and you can change the information about the VM as you want.
# Configure the Microsoft Azure Provider
provider "azurerm" {
subscription_id = "xxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"
client_id = "xxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"
client_secret = "xxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"
tenant_id = "xxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"
}
# Create a resource group if it doesn’t exist
resource "azurerm_resource_group" "myterraformgroup" {
name = "myResourceGroup"
location = "eastus"
tags {
environment = "Terraform Demo"
}
}
# the existing subnet of the virtual network
data "azurerm_subnet" "existing" {
name = "SubnetName"
virtual_network_name = "VNetName"
resource_group_name = "VNetresourceGroupName"
}
# Create public IPs
resource "azurerm_public_ip" "myterraformpublicip" {
name = "myPublicIP"
location = "eastus"
resource_group_name = "${azurerm_resource_group.myterraformgroup.name}"
allocation_method = "Dynamic"
tags {
environment = "Terraform Demo"
}
}
# Create Network Security Group and rule
resource "azurerm_network_security_group" "myterraformnsg" {
name = "myNetworkSecurityGroup"
location = "eastus"
resource_group_name = "${azurerm_resource_group.myterraformgroup.name}"
security_rule {
name = "RDP"
priority = 1001
direction = "Inbound"
access = "Allow"
protocol = "Tcp"
source_port_range = "*"
destination_port_range = "3306"
source_address_prefix = "*"
destination_address_prefix = "*"
}
tags {
environment = "Terraform Demo"
}
}
# Create network interface
resource "azurerm_network_interface" "myterraformnic" {
name = "myNIC"
location = "eastus"
resource_group_name = "${azurerm_resource_group.myterraformgroup.name}"
network_security_group_id = "${azurerm_network_security_group.myterraformnsg.id}"
ip_configuration {
name = "myNicConfiguration"
subnet_id = "${data.azurerm_subnet.existing.id}"
private_ip_address_allocation = "Dynamic"
public_ip_address_id = "${azurerm_public_ip.myterraformpublicip.id}"
}
tags {
environment = "Terraform Demo"
}
}
# Generate random text for a unique storage account name
resource "random_id" "randomId" {
keepers = {
# Generate a new ID only when a new resource group is defined
resource_group = "${azurerm_resource_group.myterraformgroup.name}"
}
byte_length = 8
}
# Create storage account for boot diagnostics
resource "azurerm_storage_account" "mystorageaccount" {
name = "diag${random_id.randomId.hex}"
resource_group_name = "${azurerm_resource_group.myterraformgroup.name}"
location = "eastus"
account_tier = "Standard"
account_replication_type = "LRS"
tags {
environment = "Terraform Demo"
}
}
# Create virtual machine
resource "azurerm_virtual_machine" "myterraformvm" {
name = "myVM"
location = "eastus"
resource_group_name = "${azurerm_resource_group.myterraformgroup.name}"
network_interface_ids = ["${azurerm_network_interface.myterraformnic.id}"]
vm_size = "Standard_DS1_v2"
storage_os_disk {
name = "myOsDisk"
caching = "ReadWrite"
create_option = "FromImage"
managed_disk_type = "Premium_LRS"
}
storage_image_reference {
publisher = "MicrosoftWindowsServer"
offer = "WindowsServer"
sku = "2016-Datacenter-Server-Core-smalldisk"
version = "latest"
}
os_profile {
computer_name = "myvm"
admin_username = "azureuser"
admin_password = "Passwd#!1234"
}
os_profile_windows_config {
provision_vm_agent = true
}
boot_diagnostics {
enabled = "true"
storage_uri = "${azurerm_storage_account.mystorageaccount.primary_blob_endpoint}"
}
tags {
environment = "Terraform Demo"
}
}
For more details, see Azure Virtual Machine in Terraform.