Azure Terraform | remote-exec Bastion Host VM - azure

Example referred: https://medium.com/devops-dudes/how-to-setup-completely-private-azure-kubernetes-service-aks-clusters-with-azure-private-links-b800a5a6776f
##
# Configure the Azure Provider
##
provider "azurerm" {
version = "=2.8.0"
features {}
}
##
# Define variables for location, service principal for AKS and Bastion VM Admin
##
variable "location" {
type = map(string)
default = {
value = "West Europe"
suffix = "westeurope" # The corresponding value of location that is used by Azure in naming AKS resource groups
}
}
variable "aks_service_principal" {
type = map(string)
/* Set value with .tfvars
{
client_id = "xxxxx"
client_secret = "yyyyy"
}
*/
}
variable "bastion_admin" {
type = map(string)
/* Set value with .tfvars
{
username = "xxxxx"
password = "yyyyy"
}
*/
}
##
# Create a resource group for the azure resources
##
resource "azurerm_resource_group" "my_rg" {
name = "rg-private-aks-demo"
location = var.location.value
}
##
# Create Vnet and subnet for the AKS cluster
##
resource "azurerm_virtual_network" "vnet_cluster" {
name = "vnet-private-aks-demo"
location = var.location.value
resource_group_name = azurerm_resource_group.my_rg.name
address_space = ["10.1.0.0/16"]
}
resource "azurerm_subnet" "snet_cluster" {
name = "snet-private-aks-demo"
resource_group_name = azurerm_resource_group.my_rg.name
virtual_network_name = azurerm_virtual_network.vnet_cluster.name
address_prefixes = ["10.1.0.0/24"]
# Enforce network policies to allow Private Endpoint to be added to the subnet
enforce_private_link_endpoint_network_policies = true
}
##
# Create Vnet and subnet for the Bastion VM
##
resource "azurerm_virtual_network" "vnet_bastion" {
name = "vnet-bastion-demo"
location = var.location.value
resource_group_name = azurerm_resource_group.my_rg.name
address_space = ["10.0.0.0/16"]
}
resource "azurerm_subnet" "snet_bastion_vm" {
name = "snet-bastion-demo"
resource_group_name = azurerm_resource_group.my_rg.name
virtual_network_name = azurerm_virtual_network.vnet_bastion.name
address_prefixes = ["10.0.0.0/24"]
}
resource "azurerm_subnet" "snet_azure_bastion_service" {
# The subnet name cannot be changed; the Azure Bastion host requires a subnet named AzureBastionSubnet
name = "AzureBastionSubnet"
resource_group_name = azurerm_resource_group.my_rg.name
virtual_network_name = azurerm_virtual_network.vnet_bastion.name
address_prefixes = ["10.0.1.0/24"]
}
##
# Create Vnet peering for the bastion VM to be able to access the cluster Vnet and IPs
##
resource "azurerm_virtual_network_peering" "peering_bastion_cluster" {
name = "peering_bastion_cluster"
resource_group_name = azurerm_resource_group.my_rg.name
virtual_network_name = azurerm_virtual_network.vnet_bastion.name
remote_virtual_network_id = azurerm_virtual_network.vnet_cluster.id
}
resource "azurerm_virtual_network_peering" "peering_cluster_bastion" {
name = "peering_cluster_bastion"
resource_group_name = azurerm_resource_group.my_rg.name
virtual_network_name = azurerm_virtual_network.vnet_cluster.name
remote_virtual_network_id = azurerm_virtual_network.vnet_bastion.id
}
##
# Create the AKS Cluster
##
resource "azurerm_kubernetes_cluster" "my_aks" {
name = "aks-my-cluster"
location = var.location.value
resource_group_name = azurerm_resource_group.my_rg.name
dns_prefix = "aks-cluster"
# Make the cluster private
private_cluster_enabled = true
# Improve security using Azure AD, K8s roles and rolebindings.
# Each Azure AD user gets a personal kubeconfig, with permissions managed through AD groups and RoleBindings
role_based_access_control {
enabled = true
}
# Enable Kubernetes Dashboard, if needed
addon_profile {
kube_dashboard {
enabled = true
}
}
# To prevent CIDR collision with the 10.0.0.0/16 Vnet
network_profile {
network_plugin = "kubenet"
docker_bridge_cidr = "192.167.0.1/16"
dns_service_ip = "192.168.1.1"
service_cidr = "192.168.0.0/16"
pod_cidr = "172.16.0.0/22"
}
default_node_pool {
name = "default"
node_count = 1
vm_size = "Standard_D2_v2"
vnet_subnet_id = azurerm_subnet.snet_cluster.id
}
service_principal {
client_id = var.aks_service_principal.client_id
client_secret = var.aks_service_principal.client_secret
}
}
##
# Link the Bastion Vnet to the Private DNS Zone generated to resolve the Server IP from the URL in Kubeconfig
##
resource "azurerm_private_dns_zone_virtual_network_link" "link_bastion_cluster" {
name = "dnslink-bastion-cluster"
# The Terraform language does not support user-defined functions, and so only the functions built in to the language are available for use.
# The code below derives the private DNS zone name from the FQDN by slicing out the DNS prefix (the first label)
private_dns_zone_name = join(".", slice(split(".", azurerm_kubernetes_cluster.my_aks.private_fqdn), 1, length(split(".", azurerm_kubernetes_cluster.my_aks.private_fqdn))))
resource_group_name = "MC_${azurerm_resource_group.my_rg.name}_${azurerm_kubernetes_cluster.my_aks.name}_${var.location.suffix}"
virtual_network_id = azurerm_virtual_network.vnet_bastion.id
}
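To make the expression above concrete, here is a small illustrative sketch (the FQDN value is hypothetical, not taken from a real cluster) showing how the first label is stripped to recover the private DNS zone name:
locals {
  # Hypothetical private FQDN of the cluster, for illustration only
  example_private_fqdn = "aks-my-cluster-abc123.1a2b3c4d-5e6f-7a8b-9c0d-112233445566.privatelink.westeurope.azmk8s.io"
  # split(".", fqdn) breaks the FQDN into labels, slice(..., 1, length(...)) drops the first label
  # (the cluster's DNS prefix), and join(".", ...) reassembles the remainder.
  example_zone_name = join(".", slice(split(".", local.example_private_fqdn), 1, length(split(".", local.example_private_fqdn))))
  # => "1a2b3c4d-5e6f-7a8b-9c0d-112233445566.privatelink.westeurope.azmk8s.io"
}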
##
# Create a Bastion VM
##
resource "azurerm_network_interface" "bastion_nic" {
name = "nic-bastion"
location = var.location.value
resource_group_name = azurerm_resource_group.my_rg.name
ip_configuration {
name = "internal"
subnet_id = azurerm_subnet.snet_bastion_vm.id
private_ip_address_allocation = "Dynamic"
}
}
resource "azurerm_linux_virtual_machine" "example" {
name = "vm-bastion"
location = var.location.value
resource_group_name = azurerm_resource_group.my_rg.name
size = "Standard_D2_v2"
admin_username = var.bastion_admin.username
admin_password = var.bastion_admin.password
disable_password_authentication = false
network_interface_ids = [
azurerm_network_interface.bastion_nic.id,
]
os_disk {
caching = "ReadWrite"
storage_account_type = "Standard_LRS"
}
source_image_reference {
publisher = "Canonical"
offer = "UbuntuServer"
sku = "16.04-LTS"
version = "latest"
}
}
##
# Create an Azure Bastion Service to access the Bastion VM
##
resource "azurerm_public_ip" "pip_azure_bastion" {
name = "pip-azure-bastion"
location = var.location.value
resource_group_name = azurerm_resource_group.my_rg.name
allocation_method = "Static"
sku = "Standard"
}
resource "azurerm_bastion_host" "azure-bastion" {
name = "azure-bastion"
location = var.location.value
resource_group_name = azurerm_resource_group.my_rg.name
ip_configuration {
name = "configuration"
subnet_id = azurerm_subnet.snet_azure_bastion_service.id
public_ip_address_id = azurerm_public_ip.pip_azure_bastion.id
}
}
This example works fine, where only the Bastion Host can access the Kubernetes API.
Now I am trying to do a helm_release from Terraform, but I have to run the Helm release from within the Bastion Host.
Is there any way to run helm_release remotely?
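One way to do this remotely (this is only a sketch, not part of the original example) is to wrap the Helm commands in a remote-exec provisioner that connects to the bastion VM over SSH. It assumes the machine running Terraform can reach the bastion's private IP (for example over a VPN or from a peered network) and that the Azure CLI, kubectl and Helm are already installed on that VM; the chart and release names below are placeholders.
resource "null_resource" "helm_release_via_bastion" {
  # Re-run whenever the cluster is replaced
  triggers = {
    cluster_id = azurerm_kubernetes_cluster.my_aks.id
  }
  connection {
    type     = "ssh"
    host     = azurerm_network_interface.bastion_nic.private_ip_address # assumes this private IP is reachable from the Terraform runner
    user     = var.bastion_admin.username
    password = var.bastion_admin.password
  }
  provisioner "remote-exec" {
    inline = [
      # Fetch the kubeconfig for the private cluster, then run the Helm release from inside the VNet
      "az aks get-credentials --resource-group ${azurerm_resource_group.my_rg.name} --name ${azurerm_kubernetes_cluster.my_aks.name} --overwrite-existing",
      "helm repo add bitnami https://charts.bitnami.com/bitnami",
      "helm upgrade --install my-release bitnami/nginx",
    ]
  }
}
Alternatively, the hashicorp/helm provider can be used directly if the Terraform runner itself sits on a network that can resolve and reach the private API server.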

Related

Terraform - How to install AZURE CNI on AKS cluster and set POD IP Range?

I am trying to create an AKS cluster in Azure using Terraform. My requirements are as follows:
Create a site-to-site VPN connection where the gateway is in a subnet of range 172.30.0.0/16 - this is done
Install the AKS cluster with Azure CNI, with pods in the range of the VPN CIDR (172.30.0.0/16).
Here's my Terraform code. I read that if you use azure as your network_policy and network_plugin, you can't set the pod_cidr - source
Then how can I do this so my pods can reach the on-premises network through the site-to-site VPN?
resource "azurerm_kubernetes_cluster" "k8s_cluster" {
lifecycle {
ignore_changes = [
default_node_pool[0].node_count
]
prevent_destroy = false
}
name = var.cluster_name
location = var.location
resource_group_name = var.rg_name
dns_prefix = var.dns_prefix
kubernetes_version = var.kubernetes_version
# node_resource_group = var.resource_group_name
default_node_pool {
name = var.default_node_pool.name
node_count = var.default_node_pool.node_count
max_count = var.default_node_pool.max_count
min_count = var.default_node_pool.min_count
vm_size = var.default_node_pool.vm_size
os_disk_size_gb = var.default_node_pool.os_disk_size_gb
# vnet_subnet_id = var.vnet_subnet_id
max_pods = var.default_node_pool.max_pods
type = var.default_node_pool.agent_pool_type
enable_node_public_ip = var.default_node_pool.enable_node_public_ip
enable_auto_scaling = var.default_node_pool.enable_auto_scaling
tags = merge(var.common_tags)
}
identity {
type = var.identity
}
network_profile {
network_plugin = var.network_plugin #azure
network_policy = var.network_policy #"azure"
load_balancer_sku = var.load_balancer_sku #"standard"
# pod_cidr = var.pod_cidr | When network_plugin is set to azure - the vnet_subnet_id field in the default_node_pool block must be set and pod_cidr must not be set.
}
tags = merge(var.common_tags)
}
# AKS cluster related variables
cluster_name = "test-cluster"
dns_prefix = "testjana"
kubernetes_version = "1.22.15"
default_node_pool = {
name = "masternp" # for system pods
node_count = 1
vm_size = "standard_e4bds_v5" # 4 vcpu and 32 Gb of memory
enable_auto_scaling = false
enable_node_public_ip = false
min_count = null
max_count = null
max_pods = 100
os_disk_size_gb = 80
agent_pool_type = "VirtualMachineScaleSets"
}
admin_username = "jananathadmin"
ssh_public_key = "public_key"
identity = "SystemAssigned"
network_plugin = "azure"
network_policy = "azure"
load_balancer_sku = "standard"
By default, all pods in AKS can communicate with each other; when we want to restrict the traffic, network policies can be used to allow or deny traffic between pods.
Here is the tutorial link
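As a rough illustration of such a policy (this sketch is not from the tutorial; the labels, namespace and the required kubernetes provider configuration are assumptions), a rule that only lets pods labelled app = frontend reach pods labelled app = backend on port 80 could be written with the Terraform kubernetes provider like this:
resource "kubernetes_network_policy" "allow_frontend_only" {
  metadata {
    name      = "allow-frontend-only" # hypothetical name
    namespace = "default"
  }
  spec {
    # The policy applies to the backend pods
    pod_selector {
      match_labels = {
        app = "backend"
      }
    }
    # Only frontend pods may reach them, and only on TCP/80
    ingress {
      from {
        pod_selector {
          match_labels = {
            app = "frontend"
          }
        }
      }
      ports {
        port     = "80"
        protocol = "TCP"
      }
    }
    policy_types = ["Ingress"]
  }
}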
Reproduced the same via Terraform using the code snippet below, connecting the cluster with Azure CNI and a VNet gateway that links our on-prem environment to Azure via a site-to-site VPN.
Step 1:
The main tf file is as follows:
resource "azurerm_resource_group" "example" {
name = "*****-****"
location = "East US"
}
resource "azurerm_role_assignment" "role_acrpull" {
scope = azurerm_container_registry.acr.id
role_definition_name = "AcrPull"
principal_id = azurerm_kubernetes_cluster.demo.kubelet_identity.0.object_id
}
resource "azurerm_container_registry" "acr" {
name = "acrswarna"
resource_group_name = azurerm_resource_group.example.name
location = azurerm_resource_group.example.location
sku = "Standard"
admin_enabled = false
}
resource "azurerm_virtual_network" "puvnet" {
name = "Publics_VNET"
resource_group_name = azurerm_resource_group.example.name
location = azurerm_resource_group.example.location
address_space = ["10.19.0.0/16"]
}
resource "azurerm_subnet" "example" {
name = "GatewaySubnet"
resource_group_name = azurerm_resource_group.example.name
virtual_network_name = azurerm_virtual_network.puvnet.name
address_prefixes = ["10.19.3.0/24"]
}
resource "azurerm_subnet" "osubnet" {
name = "Outer_Subnet"
resource_group_name = azurerm_resource_group.example.name
address_prefixes = ["10.19.1.0/24"]
virtual_network_name = azurerm_virtual_network.puvnet.name
}
resource "azurerm_kubernetes_cluster" "demo" {
name = "demo-aksnew"
location = azurerm_resource_group.example.location
resource_group_name = azurerm_resource_group.example.name
dns_prefix = "demo-aks"
default_node_pool {
name = "default"
node_count = 2
vm_size = "standard_e4bds_v5"
type = "VirtualMachineScaleSets"
enable_auto_scaling = false
min_count = null
max_count = null
max_pods = 100
//vnet_subnet_id = azurerm_subnet.osubnet.id
}
identity {
type = "SystemAssigned"
}
network_profile {
network_plugin = "azure"
load_balancer_sku = "standard"
network_policy = "azure"
}
tags = {
Environment = "Development"
}
}
resource "azurerm_public_ip" "example" {
name = "pips-firewall"
resource_group_name = azurerm_resource_group.example.name
location = azurerm_resource_group.example.location
allocation_method = "Static"
sku = "Standard"
}
resource "azurerm_virtual_network_gateway" "example" {
name = "test"
location = azurerm_resource_group.example.location
resource_group_name = azurerm_resource_group.example.name
type = "Vpn"
vpn_type = "RouteBased"
active_active = false
enable_bgp = false
sku = "VpnGw1"
ip_configuration {
name = "vnetGatewayConfig"
public_ip_address_id = azurerm_public_ip.example.id
private_ip_address_allocation = "Dynamic"
subnet_id = azurerm_subnet.example.id
}
vpn_client_configuration {
address_space = ["172.30.0.0/16"]
root_certificate {
name = "******-****-ID-Root-CA"
public_cert_data = <<EOF
**Use certificate here**
EOF
}
revoked_certificate {
name = "*****-Global-Root-CA"
thumbprint = "****************"
}
}
}
NOTE: Update the root certificate configuration with your own values in the code above.
The provider tf file is as follows:
terraform {
required_providers {
azurerm = {
source = "hashicorp/azurerm"
version = "=3.0.0"
}
}
}
provider "azurerm" {
features {}
skip_provider_registration = true
}
Upon running:
terraform plan
terraform apply -auto-approve
the VNet and subnet configurations, the virtual network gateway configuration, and the sample pods deployed on the cluster are as follows (screenshots omitted).

How to configure the VMs in two different zones using Availability Sets and install the Active Directory Domain services

I'm trying to create two Windows virtual machines in two different zones using the following Terraform code:
## Import existing resource group
## Use this data source to access information about an existing Resource Group
data "azurerm_resource_group" "resource_group" {
name = var.existing_rg_name
}
## Import existing virtual network
## Use this data source to access information about an existing Virtual Network.
data "azurerm_virtual_network" "virtual_network" {
resource_group_name = var.existing_rg_name
name = var.existing_vnet_name
}
## Import existing subnet within a virtual network
## Use this data source to access information about an existing Subnet within a Virtual Network.
data "azurerm_subnet" "subnet" {
name = var.existing_subnet_name
virtual_network_name = var.existing_vnet_name
resource_group_name = var.existing_rg_name
}
## Configure Availability set
resource "azurerm_availability_set" "availability_set" {
name = var.avset_name
resource_group_name = data.azurerm_resource_group.resource_group.name
location = data.azurerm_resource_group.resource_group.location
platform_fault_domain_count = 2
platform_update_domain_count = 2
managed = true
}
## Create Public IP
resource "azurerm_public_ip" "public_ip" {
name = var.pip_name
resource_group_name = data.azurerm_resource_group.resource_group.name
location = data.azurerm_resource_group.resource_group.location
allocation_method = "Dynamic"
}
## Create network interface for VM
resource "azurerm_network_interface" "vm_nic" {
name = var.nic_name
resource_group_name = data.azurerm_resource_group.resource_group.name
location = data.azurerm_resource_group.resource_group.location
ip_configuration {
name = "internal"
subnet_id = data.azurerm_subnet.subnet.id
private_ip_address_allocation = "Dynamic"
public_ip_address_id = azurerm_public_ip.public_ip.id
}
}
## Create Windows Virtual Machine
resource "azurerm_windows_virtual_machine" "virtual_machine" {
name = var.vm_name
resource_group_name = data.azurerm_resource_group.resource_group.name
location = data.azurerm_resource_group.resource_group.location
size = var.vm_size
admin_username = var.vm_username
admin_password = var.vm_password
network_interface_ids = [
azurerm_network_interface.vm_nic.id
]
availability_set_id = azurerm_availability_set.availability_set.id
os_disk {
caching = "ReadWrite"
storage_account_type = "Standard_LRS"
}
source_image_reference {
publisher = "MicrosoftWindowsServer"
offer = "WindowsServer"
sku = "2019-Datacenter"
version = "latest"
}
depends_on = [
azurerm_network_interface.vm_nic
]
}
I want to configure the VMs in two different zones using Availability Sets and install the Active Directory Domain Services using Terraform.
You can use something like the below to deploy 2 VMs, create a new Active Directory forest on one, and on the other simply join it to the domain, promoting both as Domain Controllers:
Availability Set:
Main.tf:
provider "azurerm" {
features{}
}
## Import existing resource group
## Use this data source to access information about an existing Resource Group
data "azurerm_resource_group" "resource_group" {
name = "ansumantest"
}
## Import existing virtual network
## Use this data source to access information about an existing Virtual Network.
data "azurerm_virtual_network" "virtual_network" {
resource_group_name = data.azurerm_resource_group.resource_group.name
name = "ansuman-vnet"
}
## Import existing subnet within a virtual network
## Use this data source to access information about an existing Subnet within a Virtual Network.
data "azurerm_subnet" "subnet" {
name = "default"
virtual_network_name = data.azurerm_virtual_network.virtual_network.name
resource_group_name = data.azurerm_resource_group.resource_group.name
}
## Configure Availability set
resource "azurerm_availability_set" "availability_set" {
name = "ansuman-avset"
resource_group_name = data.azurerm_resource_group.resource_group.name
location = data.azurerm_virtual_network.virtual_network.location
platform_fault_domain_count = 2
platform_update_domain_count = 2
managed = true
}
## Create 2 Public IP
resource "azurerm_public_ip" "public_ip" {
count = 2
name = "ansuman-pip-${count.index}"
resource_group_name = data.azurerm_resource_group.resource_group.name
location = data.azurerm_virtual_network.virtual_network.location
allocation_method = "Dynamic"
}
# Static private address to be used by each server
variable "PrivateIP" {
default=["10.0.0.5","10.0.0.6"]
}
## Create network interfaces for the VMs, adding the static private IPs to the DNS server list
resource "azurerm_network_interface" "vm_nic" {
count = 2
name = "vm-${count.index}-nic"
resource_group_name = data.azurerm_resource_group.resource_group.name
location = data.azurerm_virtual_network.virtual_network.location
dns_servers = var.PrivateIP
ip_configuration {
name = "internal"
subnet_id = data.azurerm_subnet.subnet.id
private_ip_address_allocation = "Static"
private_ip_address = var.PrivateIP[count.index]
public_ip_address_id = azurerm_public_ip.public_ip[count.index].id
}
}
## Create 2 Windows Virtual Machine
resource "azurerm_windows_virtual_machine" "virtual_machine" {
count = 2
name = "AZDC-${count.index}"
resource_group_name = data.azurerm_resource_group.resource_group.name
location = data.azurerm_virtual_network.virtual_network.location
size = "Standard_F8s_v2"
admin_username = "ansuman"
admin_password = "Password#1234"
network_interface_ids = [
azurerm_network_interface.vm_nic[count.index].id
]
availability_set_id = azurerm_availability_set.availability_set.id
os_disk {
caching = "ReadWrite"
storage_account_type = "Standard_LRS"
}
source_image_reference {
publisher = "MicrosoftWindowsServer"
offer = "WindowsServer"
sku = "2019-Datacenter"
version = "latest"
}
depends_on = [
azurerm_network_interface.vm_nic
]
}
# PowerShell commands to run ADDS on the VMs
locals {
import_command = "Import-Module ADDSDeployment"
password_command = "$password = ConvertTo-SecureString ${var.admin_password} -AsPlainText -Force"
credentials_command = "$credentials = new-object -typename System.Management.Automation.PSCredential -argumentlist ${var.domainAdminUsername},$password"
install_ad_command = "Add-WindowsFeature -name ad-domain-services,dns -IncludeManagementTools"
configure_ad_command = "Install-ADDSForest -CreateDnsDelegation:$false -DomainMode Win2012R2 -DomainName ${var.active_directory_domain} -DomainNetbiosName ${var.active_directory_netbios_name} -ForestMode Win2012R2 -InstallDns:$true -SafeModeAdministratorPassword $password -Force:$true"
promote_adds_command = "Install-ADDSDomainController -DomainName ${var.active_directory_domain} -InstallDns -Credential $credentials -SafeModeAdministratorPassword $password -Force:$true"
shutdown_command = "shutdown -r -t 10"
exit_code_hack = "exit 0"
powershell_command = "${local.import_command}; ${local.password_command}; ${local.install_ad_command}; ${local.configure_ad_command}; ${local.shutdown_command}; ${local.exit_code_hack}"
powershell_promote_command = "${local.password_command};${local.credentials_command}; ${local.install_ad_command}; ${local.promote_adds_command}; ${local.shutdown_command}; ${local.exit_code_hack}"
}
#creating a forest and promoting the Primary server as a DC
resource "azurerm_virtual_machine_extension" "create-active-directory-forest" {
name = "create-active-directory-forest"
virtual_machine_id = azurerm_windows_virtual_machine.virtual_machine[0].id
publisher = "Microsoft.Compute"
type = "CustomScriptExtension"
type_handler_version = "1.9"
settings = <<SETTINGS
{
"commandToExecute": "powershell.exe -Command \"${local.powershell_command}\""
}
SETTINGS
}
# Adding Secondary server to the Domain and promoting it as DC
resource "azurerm_virtual_machine_extension" "promote-to-domain-controller" {
name = "promote-to-domain-controller"
virtual_machine_id = azurerm_windows_virtual_machine.virtual_machine[1].id
publisher = "Microsoft.Compute"
type = "CustomScriptExtension"
type_handler_version = "1.9"
settings = <<SETTINGS
{
"commandToExecute": "powershell.exe -Command \"${local.powershell_promote_command}\""
}
SETTINGS
depends_on = [
azurerm_virtual_machine_extension.create-active-directory-forest
]
}
Variable.tf:
variable "active_directory_domain" {
description = "The name of the Active Directory domain, for example `consoto.local`"
default = "contoso.local"
}
variable "admin_password" {
description = "The password associated with the local administrator account on the virtual machine"
default = "Password#1234"
}
variable "active_directory_netbios_name" {
description = "The netbios name of the Active Directory domain, for example `consoto`"
default = "Contoso"
}
variable "domainAdminUsername" {
description = "The local administrator account on the Domain"
default = "ansuman#contoso.local"
}
Output: (screenshots omitted)
Availability Zone:
main.tf
provider "azurerm" {
features{}
}
## Import existing resource group
## Use this data source to access information about an existing Resource Group
data "azurerm_resource_group" "resource_group" {
name = "ansumantest"
}
## Import existing virtual network
## Use this data source to access information about an existing Virtual Network.
data "azurerm_virtual_network" "virtual_network" {
resource_group_name = data.azurerm_resource_group.resource_group.name
name = "ansuman-vnet"
}
## Import existing subnet within a virtual network
## Use this data source to access information about an existing Subnet within a Virtual Network.
data "azurerm_subnet" "subnet" {
name = "default"
virtual_network_name = data.azurerm_virtual_network.virtual_network.name
resource_group_name = data.azurerm_resource_group.resource_group.name
}
## Availability zones
variable "Zone" {
default=["1","2"]
}
resource "azurerm_network_security_group" "example" {
name = "ansuman-nsg"
location = data.azurerm_virtual_network.virtual_network.location
resource_group_name = data.azurerm_resource_group.resource_group.name
security_rule {
name = "test123"
priority = 100
direction = "Inbound"
access = "Allow"
protocol = "Tcp"
source_port_range = "*"
destination_port_range = "*"
source_address_prefix = "*"
destination_address_prefix = "*"
}
}
resource "azurerm_subnet_network_security_group_association" "example" {
subnet_id = data.azurerm_subnet.subnet.id
network_security_group_id = azurerm_network_security_group.example.id
}
## Create 2 Public IP
resource "azurerm_public_ip" "public_ip" {
count = 2
name = "ansuman-pip-${count.index}"
sku = "Standard"
availability_zone = var.Zone[count.index]
resource_group_name = data.azurerm_resource_group.resource_group.name
location = data.azurerm_virtual_network.virtual_network.location
allocation_method = "Static"
}
# Static private address to be used by each server
variable "PrivateIP" {
default=["10.0.0.5","10.0.0.6"]
}
## Create network interfaces for the VMs, adding the static private IPs to the DNS server list
resource "azurerm_network_interface" "vm_nic" {
count = 2
name = "vm-${count.index}-nic"
resource_group_name = data.azurerm_resource_group.resource_group.name
location = data.azurerm_virtual_network.virtual_network.location
dns_servers = var.PrivateIP
ip_configuration {
name = "internal"
subnet_id = data.azurerm_subnet.subnet.id
private_ip_address_allocation = "Static"
private_ip_address = var.PrivateIP[count.index]
public_ip_address_id = azurerm_public_ip.public_ip[count.index].id
}
}
## Create 2 Windows Virtual Machine
resource "azurerm_windows_virtual_machine" "virtual_machine" {
count = 2
name = "AZDC-${count.index}"
resource_group_name = data.azurerm_resource_group.resource_group.name
location = data.azurerm_virtual_network.virtual_network.location
size = "Standard_F8s_v2"
admin_username = "ansuman"
admin_password = "Password#1234"
zone = var.Zone[count.index]
network_interface_ids = [
azurerm_network_interface.vm_nic[count.index].id
]
os_disk {
caching = "ReadWrite"
storage_account_type = "Standard_LRS"
}
source_image_reference {
publisher = "MicrosoftWindowsServer"
offer = "WindowsServer"
sku = "2019-Datacenter"
version = "latest"
}
depends_on = [
azurerm_network_interface.vm_nic
]
}
# PowerShell commands to run ADDS on the VMs
locals {
import_command = "Import-Module ADDSDeployment"
password_command = "$password = ConvertTo-SecureString ${var.admin_password} -AsPlainText -Force"
credentials_command = "$credentials = new-object -typename System.Management.Automation.PSCredential -argumentlist ${var.domainAdminUsername},$password"
install_ad_command = "Add-WindowsFeature -name ad-domain-services,dns -IncludeManagementTools"
configure_ad_command = "Install-ADDSForest -CreateDnsDelegation:$false -DomainMode Win2012R2 -DomainName ${var.active_directory_domain} -DomainNetbiosName ${var.active_directory_netbios_name} -ForestMode Win2012R2 -InstallDns:$true -SafeModeAdministratorPassword $password -Force:$true"
promote_adds_command = "Install-ADDSDomainController -DomainName ${var.active_directory_domain} -InstallDns -Credential $credentials -SafeModeAdministratorPassword $password -Force:$true"
shutdown_command = "shutdown -r -t 10"
exit_code_hack = "exit 0"
powershell_command = "${local.import_command}; ${local.password_command}; ${local.install_ad_command}; ${local.configure_ad_command}; ${local.shutdown_command}; ${local.exit_code_hack}"
powershell_promote_command = "${local.password_command};${local.credentials_command}; ${local.install_ad_command}; ${local.promote_adds_command}; ${local.shutdown_command}; ${local.exit_code_hack}"
}
#creating a forest and promoting the Primary server as a DC
resource "azurerm_virtual_machine_extension" "create-active-directory-forest" {
name = "create-active-directory-forest"
virtual_machine_id = azurerm_windows_virtual_machine.virtual_machine[0].id
publisher = "Microsoft.Compute"
type = "CustomScriptExtension"
type_handler_version = "1.9"
settings = <<SETTINGS
{
"commandToExecute": "powershell.exe -Command \"${local.powershell_command}\""
}
SETTINGS
}
# Adding Secondary server to the Domain and promoting it as DC
resource "azurerm_virtual_machine_extension" "promote-to-domain-controller" {
name = "promote-to-domain-controller"
virtual_machine_id = azurerm_windows_virtual_machine.virtual_machine[1].id
publisher = "Microsoft.Compute"
type = "CustomScriptExtension"
type_handler_version = "1.9"
settings = <<SETTINGS
{
"commandToExecute": "powershell.exe -Command \"${local.powershell_promote_command}\""
}
SETTINGS
depends_on = [
azurerm_virtual_machine_extension.create-active-directory-forest
]
}
Note: Availability Sets and Availability Zones cannot be configured together; it is one or the other. If you want to use Zones, a Set cannot be used. You can also refer to this
Microsoft Community Blog for more details.
Output: (screenshots omitted)
For testing, log in to the secondary server using your domain admin username (in my case ansuman#contoso.local) and password.

AKS via Terraform Error: Code="CustomRouteTableWithUnsupportedMSIType"

I am trying to create a private AKS cluster via Terraform using an existing VNet and subnet. I was able to create the cluster, then suddenly the error below came up.
│ Error: creating Managed Kubernetes Cluster "demo-azwe-aks-cluster" (Resource Group "demo-azwe-aks-rg"): containerservice.ManagedClustersClient#CreateOrUpdate: Failure sending request: StatusCode=0 -- Original Error: Code="CustomRouteTableWithUnsupportedMSIType" Message="Clusters using managed identity type SystemAssigned do not support bringing your own route table. Please see https://aka.ms/aks/customrt for more information"
│
│ with azurerm_kubernetes_cluster.aks_cluster,
│ on aks_cluster.tf line 30, in resource "azurerm_kubernetes_cluster" "aks_cluster":
│ 30: resource "azurerm_kubernetes_cluster" "aks_cluster" {
# Provision AKS Cluster
resource "azurerm_kubernetes_cluster" "aks_cluster" {
name = "${var.global-prefix}-${var.cluster-id}-${var.environment}-azwe-aks-cluster"
location = "${var.location}"
resource_group_name = azurerm_resource_group.aks_rg.name
dns_prefix = "${var.global-prefix}-${var.cluster-id}-${var.environment}-azwe-aks-cluster"
kubernetes_version = data.azurerm_kubernetes_service_versions.current.latest_version
node_resource_group = "${var.global-prefix}-${var.cluster-id}-${var.environment}-azwe-aks-nrg"
private_cluster_enabled = true
default_node_pool {
name = "dpool"
vm_size = "Standard_DS2_v2"
orchestrator_version = data.azurerm_kubernetes_service_versions.current.latest_version
availability_zones = [1, 2, 3]
enable_auto_scaling = true
max_count = 2
min_count = 1
os_disk_size_gb = 30
type = "VirtualMachineScaleSets"
vnet_subnet_id = data.azurerm_subnet.aks.id
node_labels = {
"nodepool-type" = "system"
"environment" = "${var.environment}"
"nodepoolos" = "${var.nodepool-os}"
"app" = "system-apps"
}
tags = {
"nodepool-type" = "system"
"environment" = "dev"
"nodepoolos" = "linux"
"app" = "system-apps"
}
}
# Identity (System Assigned or Service Principal)
identity {
type = "SystemAssigned"
}
# Add On Profiles
addon_profile {
azure_policy {enabled = true}
oms_agent {
enabled = true
log_analytics_workspace_id = azurerm_log_analytics_workspace.insights.id
}
}
# RBAC and Azure AD Integration Block
role_based_access_control {
enabled = true
azure_active_directory {
managed = true
admin_group_object_ids = [azuread_group.aks_administrators.id]
}
}
# Linux Profile
linux_profile {
admin_username = "ubuntu"
ssh_key {
key_data = file(var.ssh_public_key)
}
}
# Network Profile
network_profile {
network_plugin = "kubenet"
load_balancer_sku = "Standard"
}
tags = {
Environment = "prod"
}
}
# Create Azure AD Group in Active Directory for AKS Admins
resource "azuread_group" "aks_administrators" {
name = "${azurerm_resource_group.aks_rg.name}-cluster-administrators"
description = "Azure AKS Kubernetes administrators for the ${azurerm_resource_group.aks_rg.name}-cluster."
}
You are trying to create a private AKS cluster with an existing VNet and existing subnets for both AKS and the firewall. So, as per the error "CustomRouteTableWithUnsupportedMSIType", you need a managed identity for the route table and a role assigned to it, i.e. Network Contributor.
The network profile will be azure instead of kubenet, as you are using an Azure VNet and its subnet.
You can use add-ons as per your requirement, but please ensure you have used a data block for the workspace; otherwise you can directly give the resource ID. So, instead of
log_analytics_workspace_id = azurerm_log_analytics_workspace.insights.id
you can use
log_analytics_workspace_id = "/subscriptions/SubscriptionID/resourcegroups/resourcegroupname/providers/microsoft.operationalinsights/workspaces/workspacename"
Example to create a private cluster with existing VNet and subnets (I haven't added add-ons):
provider "azurerm" {
features {}
}
#resource group as this will be referred to in managed identity creation
data "azurerm_resource_group" "base" {
name = "resourcegroupname"
}
# existing vnet
data "azurerm_virtual_network" "base" {
name = "ansuman-vnet"
resource_group_name = data.azurerm_resource_group.base.name
}
# existing subnets
data "azurerm_subnet" "aks" {
name = "akssubnet"
resource_group_name = data.azurerm_resource_group.base.name
virtual_network_name = data.azurerm_virtual_network.base.name
}
data "azurerm_subnet" "firewall" {
name = "AzureFirewallSubnet"
resource_group_name = data.azurerm_resource_group.base.name
virtual_network_name = data.azurerm_virtual_network.base.name
}
#user assigned identity required to create route table
resource "azurerm_user_assigned_identity" "base" {
resource_group_name = data.azurerm_resource_group.base.name
location = data.azurerm_resource_group.base.location
name = "mi-name"
}
#role assignment required to create route table
resource "azurerm_role_assignment" "base" {
scope = data.azurerm_resource_group.base.id
role_definition_name = "Network Contributor"
principal_id = azurerm_user_assigned_identity.base.principal_id
}
#route table
resource "azurerm_route_table" "base" {
name = "rt-aksroutetable"
location = data.azurerm_resource_group.base.location
resource_group_name = data.azurerm_resource_group.base.name
}
#route
resource "azurerm_route" "base" {
name = "dg-aksroute"
resource_group_name = data.azurerm_resource_group.base.name
route_table_name = azurerm_route_table.base.name
address_prefix = "0.0.0.0/0"
next_hop_type = "VirtualAppliance"
next_hop_in_ip_address = azurerm_firewall.base.ip_configuration.0.private_ip_address
}
#route table association
resource "azurerm_subnet_route_table_association" "base" {
subnet_id = data.azurerm_subnet.aks.id
route_table_id = azurerm_route_table.base.id
}
#firewall
resource "azurerm_public_ip" "base" {
name = "pip-firewall"
location = data.azurerm_resource_group.base.location
resource_group_name = data.azurerm_resource_group.base.name
allocation_method = "Static"
sku = "Standard"
}
resource "azurerm_firewall" "base" {
name = "fw-akscluster"
location = data.azurerm_resource_group.base.location
resource_group_name = data.azurerm_resource_group.base.name
ip_configuration {
name = "ip-firewallakscluster"
subnet_id = data.azurerm_subnet.firewall.id
public_ip_address_id = azurerm_public_ip.base.id
}
}
#kubernetes_cluster
resource "azurerm_kubernetes_cluster" "base" {
name = "testakscluster"
location = data.azurerm_resource_group.base.location
resource_group_name = data.azurerm_resource_group.base.name
dns_prefix = "dns-testakscluster"
private_cluster_enabled = true
network_profile {
network_plugin = "azure"
outbound_type = "userDefinedRouting"
}
default_node_pool {
name = "default"
node_count = 1
vm_size = "Standard_D2_v2"
vnet_subnet_id = data.azurerm_subnet.aks.id
}
identity {
type = "UserAssigned"
user_assigned_identity_id = azurerm_user_assigned_identity.base.id
}
depends_on = [
azurerm_route.base,
azurerm_role_assignment.base
]
}
Output: (Terraform plan, Terraform apply, and Azure portal screenshots omitted)
Note: By default, Azure requires the firewall subnet to be named AzureFirewallSubnet. If you use a subnet with any other name for the firewall, creation will error out. So please ensure the existing subnet to be used by the firewall is named AzureFirewallSubnet.

Failed to create aks using existing vnet

I'm trying to create AKS using Terraform; the catch is that I already have a VNet and subnet created, and I need the cluster created in that network.
When executing this code I'm getting an error:
locals {
environment = "prod"
resource_group = "hnk_rg_poc"
vnet_subnet_cidr = ["10.3.1.0/24"]
}
#Existing vnet with address space "10.3.1.0/24"
data "azurerm_virtual_network" "existing-vnet" {
name = "${var.vnet}"
resource_group_name = local.resource_group
}
#subnets
resource "azurerm_subnet" "vnet_subnet_id" {
name = "${var.vnet_subnet_id}"
resource_group_name = local.resource_group
address_prefixes = local.vnet_subnet_cidr
virtual_network_name = data.azurerm_virtual_network.existing-vnet.name
}
vnet_subnet_id = data.azurerm_subnet.vnet_subnet_id.id
As you already have an existing VNet and subnet to be used by the AKS cluster, you have to use a data block instead of a resource block for the subnet.
You can use the below to create a basic AKS cluster using your existing VNet and subnet:
provider "azurerm" {
features {}
}
#local vars
locals {
environment = "test"
resource_group = "resource_group_name"
name_prefix = "name-aks"
}
#Existing vnet with address space
data "azurerm_virtual_network" "base" {
name = "existing-vnet"
resource_group_name = local.resource_group
}
#existing subnet to be used by aks
data "azurerm_subnet" "aks" {
name = "existing-subnet"
resource_group_name = local.resource_group
virtual_network_name = data.azurerm_virtual_network.base.name
}
#kubernetes_cluster
resource "azurerm_kubernetes_cluster" "base" {
name = "${local.name_prefix}-${local.environment}"
location = data.azurerm_virtual_network.base.location
resource_group_name = data.azurerm_virtual_network.base.resource_group_name
dns_prefix = "dns-${local.name_prefix}-${local.environment}"
network_profile {
network_plugin = "azure"
}
default_node_pool {
name = "default"
node_count = 1
vm_size = "Standard_D2_v2"
vnet_subnet_id = data.azurerm_subnet.aks.id
}
identity {
type = "SystemAssigned"
}
}
Output: (Terraform plan screenshot omitted)

How do I connect an Azure VM network interface to an Azure load balancer address pool?

I have some code which creates multiple VMs. I'm trying to dynamically add them to the load balancer address pool, but I'm met with the following error, which I have no idea what it means. Any help will be appreciated, as the error appears to be somewhat obscure.
Error: Error: IP Configuration "azure_network_interface_address_pool_association" was not found on Network Interface "client_host_nic-0" (Resource Group "client_rg")
on vm2.tf line 99, in resource "azurerm_network_interface_backend_address_pool_association" "network_interface_backend_address_pool_association":
99: resource "azurerm_network_interface_backend_address_pool_association" "network_interface_backend_address_pool_association" {
vm2.tf file includes
# Create virtual machine
resource "azurerm_network_interface" "client_nics" {
count = var.node_count
name = "client_host_nic-${count.index}"
location = var.resource_group_location
resource_group_name = module.network.azurerm_resource_group_client_name
# network_security_group_id = module.network.bastion_host_network_security_group
ip_configuration {
name = "client_host_nic"
subnet_id = module.network.client_subnet_id
private_ip_address_allocation = "Dynamic"
# public_ip_address_id = module.network.bastion_host_puplic_ip_address #optional field we have a bastion host so no need for public IP also its vnet peered so this adds an extra layer of securit in a way
}
tags = {
environment = "Production"
}
}
# Generate random text for a unique storage account name
resource "random_id" "randomId_Generator" {
keepers = {
# Generate a new ID only when a new resource group is defined
resource_group = var.resource_group_location
}
byte_length = 8
}
# Create storage account for boot diagnostics
resource "azurerm_storage_account" "client_storageaccount" {
name = "diag${random_id.randomId_Generator.hex}"
resource_group_name = module.network.azurerm_resource_group_client_name
location = var.resource_group_location
account_tier = "Standard"
account_replication_type = "LRS"
tags = {
environment = "Production"
}
}
resource "azurerm_virtual_machine" "node" {
count = var.node_count
name = "client-host-${count.index}"
location = var.resource_group_location
resource_group_name = module.network.azurerm_resource_group_client_name
network_interface_ids = ["${element(azurerm_network_interface.client_nics.*.id, count.index)}"]
# Uncomment this line to delete the OS disk automatically when deleting the VM
delete_os_disk_on_termination = true
# Uncomment this line to delete the data disks automatically when deleting the VM
delete_data_disks_on_termination = true
# 1 vCPU, 3.5 Gb of RAM
vm_size = var.machine_type
storage_os_disk {
name = "myOsDisk-${count.index}"
caching = "ReadWrite"
create_option = "FromImage"
managed_disk_type = "Premium_LRS"
}
storage_image_reference {
publisher = "Canonical"
offer = "UbuntuServer"
sku = "18.04-LTS"
version = "latest"
}
os_profile {
computer_name = "Production"
admin_username = "azureuser"
}
os_profile_linux_config {
disable_password_authentication = true
ssh_keys {
path = "/home/azureuser/.ssh/authorized_keys" #This cannot be changed as mentioned in https://www.terraform.io/docs/providers/azurerm/r/virtual_machine.html
key_data = file("~/.ssh/client.pub")
}
}
boot_diagnostics {
enabled = "true"
storage_uri = azurerm_storage_account.client_storageaccount.primary_blob_endpoint
}
tags = {
environment = "Production"
}
}
resource "azurerm_network_interface_backend_address_pool_association" "network_interface_backend_address_pool_association" {
count = var.node_count
network_interface_id = element(azurerm_network_interface.client_nics.*.id, count.index) #fixes interpolation issues
ip_configuration_name = "azure_network_interface_address_pool_association"
backend_address_pool_id = module.loadbalancer.azure_backend_pool_id
}
load balancer module main.tf file
# remember when using this module to call the network module for the resource group name
############## load balancer section ##############
resource "azurerm_public_ip" "azure_load_balancer_IP" {
name = "azure_load_balancer_IP"
location = var.resource_group_location
resource_group_name = var.resource_group_name
allocation_method = "Static"
}
resource "azurerm_lb" "azure_load_balancer" {
name = "TestLoadBalancer"
location = var.resource_group_location
resource_group_name = var.resource_group_name
frontend_ip_configuration {
name = "front_end_IP_configuration_for_azure_load_balancer"
public_ip_address_id = azurerm_public_ip.azure_load_balancer_IP.id
}
}
resource "azurerm_lb_backend_address_pool" "backend_address_pool" {
resource_group_name = var.resource_group_name
loadbalancer_id = azurerm_lb.azure_load_balancer.id
name = "BackEndAddressPool"
}
resource "azurerm_lb_rule" "azure_lb_rule" {
resource_group_name = var.resource_group_name
loadbalancer_id = azurerm_lb.azure_load_balancer.id
name = "LBRule"
protocol = "Tcp"
frontend_port = 80
backend_port = 80
frontend_ip_configuration_name = "front_end_IP_configuration_for_azure_load_balancer"
}
output.tf
output "azure_load_balancer_ip" {
value = azurerm_public_ip.azure_load_balancer_IP.id
}
output "azure_backend_pool_id" {
value = azurerm_lb_backend_address_pool.backend_address_pool.id
}
additional information
* provider.azurerm: version = "~> 2.1"
main.tf
module "loadbalancer" {
source = "./azure_load_balancer_module" #this may need to be a different git repo as we are not referencing branches here only the master
resource_group_name = module.network.azurerm_resource_group_client_name
resource_group_location = var.resource_group_location
}
The error means the IP configuration name you set for the network interface is not the same as the one you set in the azurerm_network_interface_backend_address_pool_association resource. You can take a look at the description for ip_configuration_name here. As I see it, you want to associate multiple interfaces with the load balancer.
So I recommend you change the network interface and the association like this:
resource "azurerm_network_interface" "client_nics" {
count = var.node_count
name = "client_host_nic-${count.index}"
location = var.resource_group_location
resource_group_name = module.network.azurerm_resource_group_client_name
# network_security_group_id = module.network.bastion_host_network_security_group
ip_configuration {
name = "client_host_nic-${count.index}"
subnet_id = module.network.client_subnet_id
private_ip_address_allocation = "Dynamic"
# public_ip_address_id = module.network.bastion_host_puplic_ip_address #optional field we have a bastion host so no need for public IP also its vnet peered so this adds an extra layer of securit in a way
}
tags = {
environment = "Production"
}
}
resource "azurerm_network_interface_backend_address_pool_association" "network_interface_backend_address_pool_association" {
count = var.node_count
network_interface_id = element(azurerm_network_interface.client_nics.*.id, count.index) #fixes interpolation issues
ip_configuration_name = "client_host_nic-${count.index}"
backend_address_pool_id = module.loadbalancer.azure_backend_pool_id
}
I believe this was a bug in the azurerm 2.1 Terraform provider; it seems to have been fixed in version 2.33 according to the upstream issue https://github.com/terraform-providers/terraform-provider-azurerm/issues/3794
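If you are running into that bug, a minimal fix is simply to constrain the provider to a version at or above the fix (a sketch; the exact constraint is based on the issue linked above):
terraform {
  required_providers {
    azurerm = {
      source  = "hashicorp/azurerm"
      version = ">= 2.33.0"
    }
  }
}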
