I'm trying to create several applications in one go, and I can do it successfully with count = length(var.webapp_name). The problem I'm facing is that some of the apps need to communicate with each other, so I need to whitelist outbound_ip_addresses on each app.
The code I use, which is causing the issue, is the following:
resource "azurerm_linux_web_app" "API" {
depends_on = [azurerm_subnet.subnet]
count = length(var.webapp_name)
name = lower("${var.customer4letter}-${var.env3letter}-${var.locationid3letter}-${var.servicetype}-${element(var.webapp_name, count.index)}")
location = var.location //West US 2
resource_group_name = azurerm_resource_group.rg.name
service_plan_id = azurerm_service_plan.api-farm.id
https_only = "true"
app_settings = {
"WEBSITE_USE_DIAGNOSTIC_SERVER" = "True"
}
identity {
type = "SystemAssigned"
}
site_config {
ftps_state = "FtpsOnly"
websockets_enabled = "false"
use_32_bit_worker = "false"
always_on = "true"
application_stack {
dotnet_version = "6.0"
}
dynamic "ip_restriction" {
for_each = local.ip_address_list3
content{
action = "Allow"
name = ip_restriction.value["name"]
service_tag = ip_restriction.value["service_tag"]
priority = ip_restriction.value["prior"]
} }
dynamic "ip_restriction" {
for_each = azurerm_linux_web_app.API[0].outbound_ip_addresses
content {
ip_address = cidrhost(ip_restriction.value,0)
} } } }
The locals and variables are set as follows:
variable "webapp_name" {
default = [ "app1", "app2", "app3" ]
}
locals {
ip_address_list3 = [
{
service_tag = "AppService"
prior : "102",
name = "VirtualNetwork"
}
]
}
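For context: outbound_ip_addresses on azurerm_linux_web_app is exposed as a single comma-separated string (the provider also exposes outbound_ip_address_list as a list), and a resource block cannot reference its own attributes the way the second dynamic block above does. A minimal sketch of iterating another app's outbound IPs, assuming a hypothetical second resource named other_app:

# Sketch only; "other_app" is an assumed second azurerm_linux_web_app.
# The comma-separated string must be split before it can drive a dynamic block.
dynamic "ip_restriction" {
  for_each = split(",", azurerm_linux_web_app.other_app.outbound_ip_addresses)
  content {
    name       = "allow-other-app-${ip_restriction.key}"
    action     = "Allow"
    priority   = 200 + ip_restriction.key
    ip_address = "${ip_restriction.value}/32"
  }
}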
The error that I get is the following:
I tried to reproduce the same in my environment, deploying Azure web apps with outbound IP restrictions using Terraform:
Terraform Code:
provider "azurerm" {
features {}
}
locals {
resource_group_name = "test-rg"
location = "East US"
app_name_prefix = "venkatdemoapp"
}
resource "azurerm_resource_group" "test-rg" {
name = local.resource_group_name
location = local.location
}
# Create multiple web apps
resource "azurerm_app_service_plan" "test_plan" {
count = 2
name = "${local.app_name_prefix}-plan-${count.index}"
location = local.location
resource_group_name = azurerm_resource_group.test-rg.name
sku {
tier = "Basic"
size = "B1"
}
}
resource "azurerm_app_service" "thejawebapp" {
count = 2
name = "${local.app_name_prefix}-${count.index}"
location = local.location
resource_group_name = azurerm_resource_group.test-rg.name
app_service_plan_id = azurerm_app_service_plan.test_plan[count.index].id
}
# Restrict access to each web app
resource "azurerm_network_security_group" "test_nsg" {
count = 2
name = "${local.app_name_prefix}-nsg-${count.index}"
location = local.location
resource_group_name = azurerm_resource_group.test-rg.name
security_rule {
name = "Allow_HTTP_Traffic"
priority = 100
direction = "Inbound"
access = "Allow"
protocol = "Tcp"
source_port_range = "*"
destination_port_range = "80"
source_address_prefix = "*"
destination_address_prefix = "*"
}
security_rule {
name = "Deny_All_Traffic"
priority = 200
direction = "Inbound"
access = "Deny"
protocol = "*"
source_port_range = "*"
destination_port_range = "*"
source_address_prefix = "*"
destination_address_prefix = "*"
}
}
# Associate each web app with its NSG.
resource "azurerm_network_interface" "test_nic" {
count = 2
name = "${local.app_name_prefix}-nic-${count.index}"
location = local.location
resource_group_name = azurerm_resource_group.test-rg.name
ip_configuration {
name = "${local.app_name_prefix}-ipconfig-${count.index}"
subnet_id = azurerm_subnet.test_subnet.id
private_ip_address_allocation = "Dynamic"
}
}
resource "azurerm_subnet" "test_subnet" {
name = "${local.app_name_prefix}-subnet"
resource_group_name = azurerm_resource_group.test-rg.name
virtual_network_name = azurerm_virtual_network.test_vnet.name
address_prefixes = ["10.0.1.0/24"]
}
resource "azurerm_virtual_network" "test_vnet" {
name = "${local.app_name_prefix}-vnet"
location = local.location
resource_group_name = azurerm_resource_group.test-rg.name address_space = ["10.0.0.0/16"]
}
Terraform Apply:
Once the above code is applied, the resources are created with the restrictions in place, and the rule shows up on each NSG.
I'm following Neal Shah's instructions for deploying multiple VMs with multiple managed disks (https://www.nealshah.dev/posts/2020/05/terraform-for-azure-deploying-multiple-vms-with-multiple-managed-disks/#deploying-multiple-vms-with-multiple-datadisks)
Everything works fine except for the azurerm_virtual_machine_data_disk_attachment resource, which fails with the following error:
│ Error: Invalid index
│
│ on main.tf line 103, in resource "azurerm_virtual_machine_data_disk_attachment" "managed_disk_attach":
│ 103: virtual_machine_id = azurerm_linux_virtual_machine.vms[element(split("_", each.key), 1)].id
│ ├────────────────
│ │ azurerm_linux_virtual_machine.vms is tuple with 3 elements
│ │ each.key is "datadisk_dca0-apache-cassandra-node0_disk00"
│
│ The given key does not identify an element in this collection value: a number is required.
My code is below:
locals {
vm_datadiskdisk_count_map = { for k in toset(var.nodes) : k => var.data_disk_count }
luns = { for k in local.datadisk_lun_map : k.datadisk_name => k.lun }
datadisk_lun_map = flatten([
for vm_name, count in local.vm_datadiskdisk_count_map : [
for i in range(count) : {
datadisk_name = format("datadisk_%s_disk%02d", vm_name, i)
lun = i
}
]
])
}
# create resource group
resource "azurerm_resource_group" "resource_group" {
name = format("%s-%s", var.dca, var.name)
location = var.location
}
# create availability set
resource "azurerm_availability_set" "vm_availability_set" {
name = format("%s-%s-availability-set", var.dca, var.name)
location = azurerm_resource_group.resource_group.location
resource_group_name = azurerm_resource_group.resource_group.name
}
# create Security Group to access linux
resource "azurerm_network_security_group" "linux_vm_nsg" {
name = format("%s-%s-linux-vm-nsg", var.dca, var.name)
location = azurerm_resource_group.resource_group.location
resource_group_name = azurerm_resource_group.resource_group.name
security_rule {
name = "AllowSSH"
description = "Allow SSH"
priority = 100
direction = "Inbound"
access = "Allow"
protocol = "Tcp"
source_port_range = "*"
destination_port_range = "22"
source_address_prefix = "*"
destination_address_prefix = "*"
}
}
# associate the linux NSG with the subnet
resource "azurerm_subnet_network_security_group_association" "linux_vm_nsg_association" {
subnet_id = "${data.azurerm_subnet.subnet.id}"
network_security_group_id = azurerm_network_security_group.linux_vm_nsg.id
}
# create NICs for apache cassandra hosts
resource "azurerm_network_interface" "vm_nics" {
depends_on = [azurerm_subnet_network_security_group_association.linux_vm_nsg_association]
count = length(var.nodes)
name = format("%s-%s-nic${count.index}", var.dca, var.name)
location = azurerm_resource_group.resource_group.location
resource_group_name = azurerm_resource_group.resource_group.name
ip_configuration {
name = format("%s-%s-apache-cassandra-ip", var.dca, var.name)
subnet_id = "${data.azurerm_subnet.subnet.id}"
private_ip_address_allocation = "Dynamic"
}
}
# create apache cassandra VMs
resource "azurerm_linux_virtual_machine" "vms" {
count = length(var.nodes)
name = element(var.nodes, count.index)
location = azurerm_resource_group.resource_group.location
resource_group_name = azurerm_resource_group.resource_group.name
network_interface_ids = [element(azurerm_network_interface.vm_nics.*.id, count.index)]
availability_set_id = azurerm_availability_set.vm_availability_set.id
size = var.vm_size
admin_username = var.admin_username
disable_password_authentication = true
admin_ssh_key {
username = var.admin_username
public_key = var.ssh_pub_key
}
source_image_id = var.source_image_id
os_disk {
caching = "ReadWrite"
storage_account_type = var.storage_account_type
disk_size_gb = var.os_disk_size_gb
}
}
# create data disk(s) for VMs
resource "azurerm_managed_disk" "managed_disk" {
for_each= toset([for j in local.datadisk_lun_map : j.datadisk_name])
name= each.key
location = azurerm_resource_group.resource_group.location
resource_group_name = azurerm_resource_group.resource_group.name
storage_account_type = var.storage_account_type
create_option = "Empty"
disk_size_gb = var.disk_size_gb
}
resource "azurerm_virtual_machine_data_disk_attachment" "managed_disk_attach" {
for_each = toset([for j in local.datadisk_lun_map : j.datadisk_name])
managed_disk_id = azurerm_managed_disk.managed_disk[each.key].id
virtual_machine_id = azurerm_linux_virtual_machine.vms[element(split("_", each.key), 1)].id
lun = lookup(local.luns, each.key)
caching = "ReadWrite"
}
Does anyone know how to accomplish this? Thanks!
I've tried several different approaches but have been unsuccessful so far; I was expecting it to work as described in Neal's post.
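For what it's worth, the Invalid index error above happens because element(split("_", each.key), 1) yields a VM name string, while azurerm_linux_virtual_machine.vms is a count-based tuple that only accepts numeric indexes. One possible sketch that keeps the count-based VMs (an assumption, not the approach ultimately used below) is to map each name back to its index:

locals {
  vm_index_by_name = { for i, name in var.nodes : name => i }
}

resource "azurerm_virtual_machine_data_disk_attachment" "managed_disk_attach" {
  for_each           = toset([for j in local.datadisk_lun_map : j.datadisk_name])
  managed_disk_id    = azurerm_managed_disk.managed_disk[each.key].id
  virtual_machine_id = azurerm_linux_virtual_machine.vms[local.vm_index_by_name[element(split("_", each.key), 1)]].id
  lun                = local.luns[each.key]
  caching            = "ReadWrite"
}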
I was able to get this working. However, I have not tested adding/removing nodes/disks yet. But this works to create multiple VMs with multiple data disks attached to each VM.
I use a variable file that I source to substitute the variables in the *.tf files.
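The substitution step itself isn't shown, but it could look roughly like this sketch (envsubst and the env file name are assumptions):

# Render the templated files after exporting the values they reference.
source ./cassandra.env        # exports dca, name, vm_size, ssh_pub_key, ...
for f in variables.tf terraform.tfvars main.tf; do
  envsubst < "$f.tpl" > "$f"
done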
variables.tf
variable "azure_subscription_id" {
type = string
description = "Azure Subscription ID"
default = ""
}
variable "dca" {
type = string
description = "datacenter [dca0|dca2|dca4|dca6]."
default = ""
}
variable "location" {
type = string
description = "Location of the resource group."
default = ""
}
variable "resource_group" {
type = string
description = "resource group name."
default = ""
}
variable "subnet_name" {
type = string
description = "subnet name"
default = ""
}
variable "vnet_name" {
type = string
description = "vnet name"
default = ""
}
variable "vnet_rg" {
type = string
description = "vnet resource group"
default = ""
}
variable "vm_size" {
type = string
description = "vm size"
default = ""
}
variable "os_disk_size_gb" {
type = string
description = "vm os disk size gb"
default = ""
}
variable "data_disk_size_gb" {
type = string
description = "vm data disk size gb"
default = ""
}
variable "admin_username" {
type = string
description = "admin user name"
default = ""
}
variable "ssh_pub_key" {
type = string
description = "public key for admin user"
default = ""
}
variable "source_image_id" {
type = string
description = "image id"
default = ""
}
variable "os_disk_storage_account_type" {
type = string
description = ""
default = ""
}
variable "data_disk_storage_account_type" {
type = string
description = ""
default = ""
}
variable "vm_list" {
type = map(object({
hostname = string
}))
default = {
vm0 = {
hostname = "${dca}-${name}-node-0"
},
vm1 = {
hostname = "${dca}-${name}-node-1"
}
vm2 = {
hostname = "${dca}-${name}-node-2"
}
}
}
variable "disks_per_instance" {
type = string
description = ""
default = ""
}
terraform.tfvars
# subscription
azure_subscription_id = "${azure_subscription_id}"
# name and location
resource_group = "${dca}-${name}"
location = "${location}"
dca = "${dca}"
# Network
subnet_name = "${subnet_name}"
vnet_name = "${dca}vnet"
vnet_rg = "th-${dca}-vnet"
# VM
vm_size = "${vm_size}"
os_disk_size_gb = "${os_disk_size_gb}"
os_disk_storage_account_type = "${os_disk_storage_account_type}"
source_image_id = "${source_image_id}"
# User/key info
admin_username = "${admin_username}"
ssh_pub_key = "${ssh_pub_key}"
# data disk info
data_disk_storage_account_type = "${data_disk_storage_account_type}"
data_disk_size_gb = "${data_disk_size_gb}"
disks_per_instance = "${disks_per_instance}"
main.tf
# set locals for multi data disks
locals {
vm_datadiskdisk_count_map = { for k, query in var.vm_list : k => var.disks_per_instance }
luns = { for k in local.datadisk_lun_map : k.datadisk_name => k.lun }
datadisk_lun_map = flatten([
for vm_name, count in local.vm_datadiskdisk_count_map : [
for i in range(count) : {
datadisk_name = format("datadisk_%s_disk%02d", vm_name, i)
lun = i
}
]
])
}
# create resource group
resource "azurerm_resource_group" "resource_group" {
name = format("%s", var.resource_group)
location = var.location
}
# create data disk(s)
resource "azurerm_managed_disk" "managed_disk" {
for_each = toset([for j in local.datadisk_lun_map : j.datadisk_name])
name = each.key
location = azurerm_resource_group.resource_group.location
resource_group_name = azurerm_resource_group.resource_group.name
storage_account_type = var.data_disk_storage_account_type
create_option = "Empty"
disk_size_gb = var.data_disk_size_gb
}
# create availability set
resource "azurerm_availability_set" "vm_availability_set" {
name = format("%s-availability-set", var.resource_group)
location = azurerm_resource_group.resource_group.location
resource_group_name = azurerm_resource_group.resource_group.name
}
# create Security Group to access linux
resource "azurerm_network_security_group" "linux_vm_nsg" {
name = format("%s-linux-vm-nsg", var.resource_group)
location = azurerm_resource_group.resource_group.location
resource_group_name = azurerm_resource_group.resource_group.name
security_rule {
name = "AllowSSH"
description = "Allow SSH"
priority = 100
direction = "Inbound"
access = "Allow"
protocol = "Tcp"
source_port_range = "*"
destination_port_range = "22"
source_address_prefix = "*"
destination_address_prefix = "*"
}
}
# associate the linux NSG with the subnet
resource "azurerm_subnet_network_security_group_association" "linux_vm_nsg_association" {
subnet_id = "${data.azurerm_subnet.subnet.id}"
network_security_group_id = azurerm_network_security_group.linux_vm_nsg.id
}
# create NICs for vms
resource "azurerm_network_interface" "nics" {
depends_on = [azurerm_subnet_network_security_group_association.linux_vm_nsg_association]
for_each = var.vm_list
name = "${each.value.hostname}-nic"
location = azurerm_resource_group.resource_group.location
resource_group_name = azurerm_resource_group.resource_group.name
ip_configuration {
name = format("%s-proxy-ip", var.resource_group)
subnet_id = "${data.azurerm_subnet.subnet.id}"
private_ip_address_allocation = "Dynamic"
}
}
# create VMs
resource "azurerm_linux_virtual_machine" "vms" {
for_each = var.vm_list
name = each.value.hostname
location = azurerm_resource_group.resource_group.location
resource_group_name = azurerm_resource_group.resource_group.name
network_interface_ids = [azurerm_network_interface.nics[each.key].id]
availability_set_id = azurerm_availability_set.vm_availability_set.id
size = var.vm_size
source_image_id = var.source_image_id
custom_data = filebase64("cloud-init.sh")
admin_username = var.admin_username
disable_password_authentication = true
admin_ssh_key {
username = var.admin_username
public_key = var.ssh_pub_key
}
os_disk {
caching = "ReadWrite"
storage_account_type = var.os_disk_storage_account_type
disk_size_gb = var.os_disk_size_gb
}
}
# attach data disks to vms
resource "azurerm_virtual_machine_data_disk_attachment" "managed_disk_attach" {
for_each = toset([for j in local.datadisk_lun_map : j.datadisk_name])
managed_disk_id = azurerm_managed_disk.managed_disk[each.key].id
virtual_machine_id = azurerm_linux_virtual_machine.vms[element(split("_", each.key), 1)].id
lun = lookup(local.luns, each.key)
caching = "ReadWrite"
}
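To make the lookup concrete: with disks_per_instance = 2, datadisk_lun_map expands to entries like the following, so element(split("_", each.key), 1) now returns a vm_list key such as "vm0", which is a valid index into the for_each-based vms resource:

# Illustrative expansion of local.datadisk_lun_map (not part of the config):
# { datadisk_name = "datadisk_vm0_disk00", lun = 0 }
# { datadisk_name = "datadisk_vm0_disk01", lun = 1 }
# { datadisk_name = "datadisk_vm1_disk00", lun = 0 }
# ...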
I am trying to provision a Linux image from my gallery that was generalized. However, I am getting the error StorageProfile.dataDisks.lun does not have required value(s) for image specified in storage profile, even though the lun for the data disk is already set.
Looking around, the closest thing I can find is "created linux image in azure, cant seem to deploy"; however, as mentioned there as well, the lun for the data disk has been set.
I am assuming the lun is for the OS disk, but I don't set that anywhere, per the [documentation][1]?
The image is based on Ubuntu 18.04 with one data disk attached on lun 0. It was customised and then generalized.
terraform {
  required_providers {
    azurerm = {
      source  = "hashicorp/azurerm"
      version = "2.72.0"
    }
  }
}
provider "azurerm" {
features {}
}
#get my ip
data "http" "icanhazip" {
url = "http://icanhazip.com"
}
data "azurerm_ssh_public_key" "publickey" {
name = "x"
resource_group_name = "x"
}
data "azurerm_shared_image_version" "asgi" {
name = "x"
image_name = "x-generalized"
gallery_name = "x"
resource_group_name = data.azurerm_ssh_public_key.publickey.resource_group_name
}
output "public_ip" {
value = azurerm_public_ip.publicip.ip_address
}
data "azurerm_resource_group" "main" {
name = var.resource_group_name
}
data "azurerm_subnet" "vnet" {
name = var.subnet_name
virtual_network_name = var.virtual_network_name
resource_group_name = var.resource_group_name
}
# Create a Network Security Group with some rules
resource "azurerm_network_security_group" "main" {
name = "${var.linux_virtual_machine_name}-nsg"
location = data.azurerm_resource_group.main.location
resource_group_name = data.azurerm_resource_group.main.name
tags = {
environment = var.environment
region = var.region
role = var.role
owner = var.owner
}
}
resource "azurerm_network_security_rule" "sg-inbound" {
name = "SG-inbound"
priority = 103
direction = "Inbound"
access = "Deny"
protocol = "*"
source_port_range = "*"
destination_port_range = "*"
source_address_prefixes = [
"x/32",
"x/32",
"x/32",
"x/32"
]
destination_address_prefix = "*"
resource_group_name = data.azurerm_resource_group.main.name
network_security_group_name = azurerm_network_security_group.main.name
}
resource "azurerm_network_security_rule" "sg-outbound" {
name = "SG-outbound"
priority = 104
direction = "Outbound"
access = "Deny"
protocol = "*"
source_port_range = "*"
destination_port_range = "*"
source_address_prefix = "*"
destination_address_prefixes = [
"x/32",
"x/32",
"x/32",
"x/32"
]
resource_group_name = data.azurerm_resource_group.main.name
network_security_group_name = azurerm_network_security_group.main.name
}
resource "azurerm_network_security_rule" "mongorules" {
for_each = local.nsgrules
name = each.key
direction = each.value.direction
access = each.value.access
priority = each.value.priority
protocol = each.value.protocol
source_port_range = each.value.source_port_range
destination_port_range = each.value.destination_port_range
source_address_prefix = each.value.source_address_prefix
destination_address_prefix = each.value.destination_address_prefix
resource_group_name = data.azurerm_resource_group.main.name
network_security_group_name = azurerm_network_security_group.main.name
}
# Create a network interface for VMs and attach the PIP and the NSG
# Create public IPs
resource "azurerm_public_ip" "publicip" {
name = "${var.linux_virtual_machine_name}-publicip"
location = data.azurerm_resource_group.main.location
resource_group_name = data.azurerm_resource_group.main.name
allocation_method = "Static"
sku = "Standard"
domain_name_label = var.linux_virtual_machine_name
tags = azurerm_network_security_group.main.tags
}
# create a network interface
resource "azurerm_network_interface" "nic" {
name = "${var.linux_virtual_machine_name}-nic"
location = data.azurerm_resource_group.main.location
resource_group_name = data.azurerm_resource_group.main.name
tags = azurerm_network_security_group.main.tags
ip_configuration {
name = "${var.linux_virtual_machine_name}-publicip"
subnet_id = data.azurerm_subnet.vnet.id
private_ip_address_allocation = "Dynamic"
public_ip_address_id = azurerm_public_ip.publicip.id
}
}
resource "azurerm_managed_disk" "dataDisk" {
name = "${var.linux_virtual_machine_name}-datadisk"
location = data.azurerm_resource_group.main.location
resource_group_name = data.azurerm_resource_group.main.name
storage_account_type = "StandardSSD_LRS"
create_option = "Empty"
disk_size_gb = 250
tags = {
environment = var.environment
region = var.region
role = var.role
owner = var.owner
resource = "dataDisk"
}
}
# Create a new Virtual Machine based on the Golden Image
resource "azurerm_linux_virtual_machine" "vm" {
name = var.linux_virtual_machine_name
location = data.azurerm_resource_group.main.location
resource_group_name = data.azurerm_resource_group.main.name
network_interface_ids = ["${azurerm_network_interface.nic.id}"]
size = var.vm_size
admin_username = var.admin_username
computer_name = var.linux_virtual_machine_name
disable_password_authentication = true
source_image_id = data.azurerm_shared_image_version.asgi.id
tags = azurerm_network_security_group.main.tags
os_disk {
name = "${var.linux_virtual_machine_name}-osDisk"
caching = "ReadWrite"
storage_account_type = "Standard_LRS"
}
admin_ssh_key {
username = "x"
public_key = data.azurerm_ssh_public_key.publickey.public_key
}
}
resource "azurerm_virtual_machine_data_disk_attachment" "attachDisk" {
managed_disk_id = azurerm_managed_disk.dataDisk.id
virtual_machine_id = azurerm_linux_virtual_machine.vm.id
lun = 1
caching = "ReadWrite"
depends_on = [
azurerm_linux_virtual_machine.vm
]
}
[1]: https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/linux_virtual_machine
Please use the below:
resource "azurerm_virtual_machine_data_disk_attachment" "attachDisk" {
managed_disk_id = azurerm_managed_disk.dataDisk.id
virtual_machine_id = azurerm_linux_virtual_machine.vm.id
lun = "1"
caching = "ReadWrite"
depends_on = [
azurerm_linux_virtual_machine.vm
]
}
Instead of
resource "azurerm_virtual_machine_data_disk_attachment" "attachDisk" {
managed_disk_id = azurerm_managed_disk.dataDisk.id
virtual_machine_id = azurerm_linux_virtual_machine.vm.id
lun = 1
caching = "ReadWrite"
depends_on = [
azurerm_linux_virtual_machine.vm
]
}
OR
Please check if lun 1 is being used by any other data disk that you have created earlier.
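A quick way to see which luns are already taken on the VM is a CLI query along these lines (a sketch; the resource group and VM names are placeholders):

az vm show --resource-group myRG --name myVM \
  --query "storageProfile.dataDisks[].{name:name, lun:lun}" --output table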
Reference:
azurerm_virtual_machine_data_disk_attachment | Resources | hashicorp/azurerm | Terraform Registry
I am trying to create a Terraform PoC that has two CentOS VMs and an Azure Load Balancer.
Each VM has one private and one public IP and has the httpd package installed.
Even though the elements are provisioned successfully, accessing the public IP of the Load Balancer does not return the default httpd content (inside the CentOS VMs, curl against localhost or the IP returns the correct content).
No firewall is enabled on CentOS.
Below is the Terraform file. (The location I am using is westeurope.)
Q: What am I missing in the configuration for the Load Balancer? All items are provisioned with no errors from Terraform, but when accessing the public IP of the load balancer I get a timeout instead of the default Apache page.
resource "azurerm_resource_group" "test" {
name = var.rg_name
location = var.location
tags = {
Owner = var.tags["Owner"]
Environment = var.tags["Environment"]
}
}
resource "azurerm_virtual_network" "test" {
name = var.vnet_name
address_space = ["192.168.0.0/16"]
location = azurerm_resource_group.test.location
resource_group_name = azurerm_resource_group.test.name
tags = {
Owner = var.tags["Owner"]
Environment = var.tags["Environment"]
}
}
resource "azurerm_subnet" "test" {
name = var.networks["subnet1"]
resource_group_name = azurerm_resource_group.test.name
virtual_network_name = azurerm_virtual_network.test.name
address_prefixes = ["192.168.0.0/24"]
}
resource "azurerm_public_ip" "testlb" {
name = "tf-demo-publicIPForLB"
location = azurerm_resource_group.test.location
resource_group_name = azurerm_resource_group.test.name
sku = "Standard"
allocation_method = "Static"
domain_name_label = "acndemo"
tags = {
Owner = var.tags["Owner"]
Environment = var.tags["Environment"]
}
}
resource "azurerm_lb" "test" {
name = "tf-demo-loadBalancer"
location = azurerm_resource_group.test.location
resource_group_name = azurerm_resource_group.test.name
sku = "Standard"
frontend_ip_configuration {
name = "tf-demo-lb-publicIPAddress"
public_ip_address_id = azurerm_public_ip.testlb.id
}
tags = {
Owner = var.tags["Owner"]
Environment = var.tags["Environment"]
}
}
resource "azurerm_lb_backend_address_pool" "test" {
loadbalancer_id = azurerm_lb.test.id
name = "tf-demo-BackEndAddressPool"
}
resource "azurerm_network_interface_backend_address_pool_association" "test" {
count = 2
network_interface_id = "${azurerm_network_interface.test[count.index].id}"
ip_configuration_name = "tf-demo-nic-config${count.index}"
backend_address_pool_id = azurerm_lb_backend_address_pool.test.id
}
resource "azurerm_lb_probe" "test" {
resource_group_name = azurerm_resource_group.test.name
loadbalancer_id = azurerm_lb.test.id
name = "tf-demo-http-running-probe"
protocol = "Http"
port = 80
request_path = "/"
}
resource "azurerm_lb_rule" "test" {
resource_group_name = azurerm_resource_group.test.name
loadbalancer_id = azurerm_lb.test.id
name = "tf-demo-LBRule"
protocol = "Tcp"
frontend_port = 80
backend_port = 80
frontend_ip_configuration_name = "tf-demo-lb-publicIPAddress"
backend_address_pool_id = azurerm_lb_backend_address_pool.test.id
probe_id = azurerm_lb_probe.test.id
}
resource "azurerm_public_ip" "test" {
count = 2
name = "tf-demo-publicIPForVM${count.index}"
location = azurerm_resource_group.test.location
resource_group_name = azurerm_resource_group.test.name
sku = "Standard"
allocation_method = "Static"
domain_name_label = "acngrvm${count.index}"
tags = {
Owner = var.tags["Owner"]
Environment = var.tags["Environment"]
}
}
resource "azurerm_network_interface" "test" {
count = 2
name = "tf-demo-nic${count.index}"
location = azurerm_resource_group.test.location
resource_group_name = azurerm_resource_group.test.name
ip_configuration {
name = "tf-demo-nic-config${count.index}"
subnet_id = azurerm_subnet.test.id
private_ip_address_allocation = "Dynamic"
public_ip_address_id = "${azurerm_public_ip.test[count.index].id}"
}
tags = {
Owner = var.tags["Owner"]
Environment = var.tags["Environment"]
}
}
resource "azurerm_network_security_group" "test" {
name = "tf-demo-vm-nsg"
location = azurerm_resource_group.test.location
resource_group_name = azurerm_resource_group.test.name
security_rule {
name = "SSH"
priority = 1001
direction = "Inbound"
access = "Allow"
protocol = "Tcp"
source_port_range = "*"
destination_port_range = "22"
source_address_prefix = "*"
destination_address_prefix = "*"
}
tags = {
Owner = var.tags["Owner"]
Environment = var.tags["Environment"]
}
}
resource "azurerm_network_interface_security_group_association" "test" {
count = length(azurerm_network_interface.test)
network_interface_id = "${azurerm_network_interface.test[count.index].id}"
network_security_group_id = azurerm_network_security_group.test.id
}
resource "azurerm_availability_set" "test" {
name = "tf-demo-availabilityset"
location = azurerm_resource_group.test.location
resource_group_name = azurerm_resource_group.test.name
platform_fault_domain_count = 2
platform_update_domain_count = 2
managed = true
tags = {
Owner = var.tags["Owner"]
Environment = var.tags["Environment"]
}
}
resource "azurerm_linux_virtual_machine" "test" {
count = 2
name = "tfdemovm${count.index}"
location = azurerm_resource_group.test.location
resource_group_name = azurerm_resource_group.test.name
network_interface_ids = [azurerm_network_interface.test[count.index].id]
size = "Standard_DS1_v2"
admin_username = "centos"
computer_name = "tfdemovm${count.index}"
availability_set_id = azurerm_availability_set.test.id
admin_ssh_key {
username = "centos"
public_key = file("~/.ssh/id_rsa.pub")
}
os_disk {
name = "tfdemovm${count.index}_OsDisk${count.index}"
caching = "ReadWrite"
storage_account_type = "Standard_LRS"
}
source_image_reference {
publisher = "OpenLogic"
offer = "CentOS"
sku = "7_8-gen2"
version = "latest"
}
tags = {
Owner = var.tags["Owner"]
Environment = var.tags["Environment"]
}
}
Based on the comments:
The issue was caused by not opening port 80 in azurerm_network_security_group.test; only port 22 was allowed. Opening port 80 solved the issue.
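For reference, the fix amounts to adding an HTTP rule like this sketch next to the existing SSH rule (the priority value is an assumption):

security_rule {
  name                       = "HTTP"
  priority                   = 1002
  direction                  = "Inbound"
  access                     = "Allow"
  protocol                   = "Tcp"
  source_port_range          = "*"
  destination_port_range     = "80"
  source_address_prefix      = "*"
  destination_address_prefix = "*"
}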
I'm setting up an AKS cluster using userDefinedRouting with an existing subnet and route table, which are associated with a network security group. Here is my code snippet.
provider "azurerm" {
version = "~> 2.25"
features {}
}
data "azurerm_resource_group" "aks" {
name = var.resource_group
}
#fetch existing subnet
data "azurerm_subnet" "aks" {
name = var.subnetname
virtual_network_name = var.virtual_network_name
resource_group_name = var.vnet_resource_group
}
resource "azurerm_network_interface" "k8svmnic" {
name = "k8svmnic"
resource_group_name = data.azurerm_resource_group.aks.name
location = data.azurerm_resource_group.aks.location
ip_configuration {
name = "internal"
subnet_id = data.azurerm_subnet.aks.id
private_ip_address_allocation = "Static"
private_ip_address = var.k8svmip #"10.9.56.10"
}
}
resource "azurerm_availability_set" "k8svmavset" {
name = "k8svmavset"
location = data.azurerm_resource_group.aks.location
resource_group_name = data.azurerm_resource_group.aks.name
platform_fault_domain_count = 3
platform_update_domain_count = 3
managed = true
}
resource "azurerm_network_security_group" "k8svmnsg" {
name = "k8vm-nsg"
resource_group_name = data.azurerm_resource_group.aks.name
location = data.azurerm_resource_group.aks.location
security_rule {
name = "allow_kube_tls"
protocol = "Tcp"
priority = 100
direction = "Inbound"
access = "Allow"
source_address_prefix = "VirtualNetwork"
destination_address_prefix = "*"
source_port_range = "*"
#destination_port_range = "443"
destination_port_ranges = ["443"]
description = "Allow kube-apiserver (tls) traffic to master"
}
security_rule {
name = "allow_ssh"
protocol = "Tcp"
priority = 101
direction = "Inbound"
access = "Allow"
source_address_prefix = "*"
destination_address_prefix = "*"
source_port_range = "*"
#destination_port_range = "22"
destination_port_ranges = ["22"]
description = "Allow SSH traffic to master"
}
}
resource "azurerm_network_interface_security_group_association" "k8svmnicnsg" {
network_interface_id = azurerm_network_interface.k8svmnic.id
network_security_group_id = azurerm_network_security_group.k8svmnsg.id
}
resource "azurerm_linux_virtual_machine" "k8svm" {
name = "k8svm"
resource_group_name = data.azurerm_resource_group.aks.name
location = data.azurerm_resource_group.aks.location
size = "Standard_D3_v2"
admin_username = var.admin_username
disable_password_authentication = true
availability_set_id = azurerm_availability_set.k8svmavset.id
network_interface_ids = [
azurerm_network_interface.k8svmnic.id,
]
admin_ssh_key {
username = var.admin_username
public_key = var.ssh_key
}
os_disk {
caching = "ReadWrite"
storage_account_type = "Standard_LRS"
disk_size_gb = 30
}
source_image_reference {
publisher = "microsoft-aks"
offer = "aks"
sku = "aks-engine-ubuntu-1804-202007"
version = "2020.07.24"
}
}
resource "azurerm_managed_disk" "k8svm-disk" {
name = "${azurerm_linux_virtual_machine.k8svm.name}-disk"
location = data.azurerm_resource_group.aks.location
resource_group_name = data.azurerm_resource_group.aks.name
storage_account_type = "Standard_LRS"
create_option = "Empty"
disk_size_gb = 512
}
resource "azurerm_virtual_machine_data_disk_attachment" "k8svm-disk-attachment" {
managed_disk_id = azurerm_managed_disk.k8svm-disk.id
virtual_machine_id = azurerm_linux_virtual_machine.k8svm.id
lun = 5
caching = "ReadWrite"
}
resource "azurerm_public_ip" "aks" {
name = "akspip"
resource_group_name = data.azurerm_resource_group.aks.name
location = data.azurerm_resource_group.aks.location
allocation_method = "Static"
sku = "Standard"
depends_on = [azurerm_virtual_machine_data_disk_attachment.k8svm-disk-attachment]
}
resource "azurerm_route_table" "aks"{
name = "aks" #var.subnetname
resource_group_name = data.azurerm_resource_group.aks.name
location = data.azurerm_resource_group.aks.location
disable_bgp_route_propagation = false
route {
name = "default_route"
address_prefix = "0.0.0.0/0"
next_hop_type = "VirtualAppliance"
next_hop_in_ip_address = var.k8svmip
}
route {
name = var.route_name
address_prefix = var.route_address_prefix
next_hop_type = var.route_next_hop_type
}
}
resource "azurerm_subnet_route_table_association" "aks" {
subnet_id = data.azurerm_subnet.aks.id
route_table_id = azurerm_route_table.aks.id
}
resource "azurerm_subnet_network_security_group_association" "aks" {
subnet_id = data.azurerm_subnet.aks.id
network_security_group_id = var.network_security_group
}
resource "null_resource" "previous" {}
resource "time_sleep" "wait_90_seconds" {
depends_on = [null_resource.previous]
create_duration = "90s"
}
# This resource will create (at least) 90 seconds after null_resource.previous
resource "null_resource" "next" {
depends_on = [time_sleep.wait_90_seconds]
}
resource "azurerm_kubernetes_cluster" "aks" {
name = data.azurerm_resource_group.aks.name
resource_group_name = data.azurerm_resource_group.aks.name
location = data.azurerm_resource_group.aks.location
dns_prefix = "akstfelk" #The dns_prefix must contain between 3 and 45 characters, and can contain only letters, numbers, and hyphens. It must start with a letter and must end with a letter or a number.
kubernetes_version = "1.18.8"
private_cluster_enabled = false
node_resource_group = var.node_resource_group
#api_server_authorized_ip_ranges = [] #var.api_server_authorized_ip_ranges
default_node_pool {
enable_node_public_ip = false
name = "agentpool"
node_count = var.node_count
orchestrator_version = "1.18.8"
vm_size = var.vm_size
os_disk_size_gb = var.os_disk_size_gb
vnet_subnet_id = data.azurerm_subnet.aks.id
type = "VirtualMachineScaleSets"
}
linux_profile {
admin_username = var.admin_username
ssh_key {
key_data = var.ssh_key
}
}
service_principal {
client_id = var.client_id
client_secret = var.client_secret
}
role_based_access_control {
enabled = true
}
network_profile {
network_plugin = "kubenet"
network_policy = "calico"
dns_service_ip = "172.16.1.10"
service_cidr = "172.16.0.0/16"
docker_bridge_cidr = "172.17.0.1/16"
pod_cidr = "172.40.0.0/16"
outbound_type = "userDefinedRouting"
load_balancer_sku = "Standard"
load_balancer_profile {
outbound_ip_address_ids = [ "${azurerm_public_ip.aks.id}" ]
}
# load_balancer_profile {
# managed_outbound_ip_count = 5
# #effective_outbound_ips = [ azurerm_public_ip.aks.id ]
# outbound_ip_address_ids = []
# outbound_ip_prefix_ids = []
# outbound_ports_allocated = 0
# }
}
addon_profile {
aci_connector_linux {
enabled = false
}
azure_policy {
enabled = false
}
http_application_routing {
enabled = false
}
kube_dashboard {
enabled = false
}
oms_agent {
enabled = false
}
}
depends_on = [azurerm_subnet_route_table_association.aks]
}
According to Azure doc it says: "By default, one public IP will automatically be created in the same resource group as the AKS cluster, if NO public IP, public IP prefix, or number of IPs is specified.
But in my case the outbound connection is not happening, hence cluster provisioning fails. I've even created another public IP and tried it through the load balancer profile, but I'm getting the below error:
Error: "network_profile.0.load_balancer_profile.0.managed_outbound_ip_count": conflicts with network_profile.0.load_balancer_profile.0.outbound_ip_address_ids
If I remove the load_balancer_profile from the script, I get the below error:
Error: creating Managed Kubernetes Cluster "aks-tf" (Resource Group "aks-tf"): containerservice.ManagedClustersClient#CreateOrUpdate: Failure sending request: StatusCode=400 -- Original Error: Code="InvalidUserDefinedRoutingWithLoadBalancerProfile" Message="UserDefinedRouting and load balancer profile are mutually exclusive. Please refer to http://aka.ms/aks/outboundtype for more details" Target="networkProfile.loadBalancerProfile"
Kindly help me find what I'm missing.
Any help would be appreciated.
When you use userDefinedRouting, you need to set the network_plugin to azure and put the AKS cluster inside a subnet with the user-defined route; here is the description:
The AKS cluster must be deployed into an existing virtual network with
a subnet that has been previously configured.
And if the network_plugin is set to azure, then the vnet_subnet_id field in the default_node_pool block must be set and pod_cidr must not be set. You can find this note in azurerm_kubernetes_cluster.
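Applied to the configuration in the question, that note amounts to roughly this sketch (addresses kept from the question; not a verified end-to-end fix):

network_profile {
  network_plugin     = "azure"               # instead of "kubenet"; pod_cidr removed
  network_policy     = "calico"
  dns_service_ip     = "172.16.1.10"
  service_cidr       = "172.16.0.0/16"
  docker_bridge_cidr = "172.17.0.1/16"
  outbound_type      = "userDefinedRouting"
  load_balancer_sku  = "Standard"            # no load_balancer_profile block with UDR
}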
Update:
It's a little more complex than you think; here is the network architecture of it and the steps to create it via the CLI. This architecture requires explicitly sending egress traffic to an appliance like a firewall, gateway, or proxy, or allowing Network Address Translation (NAT) to be done by a public IP assigned to the standard load balancer or appliance.
For the outbound, instead of a Public Load Balancer you can use an internal Load Balancer for internal traffic.
In addition, some steps you cannot achieve via Terraform, for example the Azure Firewall. Take a look at the steps and prepare via the CLI the resources that you cannot manage through Terraform.
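For example, the firewall piece might be prepared along these lines (a sketch with placeholder names; see the linked steps for the authoritative sequence):

az network firewall create --name aks-fw --resource-group aks-rg --location westeurope
az network firewall ip-config create --firewall-name aks-fw --name aks-fw-config \
  --public-ip-address aks-fw-pip --resource-group aks-rg --vnet-name aks-vnet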
I don't understand when the output command is used in Azure Terraform. Where does the output go? What do we need this output for? We have multiple .tf files in AWS which I am converting to Azure. What type of information is output?
What would happen without this output information?
Please provide examples.
Thank you
That's a lot of questions; I'm not sure I can cover all of this, but I can give an example.
A good practice is to build Terraform modules, then use the outputs from these modules to pass arguments along to other modules consuming the same type of input.
The following is a "base module" that creates a resource group, a VNet, a couple of subnets (external and internal, for example), and an NSG, and binds the rules to the subnets:
resource "azurerm_resource_group" "main_rg" {
name = var.resource_group_name
location = var.location
tags = {
group = var.resource_group_name
Customer = var.tag_reference
stack = "resource group"
}
}
resource "azurerm_virtual_network" "main_vnet" {
name = "${azurerm_resource_group.main_rg.name}-primary-vnet"
address_space = ["${var.vnet_cidr}"]
location = azurerm_resource_group.main_rg.location
resource_group_name = azurerm_resource_group.main_rg.name
tags = {
group = var.resource_group_name
Customer = var.tag_reference
stack = "virtual network"
}
}
resource "azurerm_subnet" "backend_subnet" {
name = "${azurerm_resource_group.main_rg.name}-backend-subnet"
resource_group_name = azurerm_resource_group.main_rg.name
virtual_network_name = azurerm_virtual_network.main_vnet.name
address_prefix = var.backend_subnet_cidr
service_endpoints = ["Microsoft.Sql", "Microsoft.Storage"]
}
resource "azurerm_subnet" "frontend_subnet" {
name = "${azurerm_resource_group.main_rg.name}-frontend-subnet"
resource_group_name = azurerm_resource_group.main_rg.name
virtual_network_name = azurerm_virtual_network.main_vnet.name
address_prefix = var.frontend_subnet_cidr
service_endpoints = ["Microsoft.Sql"]
}
resource "azurerm_network_security_group" "default_nsg" {
name = "${azurerm_resource_group.main_rg.name}-nsg"
location = azurerm_resource_group.main_rg.location
resource_group_name = azurerm_resource_group.main_rg.name
security_rule {
name = "appgwV1Exception"
priority = 100
direction = "Inbound"
access = "Allow"
protocol = "*"
source_port_range = "*"
destination_port_range = "65503-65534"
source_address_prefix = "Internet"
destination_address_prefix = "*"
description = "This is needed to allow the health check of the backend server to pass, according to the official documentation"
}
security_rule {
name = "appgwV2Exception"
priority = 200
direction = "Inbound"
access = "Allow"
protocol = "*"
source_port_range = "*"
destination_port_range = "65200-65535"
source_address_prefix = "Internet"
destination_address_prefix = "*"
description = "This is needed to allow the health check of the backend server to pass, according to the official documentation"
}
security_rule {
name = "Office"
priority = 500
direction = "Inbound"
access = "Allow"
protocol = "*"
source_port_range = "*"
destination_port_range = "443"
source_address_prefix = "1.2.3.4/32"
destination_address_prefix = "*"
description = "Allow 443 access from the office"
}
tags = {
group = var.resource_group_name
Customer = var.tag_reference
stack = "NSG"
}
}
resource "azurerm_subnet_network_security_group_association" "nsg_to_backend" {
network_security_group_id = azurerm_network_security_group.default_nsg.id
subnet_id = azurerm_subnet.backend_subnet.id
}
resource "azurerm_subnet_network_security_group_association" "nsg_to_frontend" {
network_security_group_id = azurerm_network_security_group.default_nsg.id
subnet_id = azurerm_subnet.frontend_subnet.id
}
output "resource_group_name" {
value = azurerm_resource_group.main_rg.name
}
output "vnet_name" {
value = azurerm_virtual_network.main_vnet.name
}
output "vnet_id" {
value = azurerm_virtual_network.main_vnet.id
}
output "backend_subnet_id" {
value = azurerm_subnet.backend_subnet.id
}
output "frontend_subnet_id" {
value = azurerm_subnet.frontend_subnet.id
}
output "nsg_id" {
value = azurerm_network_security_group.default_nsg.id
}
Again, the Terraform best practice is to use an output file, but I'll spare you that for now; see the outputs at the bottom of the file. Now I have another module for creating a VM, so my main.tf, if you will, or the complete file, might look a bit like this:
provider "azurerm" {
version = "~> 1.21"
}
terraform {
backend "azurerm" {}
}
module "base_infra" {
source = "../../base_infra"
location = var.location
resource_group_name = var.resource_group_name
vnet_cidr = var.vnet_cidr
backend_subnet_cidr = var.backend_subnet_cidr
frontend_subnet_cidr = var.frontend_subnet_cidr
tag_reference = var.tag_reference
}
module "webapp_vm" {
source = "../../webapp"
resource_group_name = module.base_infra.resource_group_name
location = var.location
vnet_cidr = module.base_infra.vnet_id
subnet_id = module.base_infra.backend_subnet_id
tag_reference = var.tag_reference
datadisk_size_gb = "200"
instance_count = "1"
instance_name_prefix = "${module.base_infra.resource_group_name}-webapp"
vm_size = var.vm_size
vm_username = var.vm_username
vm_password = module.webapp_vm_password.password_result
}
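After an apply, outputs like the ones above can be read back with the CLI (assuming the root module re-exports them), which is also how scripts and pipelines typically consume them:

terraform output resource_group_name
terraform output -json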
I did cut some corners in my answer; hope I gave you enough.