Terraform Azure: creating VMs in multiple availability sets

I am trying to create multiple Azure VMs and am not able to assign the VMs to different availability sets. Please see my code below:
Module "vm_dev":
terraform {
required_providers {
azurerm = {
source = "hashicorp/azurerm"
version = "2.82.0"
}
}
}
provider "azurerm" {
features {}
}
resource "azurerm_resource_group" "rg_dev" {
name = "MYORG_RG_DEV"
location = var.location
}
resource "azurerm_network_interface" "node_master" {
for_each = var.instances_master
name = "${var.hostname_prefix}-${each.key}-nic"
resource_group_name = azurerm_resource_group.rg_dev.name
location = azurerm_resource_group.rg_dev.location
internal_dns_name_label = "${var.hostname_prefix}-${each.key}"
ip_configuration {
name = "primary"
primary = true
subnet_id = var.subnet_id
private_ip_address = each.value.ip
private_ip_address_allocation = "Static"
private_ip_address_version = "IPv4"
}
}
resource "azurerm_linux_virtual_machine" "node_master" {
for_each = var.instances_master
name = "${var.hostname_prefix}-${each.key}"
computer_name = "${var.hostname_prefix}-${each.key}"
size = var.vm_size
resource_group_name = azurerm_resource_group.rg_dev.name
location = azurerm_resource_group.rg_dev.location
network_interface_ids = [azurerm_network_interface.node_master[each.key].id]
os_disk {
name = "${var.hostname_prefix}-${each.key}-disk-os"
storage_account_type = "StandardSSD_LRS"
caching = "ReadWrite"
}
source_image_reference {
publisher = "Canonical"
offer = "UbuntuServer"
sku = "18.04-LTS"
version = "latest"
}
admin_username = "myuser"
admin_ssh_key {
username = "myuser"
public_key = file("id.pub")
}
disable_password_authentication = true
}
resource "azurerm_network_interface" "node_data" {
for_each = var.instances_data
name = "${var.hostname_prefix}-${each.key}-nic"
resource_group_name = azurerm_resource_group.rg_dev.name
location = azurerm_resource_group.rg_dev.location
internal_dns_name_label = "${var.hostname_prefix}-${each.key}"
ip_configuration {
name = "primary"
primary = true
subnet_id = var.subnet_id
private_ip_address = each.value.ip
private_ip_address_allocation = "Static"
private_ip_address_version = "IPv4"
}
}
resource "azurerm_linux_virtual_machine" "node_data" {
for_each = var.instances_data
name = "${var.hostname_prefix}-${each.key}"
computer_name = "${var.hostname_prefix}-${each.key}"
size = var.vm_size
resource_group_name = azurerm_resource_group.rg_dev.name
location = azurerm_resource_group.rg_dev.location
network_interface_ids = [azurerm_network_interface.node_data[each.key].id]
os_disk {
name = "${var.hostname_prefix}-${each.key}-disk-os"
storage_account_type = "StandardSSD_LRS"
caching = "ReadWrite"
}
source_image_reference {
publisher = "Canonical"
offer = "UbuntuServer"
sku = "18.04-LTS"
version = "latest"
}
admin_username = "myuser"
admin_ssh_key {
username = "myuser"
public_key = file("id.pub")
}
disable_password_authentication = true
}
vm.tf:
module "vm_dev" {
source = "./vm_dev"
vm_size = "Standard_D4s_v3"
hostname_prefix = "myorg"
group_name_prefix = var.group_prefix
location = var.location
subnet_id = local.subnet_id
ssh_key = local.ssh_public_key
instances_master = {
"aa-elastic-master-0" = { ip = "10.0.100.1" }
"aa-elastic-master-1" = { ip = "10.0.100.2" }
"xx-elastic-master-0" = { ip = "10.0.99.1" }
"xx-elastic-master-1" = { ip = "10.0.99.2" }
}
instances_data = {
"aa-elastic-data-0" = { ip = "10.0.100.3" }
"aa-elastic-data-1" = { ip = "10.0.100.4" }
"aa-elastic-data-2" = { ip = "10.0.100.5" }
"xx-elastic-data-0" = { ip = "10.0.99.3" }
"xx-elastic-data-1" = { ip = "10.0.99.4" }
"xx-elastic-data-2" = { ip = "10.0.99.5" }
}
}
This works fine and I am able to create the VMs. So far each VM is created without being assigned to an availability set. I would like to specify which availability set each VM belongs to, something like this:
instances_master = {
"aa-elastic-master-0" = { ip = "10.0.100.1", as = "azurerm_availability_set.as_aamaster.id" }
"aa-elastic-master-1" = { ip = "10.0.100.2", as = "azurerm_availability_set.as_aamaster.id" }
"xx-elastic-master-0" = { ip = "10.0.99.1", as = "azurerm_availability_set.as_xxmaster.id" }
"xx-elastic-master-1" = { ip = "10.0.99.2", as = "azurerm_availability_set.as_xxmaster.id" }
}
instances_data = {
"aa-elastic-data-0" = { ip = "10.0.100.3", as = "azurerm_availability_set.as_aadata.id" }
"aa-elastic-data-1" = { ip = "10.0.100.4", as = "azurerm_availability_set.as_aadata.id" }
"aa-elastic-data-2" = { ip = "10.0.100.5", as = "azurerm_availability_set.as_aadata.id" }
"xx-elastic-data-0" = { ip = "10.0.99.3", as = "azurerm_availability_set.as_xxdata.id" }
"xx-elastic-data-1" = { ip = "10.0.99.4", as = "azurerm_availability_set.as_xxdata.id" }
"xx-elastic-data-2" = { ip = "10.0.99.5", as = "azurerm_availability_set.as_xxdata.id" }
}
and adding the following code in the module:
resource "azurerm_availability_set" "as_aamaster" {
name = "${var.hostname_prefix}-as-aamaster"
resource_group_name = azurerm_resource_group.rg_dev.name
location = azurerm_resource_group.rg_dev.location
managed = true
}
resource "azurerm_linux_virtual_machine" "node_master" {
for_each = var.instances_master
name = "${var.hostname_prefix}-${each.key}"
computer_name = "${var.hostname_prefix}-${each.key}"
size = var.vm_size
resource_group_name = azurerm_resource_group.rg_dev.name
location = azurerm_resource_group.rg_dev.location
availability_set_id = each.value.as
network_interface_ids = [azurerm_network_interface.node_master[each.key].id]
...
This gives me the error:
Error: Cannot parse Azure ID: parse "azurerm_availability_set.as_aamaster.id": invalid URI for request
on vm_dev/main.tf line 72, in resource "azurerm_linux_virtual_machine" "node_master":
72: availability_set_id = each.value.as
Any advice is appreciated.
Thanks

I tested your code and it failed with the same error. So, as Ivan Ignatiev has suggested, the as values have to be actual references to the availability set IDs rather than literal strings, so you will have to use the below:
instances_master = {
"aa-elastic-master-0" = { ip = "10.0.2.1", as = "${azurerm_availability_set.as_aamaster.id}" }
"aa-elastic-master-1" = { ip = "10.0.2.2", as = "${azurerm_availability_set.as_aamaster.id}" }
"xx-elastic-master-0" = { ip = "10.0.2.3", as = "${azurerm_availability_set.as_xxmaster.id}" }
"xx-elastic-master-1" = { ip = "10.0.2.4", as = "${azurerm_availability_set.as_xxmaster.id}" }
}
instances_data = {
"aa-elastic-data-0" = { ip = "10.0.2.5", as = "${azurerm_availability_set.as_aadata.id}" }
"aa-elastic-data-1" = { ip = "10.0.2.6", as = "${azurerm_availability_set.as_aadata.id}" }
"aa-elastic-data-2" = { ip = "10.0.2.7", as = "${azurerm_availability_set.as_aadata.id}" }
"xx-elastic-data-0" = { ip = "10.0.2.8", as = "${azurerm_availability_set.as_xxdata.id}" }
"xx-elastic-data-1" = { ip = "10.0.2.9", as = "${azurerm_availability_set.as_xxdata.id}" }
"xx-elastic-data-2" = { ip = "10.0.2.10", as = "${azurerm_availability_set.as_xxdata.id}" }
}
main.tf
resource "azurerm_network_interface" "node_master" {
for_each = var.instances_master
name = "ansuman-${each.key}-nic"
resource_group_name = azurerm_resource_group.rg_dev.name
location = azurerm_resource_group.rg_dev.location
internal_dns_name_label = "ansuman-${each.key}"
ip_configuration {
name = "primary"
primary = true
subnet_id = azurerm_subnet.example.id
private_ip_address = each.value.ip
private_ip_address_allocation = "Static"
private_ip_address_version = "IPv4"
}
}
resource "azurerm_availability_set" "as_aamaster" {
name = "ansuman-as-aamaster"
resource_group_name = azurerm_resource_group.rg_dev.name
location = azurerm_resource_group.rg_dev.location
managed = true
}
resource "azurerm_availability_set" "as_xxmaster" {
name = "ansuman-as-xxmaster"
resource_group_name = azurerm_resource_group.rg_dev.name
location = azurerm_resource_group.rg_dev.location
managed = true
}
resource "azurerm_linux_virtual_machine" "node_master" {
for_each = var.instances_master
name = "ansuman-${each.key}"
computer_name = "ansuman-${each.key}"
size = "Standard_B1s"
resource_group_name = azurerm_resource_group.rg_dev.name
location = azurerm_resource_group.rg_dev.location
availability_set_id = each.value.as
network_interface_ids = [azurerm_network_interface.node_master[each.key].id]
os_disk {
name = "ansuman-${each.key}-disk-os"
storage_account_type = "StandardSSD_LRS"
caching = "ReadWrite"
}
source_image_reference {
publisher = "Canonical"
offer = "UbuntuServer"
sku = "18.04-LTS"
version = "latest"
}
admin_username = "myuser"
admin_ssh_key {
username = "myuser"
public_key = file("~/.ssh/id_rsa.pub")
}
disable_password_authentication = true
}
resource "azurerm_network_interface" "node_data" {
for_each = var.instances_data
name = "ansuman-${each.key}-nic"
resource_group_name = azurerm_resource_group.rg_dev.name
location = azurerm_resource_group.rg_dev.location
internal_dns_name_label = "ansuman-${each.key}"
ip_configuration {
name = "primary"
primary = true
subnet_id = azurerm_subnet.example.id
private_ip_address = each.value.ip
private_ip_address_allocation = "Static"
private_ip_address_version = "IPv4"
}
}
resource "azurerm_availability_set" "as_aadata" {
name = "ansuman-as-aadata"
resource_group_name = azurerm_resource_group.rg_dev.name
location = azurerm_resource_group.rg_dev.location
managed = true
}
resource "azurerm_availability_set" "as_xxdata" {
name = "ansuman-as-xxdata"
resource_group_name = azurerm_resource_group.rg_dev.name
location = azurerm_resource_group.rg_dev.location
managed = true
}
resource "azurerm_linux_virtual_machine" "node_data" {
for_each = var.instances_data
name = "ansuman-${each.key}"
computer_name = "ansuman-${each.key}"
size = "Standard_B1s"
resource_group_name = azurerm_resource_group.rg_dev.name
location = azurerm_resource_group.rg_dev.location
availability_set_id = each.value.as
network_interface_ids = [azurerm_network_interface.node_data[each.key].id]
os_disk {
name = "ansuman-${each.key}-disk-os"
storage_account_type = "StandardSSD_LRS"
caching = "ReadWrite"
}
source_image_reference {
publisher = "Canonical"
offer = "UbuntuServer"
sku = "18.04-LTS"
version = "latest"
}
admin_username = "myuser"
admin_ssh_key {
username = "myuser"
public_key = file("~/.ssh/id_rsa.pub")
}
disable_password_authentication = true
}
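One detail that matters for this to work: values such as azurerm_availability_set.as_aamaster.id are resource references, so the instances_master / instances_data maps above cannot be variable defaults (variable defaults must be constant). A minimal sketch, assuming the maps are declared as locals next to the availability sets in the same main.tf:

# Sketch only: variable defaults cannot contain resource references,
# so the instance maps are expressed as locals alongside the availability sets.
locals {
  instances_master = {
    "aa-elastic-master-0" = { ip = "10.0.2.1", as = azurerm_availability_set.as_aamaster.id }
    "xx-elastic-master-0" = { ip = "10.0.2.3", as = azurerm_availability_set.as_xxmaster.id }
  }
}

The VM resources would then iterate with for_each = local.instances_master instead of var.instances_master, keeping availability_set_id = each.value.as as shown above.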
Output:

AnsumanBal-MT, this did not work for me (I have added a comment above), but I was able to solve it like this:
"aa-elastic-master-0" = { ip = "10.0.2.1", as = "0" }
"xx-elastic-master-0" = { ip = "10.0.2.3", as = "1" }
In the module:
resource "azurerm_availability_set" "as_dev" {
count = 5
name = "${var.hostname_prefix}-dev-${element(var.availability_set_name, count.index)}-as"
resource_group_name = azurerm_resource_group.rg_dev.name
location = var.location
}
and for azurerm_linux_virtual_machine I added:
availability_set_id = azurerm_availability_set.as_dev[each.value.as].id
with this variable:
variable "availability_set_name" {
description = "Availability set name that the VMs will be created in"
type = list(any)
default = ["aamaster", "xxmaster", "aadata", ....]
}
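A variation on this, sketched below, avoids the bare numeric index by creating the availability sets with for_each over the same name list and letting the instance maps carry the set name (e.g. as = "aamaster") instead of an index. This is only a suggestion and reuses the variable names above:

resource "azurerm_availability_set" "as_dev" {
  for_each            = toset(var.availability_set_name)
  name                = "${var.hostname_prefix}-dev-${each.value}-as"
  resource_group_name = azurerm_resource_group.rg_dev.name
  location            = var.location
  managed             = true
}

# in azurerm_linux_virtual_machine, look the set up by name:
#   availability_set_id = azurerm_availability_set.as_dev[each.value.as].id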

Related

Terraform Error: Failed to query available provider packages

I'm trying to deploy a simple infrastructure in Azure through Terraform; the infrastructure is made of an Application Gateway (with Web Application Firewall, so the WAF_v2 version) with two virtual machines in the backend.
At the beginning I implemented the Application Gateway (Standard_v2) without the WAF and it worked properly, but when I implemented the WAF, I got the following error after launching the "terraform init" command:
Error: Failed to query available provider packages
│
│ Could not retrieve the list of available versions for provider hashicorp/example: provider registry registry.terraform.io does not have a provider named
│ registry.terraform.io/hashicorp/example
│
│ All modules should specify their required_providers so that external consumers will get the correct providers when using a module. To see which modules are currently depending
│ on hashicorp/example, run the following command:
│ terraform providers
So I ran the command "terraform providers" as suggested by Terraform and got this:
Providers required by configuration:
.
├── provider[registry.terraform.io/hashicorp/azurerm] >= 2.97.0
├── provider[registry.terraform.io/hashicorp/example]
└── provider[registry.terraform.io/hashicorp/random]
In the following you can see the Terraform code of my infrastructure:
terraform {
required_version = ">=0.12"
required_providers {
azurerm = {
source = "hashicorp/azurerm"
version = ">=2.97.0"
}
}
}
provider "azurerm" {
features {}
}
resource "azurerm_resource_group" "rg1" {
name = "myResourceGroupAG"
location = "francecentral"
}
resource "example_wafpolicy" "exampleWAF" {
name = "example_wafpolicy_name"
resource_group_name = azurerm_resource_group.rg1.name
location = azurerm_resource_group.rg1.location
custom_rules {
name = "Rule1"
priority = 1
rule_type = "MatchRule"
match_conditions {
match_variables {
variable_name = "RemoteAddr"
}
operator = "IPMatch"
negation_condition = false
match_values = ["XX.XX.XX.XX"]
}
action = "Block"
}
policy_settings {
enabled = true
mode = "Prevention"
request_body_check = true
file_upload_limit_in_mb = 100
max_request_body_size_in_kb = 128
}
managed_rules {
managed_rule_set {
type = "OWASP"
version = "3.2"
}
}
}
resource "azurerm_virtual_network" "vnet1" {
name = "myVNet"
resource_group_name = azurerm_resource_group.rg1.name
location = azurerm_resource_group.rg1.location
address_space = ["10.21.0.0/16"]
}
resource "azurerm_subnet" "frontend" {
name = "myAGSubnet"
resource_group_name = azurerm_resource_group.rg1.name
virtual_network_name = azurerm_virtual_network.vnet1.name
address_prefixes = ["10.21.0.0/24"]
}
resource "azurerm_subnet" "backend" {
name = "myBackendSubnet"
resource_group_name = azurerm_resource_group.rg1.name
virtual_network_name = azurerm_virtual_network.vnet1.name
address_prefixes = ["10.21.1.0/24"]
}
resource "azurerm_public_ip" "pip1" {
name = "myAGPublicIPAddress"
resource_group_name = azurerm_resource_group.rg1.name
location = azurerm_resource_group.rg1.location
allocation_method = "Static"
sku = "Standard"
}
resource "azurerm_application_gateway" "network" {
name = "myAppGateway"
resource_group_name = azurerm_resource_group.rg1.name
location = azurerm_resource_group.rg1.location
sku {
name = "WAF_v2"
tier = "WAF_v2"
capacity = 2
}
gateway_ip_configuration {
name = "my-gateway-ip-configuration"
subnet_id = azurerm_subnet.frontend.id
}
frontend_port {
name = var.frontend_port_name
port = 80
}
frontend_ip_configuration {
name = var.frontend_ip_configuration_name
public_ip_address_id = azurerm_public_ip.pip1.id
}
backend_address_pool {
name = var.backend_address_pool_name
}
backend_http_settings {
name = var.http_setting_name
cookie_based_affinity = "Disabled"
port = 80
protocol = "Http"
request_timeout = 20
}
http_listener {
name = var.listener_name
frontend_ip_configuration_name = var.frontend_ip_configuration_name
frontend_port_name = var.frontend_port_name
protocol = "Http"
firewall_policy_id = example_wafpolicy.exampleWAF.id
}
request_routing_rule {
name = var.request_routing_rule_name
rule_type = "Basic"
priority = 25
http_listener_name = var.listener_name
backend_address_pool_name = var.backend_address_pool_name
backend_http_settings_name = var.http_setting_name
}
firewall_policy_id {
id = example_wafpolicy.exampleWAF.id
}
waf_configuration {
content{
enabled = lookup(waf_configuration.value,"enabled",true)
file_upload_limit_mb = lookup(waf_configuration.value,"file_upload_limit_mb",30)
firewall_mode = lookup(waf_configuration.value,"firewall_mode","Prevention")
max_request_body_size_kb = lookup(waf_configuration.value,"max_request_body_size_kb",128)
request_body_check = lookup(waf_configuration.value,"request_body_check",true)
rule_set_type = lookup(waf_configuration.value,"rule_set_type","OWASP")
rule_set_version = lookup(waf_configuration.value,"rule_set_version", "3.1")
}
}
}
resource "azurerm_network_interface" "nic" {
count = 2
name = "nic-${count.index+1}"
location = azurerm_resource_group.rg1.location
resource_group_name = azurerm_resource_group.rg1.name
ip_configuration {
name = "nic-ipconfig-${count.index+1}"
subnet_id = azurerm_subnet.backend.id
private_ip_address_allocation = "Dynamic"
}
}
resource "azurerm_network_interface_application_gateway_backend_address_pool_association" "nic-assoc01" {
count = 2
network_interface_id = azurerm_network_interface.nic[count.index].id
ip_configuration_name = "nic-ipconfig-${count.index+1}"
backend_address_pool_id = tolist(azurerm_application_gateway.network.backend_address_pool).0.id
}
resource "random_password" "password" {
length = 16
special = true
lower = true
upper = true
numeric = true
}
resource "azurerm_windows_virtual_machine" "vm" {
count = 2
name = "myVM${count.index+1}"
resource_group_name = azurerm_resource_group.rg1.name
location = azurerm_resource_group.rg1.location
size = "Standard_DS1_v2"
admin_username = "azureadmin"
admin_password = random_password.password.result
network_interface_ids = [
azurerm_network_interface.nic[count.index].id,
]
os_disk {
caching = "ReadWrite"
storage_account_type = "Standard_LRS"
}
source_image_reference {
publisher = "MicrosoftWindowsServer"
offer = "WindowsServer"
sku = "2019-Datacenter"
version = "latest"
}
}
resource "azurerm_virtual_machine_extension" "vm-extensions" {
count = 2
name = "vm${count.index+1}-ext"
virtual_machine_id = azurerm_windows_virtual_machine.vm[count.index].id
publisher = "Microsoft.Compute"
type = "CustomScriptExtension"
type_handler_version = "1.10"
settings = <<SETTINGS
{
"commandToExecute": "powershell Add-WindowsFeature Web-Server; powershell Add-Content -Path \"C:\\inetpub\\wwwroot\\Default.htm\" -Value $($env:computername)"
}
SETTINGS
}
In the following the script with the variables:
variable "backend_address_pool_name" {
default = "myBackendPool"
}
variable "frontend_port_name" {
default = "myFrontendPort"
}
variable "frontend_ip_configuration_name" {
default = "myAGIPConfig"
}
variable "http_setting_name" {
default = "myHTTPsetting"
}
variable "listener_name" {
default = "myListener"
}
variable "request_routing_rule_name" {
default = "myRoutingRule"
}
variable "redirect_configuration_name" {
default = "myRedirectConfig"
}
variable "example_wafpolicy_name" {
default = "myFirewallPolicy"
}
In the code above you can see match_values = ["XX.XX.XX.XX"]; the IP address is masked like this just for posting the question on Stack Overflow, normally my code contains a real IP address.
I would really appreciate your help to fix this error and, in general, to deploy an Application Gateway with WAF and two virtual machines in the backend in Azure through Terraform.
I have tried to search online but it seems that this topic has never been covered.
The issue was caused by the resource type name used in the Terraform code: "example_wafpolicy" is not a resource type provided by the azurerm provider, so Terraform inferred a provider named hashicorp/example from the prefix.
Solution:
Replace it with the resource type below:
resource "azurerm_web_application_firewall_policy" "example" {
I replicated the same code base locally; please find the code snippet below.
Main .tf file as follows:
resource "azurerm_resource_group" "rg1" {
name = "************"
location = "West Europe"
}
resource "azurerm_web_application_firewall_policy" "exampleWAF" {
name = "example_wafpolicy_name"
resource_group_name = azurerm_resource_group.rg1.name
location = azurerm_resource_group.rg1.location
custom_rules {
name = "Rule1"
priority = 1
rule_type = "MatchRule"
match_conditions {
match_variables {
variable_name = "RemoteAddr"
}
operator = "IPMatch"
negation_condition = false
match_values = ["192.168.1.0/24", "10.0.0.0/24"]
}
action = "Block"
}
policy_settings {
enabled = true
mode = "Prevention"
request_body_check = true
file_upload_limit_in_mb = 100
max_request_body_size_in_kb = 128
}
managed_rules {
managed_rule_set {
type = "OWASP"
version = "3.2"
}
}
}
resource "azurerm_virtual_network" "vnet1" {
name = "myVNet"
resource_group_name = azurerm_resource_group.rg1.name
location = azurerm_resource_group.rg1.location
address_space = ["10.21.0.0/16"]
}
resource "azurerm_subnet" "frontend" {
name = "myAGSubnet"
resource_group_name = azurerm_resource_group.rg1.name
virtual_network_name = azurerm_virtual_network.vnet1.name
address_prefixes = ["10.21.0.0/24"]
}
resource "azurerm_subnet" "backend" {
name = "myBackendSubnet"
resource_group_name = azurerm_resource_group.rg1.name
virtual_network_name = azurerm_virtual_network.vnet1.name
address_prefixes = ["10.21.1.0/24"]
}
resource "azurerm_public_ip" "pip1" {
name = "myAGPublicIPAddress"
resource_group_name = azurerm_resource_group.rg1.name
location = azurerm_resource_group.rg1.location
allocation_method = "Dynamic"
sku = "Basic"
}
locals {
backend_address_pool_name = "${azurerm_virtual_network.vnet1.name}-beap"
frontend_port_name = "${azurerm_virtual_network.vnet1.name}-feport"
frontend_ip_configuration_name = "${azurerm_virtual_network.vnet1.name}-feip"
http_setting_name = "${azurerm_virtual_network.vnet1.name}-be-htst"
listener_name = "${azurerm_virtual_network.vnet1.name}-httplstn"
request_routing_rule_name = "${azurerm_virtual_network.vnet1.name}-rqrt"
redirect_configuration_name = "${azurerm_virtual_network.vnet1.name}-rdrcfg"
}
resource "azurerm_application_gateway" "network" {
name = "example-appgateway"
resource_group_name = azurerm_resource_group.rg1.name
location = azurerm_resource_group.rg1.location
sku {
name = "Standard_Small"
tier = "Standard"
capacity = 2
}
gateway_ip_configuration {
name = "my-gateway-ip-configuration"
subnet_id = azurerm_subnet.frontend.id
}
frontend_port {
name = local.frontend_port_name
port = 80
}
frontend_ip_configuration {
name = local.frontend_ip_configuration_name
public_ip_address_id = azurerm_public_ip.pip1.id
}
backend_address_pool {
name = local.backend_address_pool_name
}
backend_http_settings {
name = local.http_setting_name
cookie_based_affinity = "Disabled"
path = "/path1/"
port = 80
protocol = "Http"
request_timeout = 60
}
http_listener {
name = local.listener_name
frontend_ip_configuration_name = local.frontend_ip_configuration_name
frontend_port_name = local.frontend_port_name
protocol = "Http"
}
request_routing_rule {
name = local.request_routing_rule_name
rule_type = "Basic"
http_listener_name = local.listener_name
backend_address_pool_name = local.backend_address_pool_name
backend_http_settings_name = local.http_setting_name
}
}
resource "azurerm_network_interface" "nic" {
count = 2
name = "nic-${count.index+1}"
location = azurerm_resource_group.rg1.location
resource_group_name = azurerm_resource_group.rg1.name
ip_configuration {
name = "nic-ipconfig-${count.index+1}"
subnet_id = azurerm_subnet.backend.id
private_ip_address_allocation = "Dynamic"
}
}
resource "azurerm_network_interface_application_gateway_backend_address_pool_association" "nic-assoc01" {
count = 2
network_interface_id = azurerm_network_interface.nic[count.index].id
ip_configuration_name = "nic-ipconfig-${count.index+1}"
backend_address_pool_id = tolist(azurerm_application_gateway.network.backend_address_pool).0.id
}
resource "random_password" "password" {
length = 16
special = true
lower = true
upper = true
numeric = true
}
resource "azurerm_windows_virtual_machine" "vm" {
count = 2
name = "myVM${count.index+1}"
resource_group_name = azurerm_resource_group.rg1.name
location = azurerm_resource_group.rg1.location
size = "Standard_DS1_v2"
admin_username = "azureadmin"
admin_password = random_password.password.result
network_interface_ids = [
azurerm_network_interface.nic[count.index].id,
]
os_disk {
caching = "ReadWrite"
storage_account_type = "Standard_LRS"
}
source_image_reference {
publisher = "MicrosoftWindowsServer"
offer = "WindowsServer"
sku = "2019-Datacenter"
version = "latest"
}
}
resource "azurerm_virtual_machine_extension" "vm-extensions" {
count = 2
name = "vm${count.index+1}-ext"
virtual_machine_id = azurerm_windows_virtual_machine.vm[count.index].id
publisher = "Microsoft.Compute"
type = "CustomScriptExtension"
type_handler_version = "1.10"
settings = <<SETTINGS
{
"commandToExecute": "powershell Add-WindowsFeature Web-Server; powershell Add-Content -Path \"C:\\inetpub\\wwwroot\\Default.htm\" -Value $($env:computername)"
}
SETTINGS
}
The provider .tf file is as follows:
terraform {
required_version = "~>1.3.3"
required_providers {
azurerm = {
source = "hashicorp/azurerm"
version = ">=3.0.0"
}
}
}
provider "azurerm" {
features {}
skip_provider_registration = true
}
Upon running terraform plan and apply:
terraform plan
terraform apply -auto-approve
The plan and apply complete successfully and the deployment can be verified in the Azure Portal (screenshots omitted).
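If the goal is still a WAF_v2 gateway as in the original question, the renamed policy can be attached to the gateway by ID. A minimal sketch of the arguments involved, assuming the rest of the azurerm_application_gateway blocks stay as shown above (note that a v2 SKU also requires a Standard, statically allocated public IP):

resource "azurerm_application_gateway" "network" {
  # ... gateway_ip_configuration, frontend, backend, listener and routing blocks as above ...
  sku {
    name     = "WAF_v2"
    tier     = "WAF_v2"
    capacity = 2
  }
  # firewall_policy_id is a plain argument (not a block) that takes the policy ID
  firewall_policy_id = azurerm_web_application_firewall_policy.exampleWAF.id
}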

How to deploy two VMs to two different resource groups

Could anyone give me a hint on how I should do this? I want to deploy 4 Azure Linux VMs, but 2 in each of two different resource groups,
e.g. rg-one will have 2 Linux VMs and rg-two the other two.
The way I did it is to create two blocks of azurerm_linux_virtual_machine; it is working, but I was wondering whether it could be simplified.
Thanks in advance for the help.
Here is a snippet of what I have done.
# Fetch exisiting resource_group
data "azurerm_resource_group" "rg-dock001" {
name = var.resource_group01
}
# Fetch vm network
data "azurerm_virtual_network" "vm_network" {
name = var.vm_network
resource_group_name = var.rg_name_network
}
output "azurerm_virtual_network" {
value = data.azurerm_virtual_network.vm_network.id
}
# Fetch vm subnet
data "azurerm_subnet" "vm_subnet" {
name = var.vm_subnet
resource_group_name = var.rg_name_network
virtual_network_name = var.vm_network
}
output "subnet_id" {
value = data.azurerm_subnet.vm_subnet.id
}
resource "azurerm_network_interface" "ens124-01" {
name = var.vm_nic01[count.index]
count = length(var.vm_nic01)
location = var.rg_location
resource_group_name = var.resource_group01
ip_configuration {
name = "internal"
subnet_id = data.azurerm_subnet.vm_subnet.id
private_ip_address_allocation = "Static"
private_ip_address = "10.241.25.${count.index + 10}"
}
tags = var.vm_tags
}
output "private_ip01" {
value = length(azurerm_network_interface.ens124-01.*.private_ip_address)
}
# Fetch existing image
data "azurerm_image" "custom_docker_image" {
name_regex = var.packer_image
sort_descending = true
resource_group_name = var.resource_group_image
}
output "image_id" {
value = data.azurerm_image.custom_docker_image.id
}
# create and display an SSH key
resource "tls_private_key" "ssh" {
algorithm = "RSA"
rsa_bits = 4096
}
output "tls_private_key" {
value = tls_private_key.ssh.private_key_pem
sensitive = true
}
resource "azurerm_linux_virtual_machine" "main01" {
name = var.vm_name01[count.index]
count = length(var.vm_name01)
resource_group_name = var.resource_group01
location = var.rg_location
size = "standard_ds3_v2"
admin_username = var.username
admin_password = var.password
disable_password_authentication = true
network_interface_ids = ["${element(azurerm_network_interface.ens124-01.*.id, count.index)}"]
source_image_id = data.azurerm_image.custom_docker_image.id
computer_name = var.vm_name01[count.index]
admin_ssh_key {
username = var.ssh_username
public_key = tls_private_key.ssh.public_key_openssh
}
os_disk {
name = "disk-int-dock-0${count.index + 1}"
storage_account_type = "Standard_LRS"
caching = "ReadWrite"
}
tags = var.vm_tags
}
data "azurerm_resource_group" "rg-dock002" {
name = var.resource_group02
}
resource "azurerm_network_interface" "ens124-02" {
name = var.vm_nic02[count.index]
count = length(var.vm_nic02)
location = var.rg_location
resource_group_name = var.resource_group02
ip_configuration {
name = "internal"
subnet_id = data.azurerm_subnet.vm_subnet.id
private_ip_address_allocation = "Static"
private_ip_address = "10.241.25.${count.index + 20}"
}
tags = var.vm_tags
}
output "private_ip02" {
value = length(azurerm_network_interface.ens124-02.*.private_ip_address)
}
resource "azurerm_linux_virtual_machine" "main02" {
name = var.vm_name02[count.index]
count = length(var.vm_name02)
resource_group_name = var.resource_group02
location = var.rg_location
size = "standard_ds3_v2"
admin_username = var.username
admin_password = var.password
disable_password_authentication = true
network_interface_ids = ["${element(azurerm_network_interface.ens124-02.*.id, count.index)}"]
source_image_id = data.azurerm_image.custom_docker_image.id
computer_name = var.vm_name02[count.index]
admin_ssh_key {
username = var.ssh_username
public_key = tls_private_key.ssh.public_key_openssh
}
os_disk {
name = "disk-int-dock-0${count.index + 1}"
storage_account_type = "Standard_LRS"
caching = "ReadWrite"
}
tags = var.vm_tags
}
As per your requirement you can use something like below :
provider "azurerm" {
features {}
}
variable "VM" {
default= {
vm1={
rg_name= "ajaytest",
vnet_name="ajay-vnet",
subnet_name="default",
nic_name = "ansumanVM-nic",
private_ip="10.0.0.10",
vm_name="ansumanVM"
},
vm2={
rg_name= "kartiktest",
vnet_name="kartik-vnet",
subnet_name="default",
nic_name="terraformVM-nic",
private_ip="10.0.0.20",
vm_name="terraformVM"
}
}
}
variable "username" {
default="ansuman"
}
variable "password" {
default="Password#1234!"
}
data "azurerm_shared_image_version" "example" {
name = "0.0.1"
image_name = "UbuntuwithNginxinstalled"
gallery_name = "ansumantestgallery"
resource_group_name = "ansumantest"
}
data "azurerm_resource_group" "rg-dock" {
for_each = var.VM
name = each.value["rg_name"]
}
# Fetch vm network
data "azurerm_virtual_network" "vm_network" {
for_each = var.VM
name = each.value["vnet_name"]
resource_group_name = each.value["rg_name"]
}
# Fetch vm subnet
data "azurerm_subnet" "vm_subnet" {
for_each = var.VM
name = each.value["subnet_name"]
resource_group_name = each.value["rg_name"]
virtual_network_name = each.value["vnet_name"]
}
resource "azurerm_network_interface" "ens124" {
for_each = var.VM
name = each.value["nic_name"]
location = data.azurerm_resource_group.rg-dock[each.key].location
resource_group_name = data.azurerm_resource_group.rg-dock[each.key].name
ip_configuration {
name = "internal"
subnet_id = data.azurerm_subnet.vm_subnet[each.key].id
private_ip_address_allocation = "Static"
private_ip_address = each.value["private_ip"]
}
}
# create and display an SSH key
resource "tls_private_key" "ssh" {
algorithm = "RSA"
rsa_bits = 4096
}
output "tls_private_key" {
value = tls_private_key.ssh.private_key_pem
sensitive = true
}
resource "azurerm_linux_virtual_machine" "main" {
for_each = var.VM
name = each.value["vm_name"]
resource_group_name = data.azurerm_resource_group.rg-dock[each.key].name
location = data.azurerm_resource_group.rg-dock[each.key].location
size = "standard_ds3_v2"
admin_username = var.username
admin_password = var.password
disable_password_authentication = true
network_interface_ids = [azurerm_network_interface.ens124[format("%s", each.key)].id]
source_image_id = data.azurerm_shared_image_version.example.id
computer_name = each.key
admin_ssh_key {
username = var.username
public_key = tls_private_key.ssh.public_key_openssh
}
os_disk {
name = "disk-int-dock-${each.key}"
storage_account_type = "Standard_LRS"
caching = "ReadWrite"
}
}
Note: The admin username and the SSH username must be the same, due to a limitation of the azurerm_linux_virtual_machine resource.
Also, the location of the VM, VNet, image and other resources must be the same.
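Because everything is driven by the var.VM map, deploying a VM into yet another resource group only needs one more entry in the variable's default. For example (the resource group, VNet and name values below are hypothetical and must already exist, since they are read via data sources):

vm3 = {
  rg_name     = "thirdtest",
  vnet_name   = "third-vnet",
  subnet_name = "default",
  nic_name    = "thirdVM-nic",
  private_ip  = "10.0.1.10",
  vm_name     = "thirdVM"
}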
Output:

Terraform Azure 2xLinux VM with httpd + LoadBalancer not working

I am trying to create a Terraform PoC that has two CentOS VMs and an Azure Load Balancer.
Each VM has one private and one public IP and has the httpd package installed.
Even though the elements are provisioned successfully, accessing the public IP of the Load Balancer does not return the default httpd content (inside the CentOS VMs, curl against localhost or the IP returns the correct content).
No firewall is enabled on CentOS.
Below is the Terraform file (the location I am using is westeurope).
Q: What am I missing in the configuration of the Load Balancer? All items are provisioned with no errors from Terraform, but when accessing the public IP of the load balancer I get a timeout instead of the default Apache page.
resource "azurerm_resource_group" "test" {
name = var.rg_name
location = var.location
tags = {
Owner = var.tags["Owner"]
Environment = var.tags["Environment"]
}
}
resource "azurerm_virtual_network" "test" {
name = var.vnet_name
address_space = ["192.168.0.0/16"]
location = azurerm_resource_group.test.location
resource_group_name = azurerm_resource_group.test.name
tags = {
Owner = var.tags["Owner"]
Environment = var.tags["Environment"]
}
}
resource "azurerm_subnet" "test" {
name = var.networks["subnet1"]
resource_group_name = azurerm_resource_group.test.name
virtual_network_name = azurerm_virtual_network.test.name
address_prefixes = ["192.168.0.0/24"]
}
resource "azurerm_public_ip" "testlb" {
name = "tf-demo-publicIPForLB"
location = azurerm_resource_group.test.location
resource_group_name = azurerm_resource_group.test.name
sku = "Standard"
allocation_method = "Static"
domain_name_label = "acndemo"
tags = {
Owner = var.tags["Owner"]
Environment = var.tags["Environment"]
}
}
resource "azurerm_lb" "test" {
name = "tf-demo-loadBalancer"
location = azurerm_resource_group.test.location
resource_group_name = azurerm_resource_group.test.name
sku = "Standard"
frontend_ip_configuration {
name = "tf-demo-lb-publicIPAddress"
public_ip_address_id = azurerm_public_ip.testlb.id
}
tags = {
Owner = var.tags["Owner"]
Environment = var.tags["Environment"]
}
}
resource "azurerm_lb_backend_address_pool" "test" {
loadbalancer_id = azurerm_lb.test.id
name = "tf-demo-BackEndAddressPool"
}
resource "azurerm_network_interface_backend_address_pool_association" "test" {
count = 2
network_interface_id = "${azurerm_network_interface.test[count.index].id}"
ip_configuration_name = "tf-demo-nic-config${count.index}"
backend_address_pool_id = azurerm_lb_backend_address_pool.test.id
}
resource "azurerm_lb_probe" "test" {
resource_group_name = azurerm_resource_group.test.name
loadbalancer_id = azurerm_lb.test.id
name = "tf-demo-http-running-probe"
protocol = "Http"
port = 80
request_path = "/"
}
resource "azurerm_lb_rule" "test" {
resource_group_name = azurerm_resource_group.test.name
loadbalancer_id = azurerm_lb.test.id
name = "tf-demo-LBRule"
protocol = "Tcp"
frontend_port = 80
backend_port = 80
frontend_ip_configuration_name = "tf-demo-lb-publicIPAddress"
backend_address_pool_id = azurerm_lb_backend_address_pool.test.id
probe_id = azurerm_lb_probe.test.id
}
resource "azurerm_public_ip" "test" {
count = 2
name = "tf-demo-publicIPForVM${count.index}"
location = azurerm_resource_group.test.location
resource_group_name = azurerm_resource_group.test.name
sku = "Standard"
allocation_method = "Static"
domain_name_label = "acngrvm${count.index}"
tags = {
Owner = var.tags["Owner"]
Environment = var.tags["Environment"]
}
}
resource "azurerm_network_interface" "test" {
count = 2
name = "tf-demo-nic${count.index}"
location = azurerm_resource_group.test.location
resource_group_name = azurerm_resource_group.test.name
ip_configuration {
name = "tf-demo-nic-config${count.index}"
subnet_id = azurerm_subnet.test.id
private_ip_address_allocation = "dynamic"
public_ip_address_id = "${azurerm_public_ip.test[count.index].id}"
}
tags = {
Owner = var.tags["Owner"]
Environment = var.tags["Environment"]
}
}
resource "azurerm_network_security_group" "test" {
name = "tf-demo-vm-nsg"
location = azurerm_resource_group.test.location
resource_group_name = azurerm_resource_group.test.name
security_rule {
name = "SSH"
priority = 1001
direction = "Inbound"
access = "Allow"
protocol = "Tcp"
source_port_range = "*"
destination_port_range = "22"
source_address_prefix = "*"
destination_address_prefix = "*"
}
tags = {
Owner = var.tags["Owner"]
Environment = var.tags["Environment"]
}
}
resource "azurerm_network_interface_security_group_association" "test" {
count = length(azurerm_network_interface.test)
network_interface_id = "${azurerm_network_interface.test[count.index].id}"
network_security_group_id = azurerm_network_security_group.test.id
}
resource "azurerm_availability_set" "test" {
name = "tf-demo-availabilityset"
location = azurerm_resource_group.test.location
resource_group_name = azurerm_resource_group.test.name
platform_fault_domain_count = 2
platform_update_domain_count = 2
managed = true
tags = {
Owner = var.tags["Owner"]
Environment = var.tags["Environment"]
}
}
resource "azurerm_linux_virtual_machine" "test" {
count = 2
name = "tfdemovm${count.index}"
location = azurerm_resource_group.test.location
resource_group_name = azurerm_resource_group.test.name
network_interface_ids = [azurerm_network_interface.test[count.index].id]
size = "Standard_DS1_v2"
admin_username = "centos"
computer_name = "tfdemovm${count.index}"
availability_set_id = azurerm_availability_set.test.id
admin_ssh_key {
username = "centos"
public_key = file("~/.ssh/id_rsa.pub")
}
os_disk {
name = "tfdemovm${count.index}_OsDisk${count.index}"
caching = "ReadWrite"
storage_account_type = "Standard_LRS"
}
source_image_reference {
publisher = "OpenLogic"
offer = "CentOS"
sku = "7_8-gen2"
version = "latest"
}
tags = {
Owner = var.tags["Owner"]
Environment = var.tags["Environment"]
}
}
Based on the comments: the issue was caused by not opening port 80 in azurerm_network_security_group.test; only port 22 was allowed. Adding a rule that opens port 80 solved the issue.
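For completeness, a minimal sketch of the NSG with the extra rule (resource names match the configuration above; the priority value is arbitrary):

resource "azurerm_network_security_group" "test" {
  name                = "tf-demo-vm-nsg"
  location            = azurerm_resource_group.test.location
  resource_group_name = azurerm_resource_group.test.name

  security_rule {
    name                       = "SSH"
    priority                   = 1001
    direction                  = "Inbound"
    access                     = "Allow"
    protocol                   = "Tcp"
    source_port_range          = "*"
    destination_port_range     = "22"
    source_address_prefix      = "*"
    destination_address_prefix = "*"
  }

  # new rule: allow the load balancer health probe and client traffic to reach httpd
  security_rule {
    name                       = "HTTP"
    priority                   = 1002
    direction                  = "Inbound"
    access                     = "Allow"
    protocol                   = "Tcp"
    source_port_range          = "*"
    destination_port_range     = "80"
    source_address_prefix      = "*"
    destination_address_prefix = "*"
  }
}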

How to extract private IP address from terraform module

Terraform code is below
module "centos-vm-author-2" {
source = "terraform.automation.temp.com.au/temp/temp-linux-vm/azurerm"
version = "6.7.0"
location = var.resource_location
resource_group_name = var.resource_group_name_2
vm_count = "1"
tags = local.tags
size = var.vm_size
hostname_prefix = var.hostname_prefix
hostname_suffix_start_range = "491"
image_publisher = "OpenLogic"
image_offer = "Centos"
image_sku = "7_9"
subnet_id = var.auth_pub_subnet_id
admin_username = "azureadmin"
availability_set_id = azurerm_availability_set.aemfeature1authoras.id
patching_tags = local.patching_tags
ansible_vault_key = var.ansible_vault_key
log_to_loganalytics = false
ou_tags = local.ou_tags
os_disk_size = var.os_size_gb
os_disk_type = var.storage_account_type
server_access_memberships = ["CN=DSTDEVOPS,OU=DistributionGroups,OU=Groups,OU=Resources,DC=temp,DC=int"]
sudoers = ["%DSTDEVOPS"]
data_disks = [
[
{
disk_size_gb = var.disk_size_gb
storage_account_type = var.storage_account_type
caching = "ReadWrite"
create_option = "Empty"
source_resource_id = ""
write_accelerator_enabled = false
}
]
]
}
resource "null_resource" "centos-vm-author-ansible" {
provisioner "local-exec" {
command = <<EOF
ansible-playbook -i '${join(",", azurerm_network_interface.centos-vm-author-2.*.private_ip_address)},'-e ansible_user=${var.admin_username} -e "role_name=automate-author" main.yaml
EOF
}
depends_on = [
module.centos-vm-author-2
]
}
Basically I want to tell Ansible the private IP on which it should execute the role.
I am getting an error like the one below:
Error: Reference to undeclared resource
on main.tf line 236, in resource "null_resource" "centos-vm-author-ansible":
ansible-playbook -i '${join(",", azurerm_network_interface.centos-vm-author-2.*.private_ip_address)},' -e ansible_user=${var.admin_username} -e "role_name=automate-author" main.yaml
A managed resource "azurerm_network_interface" "centos-vm-author-2" has not been declared in the root module.
I sincerely appreciate any help in understanding what the issue is and how to resolve it.
P.S: The TF Module code is like below:
resource "azurerm_network_interface" "main" {
count = var.vm_count
name = "${format("${var.hostname_prefix}%04d", var.hostname_suffix_start_range + count.index, )}-nic"
location = var.location
resource_group_name = var.resource_group_name
enable_accelerated_networking = var.enable_accelerated_networking
ip_configuration {
name = "${format("${var.hostname_prefix}%04d", var.hostname_suffix_start_range + count.index, )}-ipconfig"
subnet_id = var.subnet_id
private_ip_address_allocation = var.private_ip_address_allocation
private_ip_address = var.private_ip_address
public_ip_address_id = var.enable_public_ip_address ? azurerm_public_ip.main[count.index].id : null
}
tags = var.tags
}
resource "azurerm_network_interface_backend_address_pool_association" "lbconf" {
count = var.backend_address_pool_id == null ? 0 : var.vm_count
network_interface_id = azurerm_network_interface.main[count.index].id
ip_configuration_name = azurerm_network_interface.main[count.index].ip_configuration[0].name
backend_address_pool_id = var.backend_address_pool_id
}
resource "azurerm_linux_virtual_machine" "main" {
count = var.vm_count
name = format("${var.hostname_prefix}%04d", var.hostname_suffix_start_range + count.index, )
location = var.location
resource_group_name = var.resource_group_name
admin_username = var.admin_username
admin_ssh_key {
username = var.admin_username
public_key = chomp(tls_private_key.bootstrap_private_key.public_key_openssh)
}
disable_password_authentication = var.disable_password_authentication
network_interface_ids = [azurerm_network_interface.main[count.index].id]
size = var.size
availability_set_id = var.availability_set_id
source_image_reference {
publisher = var.image_publisher
offer = var.image_offer
sku = var.image_sku
version = var.image_version
}
os_disk {
name = "${format("${var.hostname_prefix}%04d", var.hostname_suffix_start_range + count.index, )}-osdisk"
caching = "ReadWrite"
storage_account_type = var.os_disk_type
disk_size_gb = var.os_disk_size
}
dynamic "identity" {
for_each = var.identity
content {
type = identity.value["type"]
identity_ids = identity.value["type"] == "SystemAssigned" ? [] : identity.value["identity_ids"]
}
}
dynamic "plan" {
for_each = var.marketplace_image ? [1] : []
content {
name = var.image_sku
product = var.image_offer
publisher = var.image_publisher
}
}
# boot_diagnostics {
# storage_account_uri = var.boot_diagnostics_storage_uri
# }
tags = var.ou_tags == null ? merge(var.tags, var.patching_tags) : merge(var.tags, var.ou_tags, var.patching_tags)
}
To refer to your module, instead of:
azurerm_network_interface.centos-vm-author-2.*.private_ip_address
it should be:
module.centos-vm-author-2.private_ip_addresses
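Note that this assumes the temp-linux-vm module actually exposes an output with that name; if it does not, an output along these lines (the name private_ip_addresses is an assumption here) would have to be added to the module, based on the azurerm_network_interface.main resource shown above:

# inside the module (sketch; the output name is an assumption)
output "private_ip_addresses" {
  description = "Private IP addresses of the NICs created by this module"
  value       = azurerm_network_interface.main.*.private_ip_address
}

The local-exec command then becomes ansible-playbook -i '${join(",", module.centos-vm-author-2.private_ip_addresses)},' -e ansible_user=${var.admin_username} -e "role_name=automate-author" main.yaml.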

Inappropriate value for attribute "network_interface_id": string required

Error: Incorrect attribute value type
on resources.tf line 387, in resource "azurerm_network_interface_backend_address_pool_association" "main":
387: network_interface_id = module.linuxservers.vm_ids
|----------------
| module.linuxservers.vm_ids is tuple with 3 elements
Inappropriate value for attribute "network_interface_id": string required
Hi, I am trying to associate my 3 Azure VMs with the load balancer backend pool but am getting the above error. Can you please guide me on this? My code is below.
Resource.tf
module "linuxservers" {
source = "Azure/compute/azurerm"
resource_group_name = azurerm_resource_group.main.name
vm_hostname = "Elastic"
nb_instances = 3
nb_public_ip = 0
remote_port = "22"
admin_username = var.admin_username
vm_os_publisher = "OpenLogic"
vm_os_offer = "CentOS"
vm_os_sku = "7.5"
vm_size = "Standard_D2as_v4"
ssh_key = "./putty_key.pub"
vnet_subnet_id = data.azurerm_subnet.elasticsearch.id
tags = var.tags
}
resource "azurerm_lb" "main"{
name = "Elastic_LB"
location = azurerm_resource_group.main.location
resource_group_name = azurerm_resource_group.main.name
frontend_ip_configuration{
name = "FrontEndIP"
subnet_id = data.azurerm_subnet.elasticsearch.id
}
}
resource "azurerm_lb_backend_address_pool" "main" {
resource_group_name = azurerm_resource_group.main.name
loadbalancer_id = azurerm_lb.main.id
name = "BackEndAddressPool"
}
resource "azurerm_network_interface_backend_address_pool_association" "main" {
ip_configuration_name = "Configuration-VMs"
network_interface_id = module.linuxservers.vm_ids
backend_address_pool_id = azurerm_lb_backend_address_pool.main.id
}
Module/main.tf
resource "azurerm_virtual_machine" "vm-linux" {
count = ! contains(list(var.vm_os_simple, var.vm_os_offer), "WindowsServer") && ! var.is_windows_image ? var.nb_instances : 0
name = "${var.vm_hostname}-${count.index}"
resource_group_name = data.azurerm_resource_group.vm.name
location = coalesce(var.location, data.azurerm_resource_group.vm.location)
availability_set_id = azurerm_availability_set.vm.id
vm_size = var.vm_size
network_interface_ids = [element(azurerm_network_interface.vm.*.id, count.index)]
delete_os_disk_on_termination = var.delete_os_disk_on_termination
dynamic identity {
for_each = length(var.identity_ids) == 0 && var.identity_type == "SystemAssigned" ? [var.identity_type] : []
content {
type = var.identity_type
}
}
dynamic identity {
for_each = length(var.identity_ids) > 0 || var.identity_type == "UserAssigned" ? [var.identity_type] : []
content {
type = var.identity_type
identity_ids = length(var.identity_ids) > 0 ? var.identity_ids : []
}
}
storage_image_reference {
id = var.vm_os_id
publisher = var.vm_os_id == "" ? coalesce(var.vm_os_publisher, module.os.calculated_value_os_publisher) : ""
offer = var.vm_os_id == "" ? coalesce(var.vm_os_offer, module.os.calculated_value_os_offer) : ""
sku = var.vm_os_id == "" ? coalesce(var.vm_os_sku, module.os.calculated_value_os_sku) : ""
version = var.vm_os_id == "" ? var.vm_os_version : ""
}
storage_os_disk {
name = "osdisk-${var.vm_hostname}-${count.index}"
create_option = "FromImage"
caching = "ReadWrite"
managed_disk_type = var.storage_account_type
}
dynamic storage_data_disk {
for_each = range(var.nb_data_disk)
content {
name = "${var.vm_hostname}-datadisk-${count.index}-${storage_data_disk.value}"
create_option = "Empty"
lun = storage_data_disk.value
disk_size_gb = var.data_disk_size_gb
managed_disk_type = var.data_sa_type
}
}
os_profile {
computer_name = "${var.vm_hostname}-${count.index}"
admin_username = var.admin_username
admin_password = var.admin_password
custom_data = var.custom_data
}
}
tags = var.tags
boot_diagnostics {
enabled = var.boot_diagnostics
storage_uri = var.boot_diagnostics ? join(",", azurerm_storage_account.vm-sa.*.primary_blob_endpoint) : ""
}
}
module/output.tf
output "vm_ids" {
description = "Virtual machine ids created."
value = concat(azurerm_virtual_machine.vm-windows.*.id, azurerm_virtual_machine.vm-linux.*.id)
}
output "network_security_group_id" {
description = "id of the security group provisioned"
value = azurerm_network_security_group.vm.id
}
output "network_security_group_name" {
description = "name of the security group provisioned"
value = azurerm_network_security_group.vm.name
}
output "network_interface_ids" {
description = "ids of the vm nics provisoned."
value = azurerm_network_interface.vm.*.id
}
output "network_interface_private_ip" {
description = "private ip addresses of the vm nics"
value = azurerm_network_interface.vm.*.private_ip_address
}
You need to configure it for each element from the ids, so it should be like:
resource "azurerm_network_interface_backend_address_pool_association" "main" {
count = var.node_count
ip_configuration_name = "Configuration-VMs-${count.index}"
network_interface_id = element(module.linuxservers.vm_ids, count.index)
backend_address_pool_id = azurerm_lb_backend_address_pool.main.id
}
I'm not sure about syntax here element(module.linuxservers.vm_ids, count.index), because I don't see where this output is defined (maybe I'm blind). But this is pretty much what you need.
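Since azurerm_network_interface_backend_address_pool_association expects a network interface ID rather than a VM ID, the module's output.tf shown above already exposes network_interface_ids, so a sketch along those lines could look like the following (count hard-coded to the 3 instances; the ip_configuration_name must match whatever name the module gives the NIC's IP configuration, which is not shown in the question):

resource "azurerm_network_interface_backend_address_pool_association" "main" {
  count                   = 3
  # this name must match the ip_configuration name used by the module's NIC resource
  ip_configuration_name   = "Configuration-VMs-${count.index}"
  network_interface_id    = element(module.linuxservers.network_interface_ids, count.index)
  backend_address_pool_id = azurerm_lb_backend_address_pool.main.id
}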
