Error: Incorrect attribute value type
on resources.tf line 387, in resource "azurerm_network_interface_backend_address_pool_association" "main":
387: network_interface_id = module.linuxservers.vm_ids
|----------------
| module.linuxservers.vm_ids is tuple with 3 elements
Inappropriate value for attribute "network_interface_id": string required
Hi, I am trying to associate my 3 Azure VMs with the load balancer backend pool but am getting the error above. Can you please guide me on this? My code is below.
Resource.tf
module "linuxservers" {
source = "Azure/compute/azurerm"
resource_group_name = azurerm_resource_group.main.name
vm_hostname = "Elastic"
nb_instances = 3
nb_public_ip = 0
remote_port = "22"
admin_username = var.admin_username
vm_os_publisher = "OpenLogic"
vm_os_offer = "CentOS"
vm_os_sku = "7.5"
vm_size = "Standard_D2as_v4"
ssh_key = "./putty_key.pub"
vnet_subnet_id = data.azurerm_subnet.elasticsearch.id
tags = var.tags
}
resource "azurerm_lb" "main"{
name = "Elastic_LB"
location = azurerm_resource_group.main.location
resource_group_name = azurerm_resource_group.main.name
frontend_ip_configuration {
name = "FrontEndIP"
subnet_id = data.azurerm_subnet.elasticsearch.id
}
}
resource "azurerm_lb_backend_address_pool" "main" {
resource_group_name = azurerm_resource_group.main.name
loadbalancer_id = azurerm_lb.main.id
name = "BackEndAddressPool"
}
resource "azurerm_network_interface_backend_address_pool_association" "main" {
ip_configuration_name = "Configuration-VMs"
network_interface_id = module.linuxservers.vm_ids
backend_address_pool_id = azurerm_lb_backend_address_pool.main.id
}
Module/main.tf
resource "azurerm_virtual_machine" "vm-linux" {
count = ! contains(list(var.vm_os_simple, var.vm_os_offer), "WindowsServer") && ! var.is_windows_image ? var.nb_instances : 0
name = "${var.vm_hostname}-${count.index}"
resource_group_name = data.azurerm_resource_group.vm.name
location = coalesce(var.location, data.azurerm_resource_group.vm.location)
availability_set_id = azurerm_availability_set.vm.id
vm_size = var.vm_size
network_interface_ids = [element(azurerm_network_interface.vm.*.id, count.index)]
delete_os_disk_on_termination = var.delete_os_disk_on_termination
dynamic identity {
for_each = length(var.identity_ids) == 0 && var.identity_type == "SystemAssigned" ? [var.identity_type] : []
content {
type = var.identity_type
}
}
dynamic identity {
for_each = length(var.identity_ids) > 0 || var.identity_type == "UserAssigned" ? [var.identity_type] : []
content {
type = var.identity_type
identity_ids = length(var.identity_ids) > 0 ? var.identity_ids : []
}
}
storage_image_reference {
id = var.vm_os_id
publisher = var.vm_os_id == "" ? coalesce(var.vm_os_publisher, module.os.calculated_value_os_publisher) : ""
offer = var.vm_os_id == "" ? coalesce(var.vm_os_offer, module.os.calculated_value_os_offer) : ""
sku = var.vm_os_id == "" ? coalesce(var.vm_os_sku, module.os.calculated_value_os_sku) : ""
version = var.vm_os_id == "" ? var.vm_os_version : ""
}
storage_os_disk {
name = "osdisk-${var.vm_hostname}-${count.index}"
create_option = "FromImage"
caching = "ReadWrite"
managed_disk_type = var.storage_account_type
}
dynamic storage_data_disk {
for_each = range(var.nb_data_disk)
content {
name = "${var.vm_hostname}-datadisk-${count.index}-${storage_data_disk.value}"
create_option = "Empty"
lun = storage_data_disk.value
disk_size_gb = var.data_disk_size_gb
managed_disk_type = var.data_sa_type
}
}
os_profile {
computer_name = "${var.vm_hostname}-${count.index}"
admin_username = var.admin_username
admin_password = var.admin_password
custom_data = var.custom_data
}
tags = var.tags
boot_diagnostics {
enabled = var.boot_diagnostics
storage_uri = var.boot_diagnostics ? join(",", azurerm_storage_account.vm-sa.*.primary_blob_endpoint) : ""
}
}
Module/output.tf
output "vm_ids" {
description = "Virtual machine ids created."
value = concat(azurerm_virtual_machine.vm-windows.*.id, azurerm_virtual_machine.vm-linux.*.id)
}
output "network_security_group_id" {
description = "id of the security group provisioned"
value = azurerm_network_security_group.vm.id
}
output "network_security_group_name" {
description = "name of the security group provisioned"
value = azurerm_network_security_group.vm.name
}
output "network_interface_ids" {
description = "ids of the vm nics provisoned."
value = azurerm_network_interface.vm.*.id
}
output "network_interface_private_ip" {
description = "private ip addresses of the vm nics"
value = azurerm_network_interface.vm.*.private_ip_address
}
You need to configure it for each element from `vm_ids`, so it should be like:
resource "azurerm_network_interface_backend_address_pool_association" "main" {
count = var.node_count
ip_configuration_name = "Configuration-VMs-${count.index}"
network_interface_id = element(module.linuxservers.vm_ids, count.index)
backend_address_pool_id = azurerm_lb_backend_address_pool.main.id
}
I'm not sure about the syntax here, element(module.linuxservers.vm_ids, count.index), because I don't see where this output is defined (maybe I'm blind; it is in Module/output.tf above). But this is pretty much what you need.
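One caveat, as a hedged sketch rather than a definitive fix: azurerm_network_interface_backend_address_pool_association takes a NIC ID, not a VM ID, and the module already exposes a network_interface_ids output (see Module/output.tf above). Deriving the count from that list also avoids a separate node_count variable:
resource "azurerm_network_interface_backend_address_pool_association" "main" {
  count                   = length(module.linuxservers.network_interface_ids)
  # the association expects a NIC id; vm_ids would pass a VM id here
  network_interface_id    = element(module.linuxservers.network_interface_ids, count.index)
  # must match the ip_configuration name the module assigns to its NICs
  # ("Configuration-VMs-${count.index}" here is an assumption)
  ip_configuration_name   = "Configuration-VMs-${count.index}"
  backend_address_pool_id = azurerm_lb_backend_address_pool.main.id
}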
Related
I'm following Neal Shah's instructions for deploying multiple VMs with multiple managed disks (https://www.nealshah.dev/posts/2020/05/terraform-for-azure-deploying-multiple-vms-with-multiple-managed-disks/#deploying-multiple-vms-with-multiple-datadisks)
Everything works fine except for the azurerm_virtual_machine_data_disk_attachment resource, which fails with the following error:
│ Error: Invalid index
│
│ on main.tf line 103, in resource "azurerm_virtual_machine_data_disk_attachment" "managed_disk_attach":
│ 103: virtual_machine_id = azurerm_linux_virtual_machine.vms[element(split("_", each.key), 1)].id
│ ├────────────────
│ │ azurerm_linux_virtual_machine.vms is tuple with 3 elements
│ │ each.key is "datadisk_dca0-apache-cassandra-node0_disk00"
│
│ The given key does not identify an element in this collection value: a number is required.
my code is below:
locals {
vm_datadiskdisk_count_map = { for k in toset(var.nodes) : k => var.data_disk_count }
luns = { for k in local.datadisk_lun_map : k.datadisk_name => k.lun }
datadisk_lun_map = flatten([
for vm_name, count in local.vm_datadiskdisk_count_map : [
for i in range(count) : {
datadisk_name = format("datadisk_%s_disk%02d", vm_name, i)
lun = i
}
]
])
}
# create resource group
resource "azurerm_resource_group" "resource_group" {
name = format("%s-%s", var.dca, var.name)
location = var.location
}
# create availability set
resource "azurerm_availability_set" "vm_availability_set" {
name = format("%s-%s-availability-set", var.dca, var.name)
location = azurerm_resource_group.resource_group.location
resource_group_name = azurerm_resource_group.resource_group.name
}
# create Security Group to access linux
resource "azurerm_network_security_group" "linux_vm_nsg" {
name = format("%s-%s-linux-vm-nsg", var.dca, var.name)
location = azurerm_resource_group.resource_group.location
resource_group_name = azurerm_resource_group.resource_group.name
security_rule {
name = "AllowSSH"
description = "Allow SSH"
priority = 100
direction = "Inbound"
access = "Allow"
protocol = "Tcp"
source_port_range = "*"
destination_port_range = "22"
source_address_prefix = "*"
destination_address_prefix = "*"
}
}
# associate the linux NSG with the subnet
resource "azurerm_subnet_network_security_group_association" "linux_vm_nsg_association" {
subnet_id = "${data.azurerm_subnet.subnet.id}"
network_security_group_id = azurerm_network_security_group.linux_vm_nsg.id
}
# create NICs for apache cassandra hosts
resource "azurerm_network_interface" "vm_nics" {
depends_on = [azurerm_subnet_network_security_group_association.linux_vm_nsg_association]
count = length(var.nodes)
name = format("%s-%s-nic${count.index}", var.dca, var.name)
location = azurerm_resource_group.resource_group.location
resource_group_name = azurerm_resource_group.resource_group.name
ip_configuration {
name = format("%s-%s-apache-cassandra-ip", var.dca, var.name)
subnet_id = "${data.azurerm_subnet.subnet.id}"
private_ip_address_allocation = "Dynamic"
}
}
# create apache cassandra VMs
resource "azurerm_linux_virtual_machine" "vms" {
count = length(var.nodes)
name = element(var.nodes, count.index)
location = azurerm_resource_group.resource_group.location
resource_group_name = azurerm_resource_group.resource_group.name
network_interface_ids = [element(azurerm_network_interface.vm_nics.*.id, count.index)]
availability_set_id = azurerm_availability_set.vm_availability_set.id
size = var.vm_size
admin_username = var.admin_username
disable_password_authentication = true
admin_ssh_key {
username = var.admin_username
public_key = var.ssh_pub_key
}
source_image_id = var.source_image_id
os_disk {
caching = "ReadWrite"
storage_account_type = var.storage_account_type
disk_size_gb = var.os_disk_size_gb
}
}
# create data disk(s) for VMs
resource "azurerm_managed_disk" "managed_disk" {
for_each = toset([for j in local.datadisk_lun_map : j.datadisk_name])
name = each.key
location = azurerm_resource_group.resource_group.location
resource_group_name = azurerm_resource_group.resource_group.name
storage_account_type = var.storage_account_type
create_option = "Empty"
disk_size_gb = var.disk_size_gb
}
resource "azurerm_virtual_machine_data_disk_attachment" "managed_disk_attach" {
for_each = toset([for j in local.datadisk_lun_map : j.datadisk_name])
managed_disk_id = azurerm_managed_disk.managed_disk[each.key].id
virtual_machine_id = azurerm_linux_virtual_machine.vms[element(split("_", each.key), 1)].id
lun = lookup(local.luns, each.key)
caching = "ReadWrite"
}
Does anyone know how to accomplish this? Thanks!
I've tried several different approaches but have been unsuccessful so far; I was expecting it to work as described in Neal's post.
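The root cause: with count, azurerm_linux_virtual_machine.vms is a tuple indexed by number, while element(split("_", each.key), 1) yields the VM name string ("dca0-apache-cassandra-node0"). A minimal sketch of a fix that keeps count, assuming var.nodes is the list of VM names used above, converts the name back to its numeric position:
resource "azurerm_virtual_machine_data_disk_attachment" "managed_disk_attach" {
  for_each           = toset([for j in local.datadisk_lun_map : j.datadisk_name])
  managed_disk_id    = azurerm_managed_disk.managed_disk[each.key].id
  # index() maps the VM name embedded in the disk key back to its position
  # in var.nodes, which is the numeric index the count-based tuple requires
  virtual_machine_id = azurerm_linux_virtual_machine.vms[index(var.nodes, element(split("_", each.key), 1))].id
  lun                = lookup(local.luns, each.key)
  caching            = "ReadWrite"
}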
I was able to get this working. However, I have not tested adding/removing nodes/disks yet, but this works to create multiple VMs with multiple data disks attached to each VM.
I use a variable file that I source to substitute the variables in the *.tf files.
variables.tf
variable "azure_subscription_id" {
type = string
description = "Azure Subscription ID"
default = ""
}
variable "dca" {
type = string
description = "datacenter [dca0|dca2|dca4|dca6]."
default = ""
}
variable "location" {
type = string
description = "Location of the resource group."
default = ""
}
variable "resource_group" {
type = string
description = "resource group name."
default = ""
}
variable "subnet_name" {
type = string
description = "subnet name"
default = ""
}
variable "vnet_name" {
type = string
description = "vnet name"
default = ""
}
variable "vnet_rg" {
type = string
description = "vnet resource group"
default = ""
}
variable "vm_size" {
type = string
description = "vm size"
default = ""
}
variable "os_disk_size_gb" {
type = string
description = "vm os disk size gb"
default = ""
}
variable "data_disk_size_gb" {
type = string
description = "vm data disk size gb"
default = ""
}
variable "admin_username" {
type = string
description = "admin user name"
default = ""
}
variable "ssh_pub_key" {
type = string
description = "public key for admin user"
default = ""
}
variable "source_image_id" {
type = string
description = "image id"
default = ""
}
variable "os_disk_storage_account_type" {
type = string
description = ""
default = ""
}
variable "data_disk_storage_account_type" {
type = string
description = ""
default = ""
}
variable "vm_list" {
type = map(object({
hostname = string
}))
default = {
vm0 ={
hostname = "${dca}-${name}-node-0"
},
vm1 = {
hostname = "${dca}-${name}-node-1"
}
vm2 = {
hostname = "${dca}-${name}-node-2"
}
}
}
variable "disks_per_instance" {
type = string
description = ""
default = ""
}
terraform.tfvars
# subscription
azure_subscription_id = "${azure_subscription_id}"
# name and location
resource_group = "${dca}-${name}"
location = "${location}"
dca = "${dca}"
# Network
subnet_name = "${subnet_name}"
vnet_name = "${dca}vnet"
vnet_rg = "th-${dca}-vnet"
# VM
vm_size = "${vm_size}"
os_disk_size_gb = "${os_disk_size_gb}"
os_disk_storage_account_type = "${os_disk_storage_account_type}"
source_image_id = "${source_image_id}"
# User/key info
admin_username = "${admin_username}"
ssh_pub_key = "${ssh_pub_key}"
# data disk info
data_disk_storage_account_type = "${data_disk_storage_account_type}"
data_disk_size_gb = "${data_disk_size_gb}"
disks_per_instance = "${disks_per_instance}"
main.tf
# set locals for multi data disks
locals {
vm_datadiskdisk_count_map = { for k, query in var.vm_list : k => var.disks_per_instance }
luns = { for k in local.datadisk_lun_map : k.datadisk_name => k.lun }
datadisk_lun_map = flatten([
for vm_name, count in local.vm_datadiskdisk_count_map : [
for i in range(count) : {
datadisk_name = format("datadisk_%s_disk%02d", vm_name, i)
lun = i
}
]
])
}
# create resource group
resource "azurerm_resource_group" "resource_group" {
name = format("%s", var.resource_group)
location = var.location
}
# create data disk(s)
resource "azurerm_managed_disk" "managed_disk" {
for_each = toset([for j in local.datadisk_lun_map : j.datadisk_name])
name = each.key
location = azurerm_resource_group.resource_group.location
resource_group_name = azurerm_resource_group.resource_group.name
storage_account_type = var.data_disk_storage_account_type
create_option = "Empty"
disk_size_gb = var.data_disk_size_gb
}
# create availability set
resource "azurerm_availability_set" "vm_availability_set" {
name = format("%s-availability-set", var.resource_group)
location = azurerm_resource_group.resource_group.location
resource_group_name = azurerm_resource_group.resource_group.name
}
# create Security Group to access linux
resource "azurerm_network_security_group" "linux_vm_nsg" {
name = format("%s-linux-vm-nsg", var.resource_group)
location = azurerm_resource_group.resource_group.location
resource_group_name = azurerm_resource_group.resource_group.name
security_rule {
name = "AllowSSH"
description = "Allow SSH"
priority = 100
direction = "Inbound"
access = "Allow"
protocol = "Tcp"
source_port_range = "*"
destination_port_range = "22"
source_address_prefix = "*"
destination_address_prefix = "*"
}
}
# associate the linux NSG with the subnet
resource "azurerm_subnet_network_security_group_association" "linux_vm_nsg_association" {
subnet_id = "${data.azurerm_subnet.subnet.id}"
network_security_group_id = azurerm_network_security_group.linux_vm_nsg.id
}
# create NICs for vms
resource "azurerm_network_interface" "nics" {
depends_on = [azurerm_subnet_network_security_group_association.linux_vm_nsg_association]
for_each = var.vm_list
name = "${each.value.hostname}-nic"
location = azurerm_resource_group.resource_group.location
resource_group_name = azurerm_resource_group.resource_group.name
ip_configuration {
name = format("%s-proxy-ip", var.resource_group)
subnet_id = "${data.azurerm_subnet.subnet.id}"
private_ip_address_allocation = "Dynamic"
}
}
# create VMs
resource "azurerm_linux_virtual_machine" "vms" {
for_each = var.vm_list
name = each.value.hostname
location = azurerm_resource_group.resource_group.location
resource_group_name = azurerm_resource_group.resource_group.name
network_interface_ids = [azurerm_network_interface.nics[each.key].id]
availability_set_id = azurerm_availability_set.vm_availability_set.id
size = var.vm_size
source_image_id = var.source_image_id
custom_data = filebase64("cloud-init.sh")
admin_username = var.admin_username
disable_password_authentication = true
admin_ssh_key {
username = var.admin_username
public_key = var.ssh_pub_key
}
os_disk {
caching = "ReadWrite"
storage_account_type = var.os_disk_storage_account_type
disk_size_gb = var.os_disk_size_gb
}
}
# attach data disks to vms
resource "azurerm_virtual_machine_data_disk_attachment" "managed_disk_attach" {
for_each = toset([for j in local.datadisk_lun_map : j.datadisk_name])
managed_disk_id = azurerm_managed_disk.managed_disk[each.key].id
virtual_machine_id = azurerm_linux_virtual_machine.vms[element(split("_", each.key), 1)].id
lun = lookup(local.luns, each.key)
caching = "ReadWrite"
}
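Why this version works: with for_each over var.vm_list, azurerm_linux_virtual_machine.vms is a map keyed by "vm0"/"vm1"/"vm2", and those keys are exactly what the disk names embed, so the lookup in the attachment resource resolves. A quick check in terraform console (output shape illustrative):
> split("_", "datadisk_vm0_disk00")
["datadisk", "vm0", "disk00"]
> element(split("_", "datadisk_vm0_disk00"), 1)
"vm0"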
I have a .tfvars file with the below contents as the input variables:
aks_configuration = {
aks1 = {
name = "cnitest"
location = "westeurope"
kubernetes_version = "1.22.4"
dns_prefix = "cnitest"
default_nodepool_name = "general"
default_nodepool_size = "Standard_B2s"
default_nodepool_count = 2
default_node_pool_autoscale = true
default_node_pool_autoscale_min_count = 1
default_node_pool_autoscale_max_count = 2
aks_zones = null
network_plugin = null
network_policy = null
vnet_name = null
vnet_enabled = false
subnet_name = null
objectID = ["*********"]
nodepool = [
{
name = "dts"
vm_size = "Standard_B2s"
enable_auto_scaling = true
mode = "user"
node_count = 1
max_count = 2
min_count = 1
}
]
}
}
Now I need to create an AKS cluster with a condition to choose Azure CNI or kubenet as part of the network configuration.
If vnet_enabled is false, it should disable the data resource in the Terraform and give the value null for the below configuration:
#get nodepool vnet subnet ID
data "azurerm_subnet" "example" {
for_each = local.aks_config.vnet_enabled
name = each.value.subnet_name
virtual_network_name = each.value.vnet_name
resource_group_name = var.rg-name
}
resource "azurerm_kubernetes_cluster" "example" {
for_each = local.aks_config
name = each.value.name
location = each.value.location
resource_group_name = var.rg-name
dns_prefix = each.value.dns_prefix
default_node_pool {
name = each.value.default_nodepool_name
node_count = each.value.default_nodepool_count
vm_size = each.value.default_nodepool_size
enable_auto_scaling = each.value.default_node_pool_autoscale
min_count = each.value.default_node_pool_autoscale_min_count
max_count = each.value.default_node_pool_autoscale_max_count
vnet_subnet_id = data.azurerm_subnet.example[each.key].id
zones = each.value.aks_zones
}
identity {
type = "SystemAssigned"
}
network_profile {
network_plugin = each.value.network_plugin
network_policy = each.value.network_policy
}
# azure_active_directory_role_based_access_control {
# managed = true
# admin_group_object_ids = [each.value.objectID]
# }
}
If the vnet integration needs to be done, i.e. the parameter vnet_enabled is set to true, then the data block fetches the details of the subnet to be used by the AKS cluster, uses it, and also sets the network plugin to azure. If it's false, then the data block is not utilized, the subnet is given a null value in the AKS cluster, and it uses kubenet. You don't need to add the network plugin and network policy in the locals; you can leverage the same in the conditional values.
To achieve the above, you have to use something like below:
data "azurerm_subnet" "example" {
for_each = local.aks_configuration.vnet_enabled ? 1 : 0
name = each.value.subnet_name
virtual_network_name = each.value.vnet_name
resource_group_name = var.rg-name
}
resource "azurerm_kubernetes_cluster" "example" {
for_each = local.aks_configuration
name = each.value.name
location = each.value.location
resource_group_name = var.rg-name
dns_prefix = each.value.dns_prefix
default_node_pool {
name = each.value.default_nodepool_name
node_count = each.value.default_nodepool_count
vm_size = each.value.default_nodepool_size
enable_auto_scaling = each.value.default_node_pool_autoscale
min_count = each.value.default_node_pool_autoscale_min_count
max_count = each.value.default_node_pool_autoscale_max_count
vnet_subnet_id = each.value.vnet_enabled ? data.azurerm_subnet.example[each.key].id : null
zones = each.value.aks_zones
}
identity {
type = "SystemAssigned"
}
network_profile {
network_plugin = each.value.vnet_enabled ? "azure" : "kubenet"
network_policy = each.value.vnet_enabled ? "azure" : "calico"
}
# azure_active_directory_role_based_access_control {
# managed = true
# admin_group_object_ids = [each.value.objectID]
# }
}
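As a sanity check, the filter expression yields an empty map when vnet_enabled is false, so the data source is simply not created and the ternary falls through to null (terraform console, values illustrative):
> { for k, v in { aks1 = { vnet_enabled = false } } : k => v if v.vnet_enabled }
{}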
Could anyone give me a hint on how I should do this? I want to deploy 4 Azure Linux VMs, but 2 in each of two different resource groups.
e.g.: rg-one will have 2 Linux VMs and rg-two will have the other two.
The way I did it is to create two blocks with azurerm_linux_virtual_machine. It is working, but I was wondering if it could not be simplified.
Thanks in advance for the heads-up.
Here is a snippet of what I have done.
# Fetch existing resource_group
data "azurerm_resource_group" "rg-dock001" {
name = var.resource_group01
}
# Fetch vm network
data "azurerm_virtual_network" "vm_network" {
name = var.vm_network
resource_group_name = var.rg_name_network
}
output "azurerm_virtual_network" {
value = data.azurerm_virtual_network.vm_network.id
}
# Fetch vm subnet
data "azurerm_subnet" "vm_subnet" {
name = var.vm_subnet
resource_group_name = var.rg_name_network
virtual_network_name = var.vm_network
}
output "subnet_id" {
value = data.azurerm_subnet.vm_subnet.id
}
resource "azurerm_network_interface" "ens124-01" {
name = var.vm_nic01[count.index]
count = length(var.vm_nic01)
location = var.rg_location
resource_group_name = var.resource_group01
ip_configuration {
name = "internal"
subnet_id = data.azurerm_subnet.vm_subnet.id
private_ip_address_allocation = "Static"
private_ip_address = "10.241.25.${count.index + 10}"
}
tags = var.vm_tags
}
output "private_ip01" {
value = length(azurerm_network_interface.ens124-01.*.private_ip_address)
}
# Fetch existing image
data "azurerm_image" "custom_docker_image" {
name_regex = var.packer_image
sort_descending = true
resource_group_name = var.resource_group_image
}
output "image_id" {
value = data.azurerm_image.custom_docker_image.id
}
# create and display an SSH key
resource "tls_private_key" "ssh" {
algorithm = "RSA"
rsa_bits = 4096
}
output "tls_private_key" {
value = tls_private_key.ssh.private_key_pem
sensitive = true
}
resource "azurerm_linux_virtual_machine" "main01" {
name = var.vm_name01[count.index]
count = length(var.vm_name01)
resource_group_name = var.resource_group01
location = var.rg_location
size = "standard_ds3_v2"
admin_username = var.username
admin_password = var.password
disable_password_authentication = true
network_interface_ids = ["${element(azurerm_network_interface.ens124-01.*.id, count.index)}"]
source_image_id = data.azurerm_image.custom_docker_image.id
computer_name = var.vm_name01[count.index]
admin_ssh_key {
username = var.ssh_username
public_key = tls_private_key.ssh.public_key_openssh
}
os_disk {
name = "disk-int-dock-0${count.index + 1}"
storage_account_type = "Standard_LRS"
caching = "ReadWrite"
}
tags = var.vm_tags
}
data "azurerm_resource_group" "rg-dock002" {
name = var.resource_group02
}
resource "azurerm_network_interface" "ens124-02" {
name = var.vm_nic02[count.index]
count = length(var.vm_nic02)
location = var.rg_location
resource_group_name = var.resource_group02
ip_configuration {
name = "internal"
subnet_id = data.azurerm_subnet.vm_subnet.id
private_ip_address_allocation = "Static"
private_ip_address = "10.241.25.${count.index + 20}"
}
tags = var.vm_tags
}
output "private_ip02" {
value = length(azurerm_network_interface.ens124-02.*.private_ip_address)
}
resource "azurerm_linux_virtual_machine" "main02" {
name = var.vm_name02[count.index]
count = length(var.vm_name02)
resource_group_name = var.resource_group02
location = var.rg_location
size = "standard_ds3_v2"
admin_username = var.username
admin_password = var.password
disable_password_authentication = true
network_interface_ids = ["${element(azurerm_network_interface.ens124-02.*.id, count.index)}"]
source_image_id = data.azurerm_image.custom_docker_image.id
computer_name = var.vm_name02[count.index]
admin_ssh_key {
username = var.ssh_username
public_key = tls_private_key.ssh.public_key_openssh
}
os_disk {
name = "disk-int-dock-0${count.index + 1}"
storage_account_type = "Standard_LRS"
caching = "ReadWrite"
}
tags = var.vm_tags
}
As per your requirement, you can use something like below:
provider "azurerm" {
features {}
}
variable "VM" {
default= {
vm1={
rg_name= "ajaytest",
vnet_name="ajay-vnet",
subnet_name="default",
nic_name = "ansumanVM-nic",
private_ip="10.0.0.10",
vm_name="ansumanVM"
},
vm2={
rg_name= "kartiktest",
vnet_name="kartik-vnet",
subnet_name="default",
nic_name="terraformVM-nic",
private_ip="10.0.0.20",
vm_name="terraformVM"
}
}
}
variable "username" {
default="ansuman"
}
variable "password" {
default="Password#1234!"
}
data "azurerm_shared_image_version" "example" {
name = "0.0.1"
image_name = "UbuntuwithNginxinstalled"
gallery_name = "ansumantestgallery"
resource_group_name = "ansumantest"
}
data "azurerm_resource_group" "rg-dock" {
for_each = var.VM
name = each.value["rg_name"]
}
# Fetch vm network
data "azurerm_virtual_network" "vm_network" {
for_each = var.VM
name = each.value["vnet_name"]
resource_group_name = each.value["rg_name"]
}
# Fetch vm subnet
data "azurerm_subnet" "vm_subnet" {
for_each = var.VM
name = each.value["subnet_name"]
resource_group_name = each.value["rg_name"]
virtual_network_name = each.value["vnet_name"]
}
resource "azurerm_network_interface" "ens124" {
for_each = var.VM
name = each.value["nic_name"]
location = data.azurerm_resource_group.rg-dock[each.key].location
resource_group_name = data.azurerm_resource_group.rg-dock[each.key].name
ip_configuration {
name = "internal"
subnet_id = data.azurerm_subnet.vm_subnet[each.key].id
private_ip_address_allocation = "Static"
private_ip_address = each.value["private_ip"]
}
}
# create and display an SSH key
resource "tls_private_key" "ssh" {
algorithm = "RSA"
rsa_bits = 4096
}
output "tls_private_key" {
value = tls_private_key.ssh.private_key_pem
sensitive = true
}
resource "azurerm_linux_virtual_machine" "main" {
for_each = var.VM
name = each.value["vm_name"]
resource_group_name = data.azurerm_resource_group.rg-dock[each.key].name
location = data.azurerm_resource_group.rg-dock[each.key].location
size = "standard_ds3_v2"
admin_username = var.username
admin_password = var.password
disable_password_authentication = true
network_interface_ids = [azurerm_network_interface.ens124[format("%s", each.key)].id]
source_image_id = data.azurerm_shared_image_version.example.id
computer_name = each.key
admin_ssh_key {
username = var.username
public_key = tls_private_key.ssh.public_key_openssh
}
os_disk {
name = "disk-int-dock-${each.key}"
storage_account_type = "Standard_LRS"
caching = "ReadWrite"
}
}
Note: The admin username and the SSH username must be the same due to a platform limitation. Also, the location of the VM, VNet, image and other resources must be the same.
I am trying to create multiple Azure VMs and am not able to assign the VMs to different availability sets. Please see my code below:
Module "vm_dev":
terraform {
required_providers {
azurerm = {
source = "hashicorp/azurerm"
version = "2.82.0"
}
}
}
provider "azurerm" {
features {}
}
resource "azurerm_resource_group" "rg_dev" {
name = "MYORG_RG_DEV"
location = var.location
}
resource "azurerm_network_interface" "node_master" {
for_each = var.instances_master
name = "${var.hostname_prefix}-${each.key}-nic"
resource_group_name = azurerm_resource_group.rg_dev.name
location = azurerm_resource_group.rg_dev.location
internal_dns_name_label = "${var.hostname_prefix}-${each.key}"
ip_configuration {
name = "primary"
primary = true
subnet_id = var.subnet_id
private_ip_address = each.value.ip
private_ip_address_allocation = "Static"
private_ip_address_version = "IPv4"
}
}
resource "azurerm_linux_virtual_machine" "node_master" {
for_each = var.instances_master
name = "${var.hostname_prefix}-${each.key}"
computer_name = "${var.hostname_prefix}-${each.key}"
size = var.vm_size
resource_group_name = azurerm_resource_group.rg_dev.name
location = azurerm_resource_group.rg_dev.location
network_interface_ids = [azurerm_network_interface.node_master[each.key].id]
os_disk {
name = "${var.hostname_prefix}-${each.key}-disk-os"
storage_account_type = "StandardSSD_LRS"
caching = "ReadWrite"
}
source_image_reference {
publisher = "Canonical"
offer = "UbuntuServer"
sku = "18.04-LTS"
version = "latest"
}
admin_username = "myuser"
admin_ssh_key {
username = "myuser"
public_key = file("id.pub")
}
disable_password_authentication = true
}
resource "azurerm_network_interface" "node_data" {
for_each = var.instances_data
name = "${var.hostname_prefix}-${each.key}-nic"
resource_group_name = azurerm_resource_group.rg_dev.name
location = azurerm_resource_group.rg_dev.location
internal_dns_name_label = "${var.hostname_prefix}-${each.key}"
ip_configuration {
name = "primary"
primary = true
subnet_id = var.subnet_id
private_ip_address = each.value.ip
private_ip_address_allocation = "Static"
private_ip_address_version = "IPv4"
}
}
resource "azurerm_linux_virtual_machine" "node_data" {
for_each = var.instances_data
name = "${var.hostname_prefix}-${each.key}"
computer_name = "${var.hostname_prefix}-${each.key}"
size = var.vm_size
resource_group_name = azurerm_resource_group.rg_dev.name
location = azurerm_resource_group.rg_dev.location
network_interface_ids = [azurerm_network_interface.node_data[each.key].id]
os_disk {
name = "${var.hostname_prefix}-${each.key}-disk-os"
storage_account_type = "StandardSSD_LRS"
caching = "ReadWrite"
}
source_image_reference {
publisher = "Canonical"
offer = "UbuntuServer"
sku = "18.04-LTS"
version = "latest"
}
admin_username = "myuser"
admin_ssh_key {
username = "myuser"
public_key = file("id.pub")
}
disable_password_authentication = true
}
vm.tf:
module "vm_dev" {
source = "./vm_dev"
vm_size = "Standard_D4s_v3"
hostname_prefix = "myorg"
group_name_prefix = var.group_prefix
location = var.location
subnet_id = local.subnet_id
ssh_key = local.ssh_public_key
instances_master = {
"aa-elastic-master-0" = { ip = "10.0.100.1" }
"aa-elastic-master-1" = { ip = "10.0.100.2" }
"xx-elastic-master-0" = { ip = "10.0.99.1" }
"xx-elastic-master-1" = { ip = "10.0.99.2" }
}
instances_data = {
"aa-elastic-data-0" = { ip = "10.0.100.3" }
"aa-elastic-data-1" = { ip = "10.0.100.4" }
"aa-elastic-data-2" = { ip = "10.0.100.5" }
"xx-elastic-data-0" = { ip = "10.0.99.3" }
"xx-elastic-data-1" = { ip = "10.0.99.4" }
"xx-elastic-data-2" = { ip = "10.0.99.5" }
}
}
This works fine and I am able to create VMs. So far each VM is created without being assigned to an availability set. I would like to specify which availability_set each VM belongs to, something like this:
instances_master = {
"aa-elastic-master-0" = { ip = "10.0.100.1", as = "azurerm_availability_set.as_aamaster.id" }
"aa-elastic-master-1" = { ip = "10.0.100.2", as = "azurerm_availability_set.as_aamaster.id" }
"xx-elastic-master-0" = { ip = "10.0.99.1", as = "azurerm_availability_set.as_xxmaster.id" }
"xx-elastic-master-1" = { ip = "10.0.99.2", as = "azurerm_availability_set.as_xxmaster.id" }
}
instances_data = {
"aa-elastic-data-0" = { ip = "10.0.100.3", as = "azurerm_availability_set.as_aadata.id" }
"aa-elastic-data-1" = { ip = "10.0.100.4", as = "azurerm_availability_set.as_aadata.id" }
"aa-elastic-data-2" = { ip = "10.0.100.5", as = "azurerm_availability_set.as_aadata.id" }
"xx-elastic-data-0" = { ip = "10.0.99.3", as = "azurerm_availability_set.as_xxdata.id" }
"xx-elastic-data-1" = { ip = "10.0.99.4", as = "azurerm_availability_set.as_xxdata.id" }
"xx-elastic-data-2" = { ip = "10.0.99.5", as = "azurerm_availability_set.as_xxdata.id" }
}
Adding the following code in the module:
resource "azurerm_availability_set" "as_aamaster" {
name = "${var.hostname_prefix}-as-aamaster"
resource_group_name = azurerm_resource_group.rg_dev.name
location = azurerm_resource_group.rg_dev.location
managed = true
}
resource "azurerm_linux_virtual_machine" "node_master" {
for_each = var.instances_master
name = "${var.hostname_prefix}-${each.key}"
computer_name = "${var.hostname_prefix}-${each.key}"
size = var.vm_size
resource_group_name = azurerm_resource_group.rg_dev.name
location = azurerm_resource_group.rg_dev.location
availability_set_id = each.value.as
network_interface_ids = [azurerm_network_interface.node_master[each.key].id]
...
gives me the error:
Error: Cannot parse Azure ID: parse "azurerm_availability_set.as_aamaster.id": invalid URI for request
on vm_dev/main.tf line 72, in resource "azurerm_linux_virtual_machine" "node_master":
72: availability_set_id = each.value.as
Any advice is appreciated.
Thanks
I tested your code and it failed with the same error.
So, for the solution, as Ivan Ignatiev has suggested, you will have to use the below:
instances_master = {
"aa-elastic-master-0" = { ip = "10.0.2.1", as = "${azurerm_availability_set.as_aamaster.id}" }
"aa-elastic-master-1" = { ip = "10.0.2.2", as = "${azurerm_availability_set.as_aamaster.id}" }
"xx-elastic-master-0" = { ip = "10.0.2.3", as = "${azurerm_availability_set.as_xxmaster.id}" }
"xx-elastic-master-1" = { ip = "10.0.2.4", as = "${azurerm_availability_set.as_xxmaster.id}" }
}
instances_data = {
"aa-elastic-data-0" = { ip = "10.0.2.5", as = "${azurerm_availability_set.as_aadata.id}" }
"aa-elastic-data-1" = { ip = "10.0.2.6", as = "${azurerm_availability_set.as_aadata.id}" }
"aa-elastic-data-2" = { ip = "10.0.2.7", as = "${azurerm_availability_set.as_aadata.id}" }
"xx-elastic-data-0" = { ip = "10.0.2.8", as = "${azurerm_availability_set.as_xxdata.id}" }
"xx-elastic-data-1" = { ip = "10.0.2.9", as = "${azurerm_availability_set.as_xxdata.id}" }
"xx-elastic-data-2" = { ip = "10.0.2.10", as = "${azurerm_availability_set.as_xxdata.id}" }
}
main.tf
resource "azurerm_network_interface" "node_master" {
for_each = var.instances_master
name = "ansuman-${each.key}-nic"
resource_group_name = azurerm_resource_group.rg_dev.name
location = azurerm_resource_group.rg_dev.location
internal_dns_name_label = "ansuman-${each.key}"
ip_configuration {
name = "primary"
primary = true
subnet_id = azurerm_subnet.example.id
private_ip_address = each.value.ip
private_ip_address_allocation = "Static"
private_ip_address_version = "IPv4"
}
}
resource "azurerm_availability_set" "as_aamaster" {
name = "ansuman-as-aamaster"
resource_group_name = azurerm_resource_group.rg_dev.name
location = azurerm_resource_group.rg_dev.location
managed = true
}
resource "azurerm_availability_set" "as_xxmaster" {
name = "ansuman-as-xxmaster"
resource_group_name = azurerm_resource_group.rg_dev.name
location = azurerm_resource_group.rg_dev.location
managed = true
}
resource "azurerm_linux_virtual_machine" "node_master" {
for_each = var.instances_master
name = "ansuman-${each.key}"
computer_name = "ansuman-${each.key}"
size = "Standard_B1s"
resource_group_name = azurerm_resource_group.rg_dev.name
location = azurerm_resource_group.rg_dev.location
availability_set_id = each.value.as
network_interface_ids = [azurerm_network_interface.node_master[each.key].id]
os_disk {
name = "ansuman-${each.key}-disk-os"
storage_account_type = "StandardSSD_LRS"
caching = "ReadWrite"
}
source_image_reference {
publisher = "Canonical"
offer = "UbuntuServer"
sku = "18.04-LTS"
version = "latest"
}
admin_username = "myuser"
admin_ssh_key {
username = "myuser"
public_key = file("~/.ssh/id_rsa.pub")
}
disable_password_authentication = true
}
resource "azurerm_network_interface" "node_data" {
for_each = var.instances_data
name = "ansuman-${each.key}-nic"
resource_group_name = azurerm_resource_group.rg_dev.name
location = azurerm_resource_group.rg_dev.location
internal_dns_name_label = "ansuman-${each.key}"
ip_configuration {
name = "primary"
primary = true
subnet_id = azurerm_subnet.example.id
private_ip_address = each.value.ip
private_ip_address_allocation = "Static"
private_ip_address_version = "IPv4"
}
}
resource "azurerm_availability_set" "as_aadata" {
name = "ansuman-as-aadata"
resource_group_name = azurerm_resource_group.rg_dev.name
location = azurerm_resource_group.rg_dev.location
managed = true
}
resource "azurerm_availability_set" "as_xxdata" {
name = "ansuman-as-xxdata"
resource_group_name = azurerm_resource_group.rg_dev.name
location = azurerm_resource_group.rg_dev.location
managed = true
}
resource "azurerm_linux_virtual_machine" "node_data" {
for_each = var.instances_data
name = "ansuman-${each.key}"
computer_name = "ansuman-${each.key}"
size = "Standard_B1s"
resource_group_name = azurerm_resource_group.rg_dev.name
location = azurerm_resource_group.rg_dev.location
availability_set_id = each.value.as
network_interface_ids = [azurerm_network_interface.node_data[each.key].id]
os_disk {
name = "ansuman-${each.key}-disk-os"
storage_account_type = "StandardSSD_LRS"
caching = "ReadWrite"
}
source_image_reference {
publisher = "Canonical"
offer = "UbuntuServer"
sku = "18.04-LTS"
version = "latest"
}
admin_username = "myuser"
admin_ssh_key {
username = "myuser"
public_key = file("~/.ssh/id_rsa.pub")
}
disable_password_authentication = true
}
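Note: on Terraform 0.12 and later the "${...}" wrapper is legacy interpolation syntax; a bare reference works the same and is the idiomatic form:
"aa-elastic-master-0" = { ip = "10.0.2.1", as = azurerm_availability_set.as_aamaster.id }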
AnsumanBal-MT, this did not work for me (I have added a comment above), but I was able to solve this via:
"aa-elastic-master-0" = { ip = "10.0.2.1", as = "0" }
"xx-elastic-master-0" = { ip = "10.0.2.3", as = "1" }
in module:
resource "azurerm_availability_set" "as_dev" {
count = 5
name = "${var.hostname_prefix}-dev-${element(var.availability_set_name, count.index)}-as"
resource_group_name = azurerm_resource_group.rg_dev.name
location = var.location
}
For azurerm_linux_virtual_machine I added:
availability_set_id = azurerm_availability_set.as_dev[each.value.as].id
variable:
variable "availability_set_name" {
description = "Availability set name that the VMs will be created in"
type = list(any)
default = ["aamaster", "xxmaster", "aadata", ....]
}
My Terraform code is below:
module "centos-vm-author-2" {
source = "terraform.automation.temp.com.au/temp/temp-linux-vm/azurerm"
version = "6.7.0"
location = var.resource_location
resource_group_name = var.resource_group_name_2
vm_count = "1"
tags = local.tags
size = var.vm_size
hostname_prefix = var.hostname_prefix
hostname_suffix_start_range = "491"
image_publisher = "OpenLogic"
image_offer = "Centos"
image_sku = "7_9"
subnet_id = var.auth_pub_subnet_id
admin_username = "azureadmin"
availability_set_id = azurerm_availability_set.aemfeature1authoras.id
patching_tags = local.patching_tags
ansible_vault_key = var.ansible_vault_key
log_to_loganalytics = false
ou_tags = local.ou_tags
os_disk_size = var.os_size_gb
os_disk_type = var.storage_account_type
server_access_memberships = ["CN=DSTDEVOPS,OU=DistributionGroups,OU=Groups,OU=Resources,DC=temp,DC=int"]
sudoers = ["%DSTDEVOPS"]
data_disks = [
[
{
disk_size_gb = var.disk_size_gb
storage_account_type = var.storage_account_type
caching = "ReadWrite"
create_option = "Empty"
source_resource_id = ""
write_accelerator_enabled = false
}
]
]
}
resource "null_resource" "centos-vm-author-ansible" {
provisioner "local-exec" {
command = <<EOF
ansible-playbook -i '${join(",", azurerm_network_interface.centos-vm-author-2.*.private_ip_address)},'-e ansible_user=${var.admin_username} -e "role_name=automate-author" main.yaml
EOF
}
depends_on = [
module.centos-vm-author-2
]
}
Basically I want to tell Ansible the private IPs against which it should execute the role.
I am getting an error like below:
Error: Reference to undeclared resource
on main.tf line 236, in resource "null_resource" "centos-vm-author-ansible":
ansible-playbook -i '${join(",", azurerm_network_interface.centos-vm-author-2.*.private_ip_address)},'-e ansible_user=${var.admin_username} -e "role_name=automate-author" main.yaml
A managed resource "azurerm_network_interface" "centos-vm-author-2" has not been declared in the root module.
I'd sincerely appreciate any help understanding what the issue is and how to resolve it.
P.S: The TF Module code is like below:
resource "azurerm_network_interface" "main" {
count = var.vm_count
name = "${format("${var.hostname_prefix}%04d", var.hostname_suffix_start_range + count.index, )}-nic"
location = var.location
resource_group_name = var.resource_group_name
enable_accelerated_networking = var.enable_accelerated_networking
ip_configuration {
name = "${format("${var.hostname_prefix}%04d", var.hostname_suffix_start_range + count.index, )}-ipconfig"
subnet_id = var.subnet_id
private_ip_address_allocation = var.private_ip_address_allocation
private_ip_address = var.private_ip_address
public_ip_address_id = var.enable_public_ip_address ? azurerm_public_ip.main[count.index].id : null
}
tags = var.tags
}
resource "azurerm_network_interface_backend_address_pool_association" "lbconf" {
count = var.backend_address_pool_id == null ? 0 : var.vm_count
network_interface_id = azurerm_network_interface.main[count.index].id
ip_configuration_name = azurerm_network_interface.main[count.index].ip_configuration[0].name
backend_address_pool_id = var.backend_address_pool_id
}
resource "azurerm_linux_virtual_machine" "main" {
count = var.vm_count
name = format("${var.hostname_prefix}%04d", var.hostname_suffix_start_range + count.index, )
location = var.location
resource_group_name = var.resource_group_name
admin_username = var.admin_username
admin_ssh_key {
username = var.admin_username
public_key = chomp(tls_private_key.bootstrap_private_key.public_key_openssh)
}
disable_password_authentication = var.disable_password_authentication
network_interface_ids = [azurerm_network_interface.main[count.index].id]
size = var.size
availability_set_id = var.availability_set_id
source_image_reference {
publisher = var.image_publisher
offer = var.image_offer
sku = var.image_sku
version = var.image_version
}
os_disk {
name = "${format("${var.hostname_prefix}%04d", var.hostname_suffix_start_range + count.index, )}-osdisk"
caching = "ReadWrite"
storage_account_type = var.os_disk_type
disk_size_gb = var.os_disk_size
}
dynamic "identity" {
for_each = var.identity
content {
type = identity.value["type"]
identity_ids = identity.value["type"] == "SystemAssigned" ? [] : identity.value["identity_ids"]
}
}
dynamic "plan" {
for_each = var.marketplace_image ? [1] : []
content {
name = var.image_sku
product = var.image_offer
publisher = var.image_publisher
}
}
# boot_diagnostics {
# storage_account_uri = var.boot_diagnostics_storage_uri
# }
tags = var.ou_tags == null ? merge(var.tags, var.patching_tags) : merge(var.tags, var.ou_tags, var.patching_tags)
}
To refer to your module, instead of:
azurerm_network_interface.centos-vm-author-2.*.private_ip_address
it should be:
module.centos-vm-author-2.private_ip_addresses
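This assumes the module actually exposes an output with that name; if the temp-linux-vm module does not declare one, something like the following would be needed inside the module (the output name here is an assumption, matching the reference above):
# in the module, e.g. outputs.tf -- output name is assumed, not confirmed
output "private_ip_addresses" {
  description = "Private IP addresses of the NICs created by this module"
  value       = azurerm_network_interface.main.*.private_ip_address
}
The local-exec command then becomes:
command = <<EOF
ansible-playbook -i '${join(",", module.centos-vm-author-2.private_ip_addresses)},' -e ansible_user=${var.admin_username} -e "role_name=automate-author" main.yaml
EOF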