Does anyone could give a hint on how should i do. i want to deploy 4 Az linux VM but 2 in each different resource group.
eg: rg-one will have 2 linux and rg-two will the other two
the way i did it is too create two block with azure_linux_virtual_machine, it is working but i was wondering if it could not be simplify.
thank in advance for the head up
here a snipped what i have done.
# Fetch exisiting resource_group
data "azurerm_resource_group" "rg-dock001" {
name = var.resource_group01
}
# Fetch vm network
data "azurerm_virtual_network" "vm_network" {
name = var.vm_network
resource_group_name = var.rg_name_network
}
output "azurerm_virtual_network" {
value = data.azurerm_virtual_network.vm_network.id
}
# Fetch vm subnet
data "azurerm_subnet" "vm_subnet" {
name = var.vm_subnet
resource_group_name = var.rg_name_network
virtual_network_name = var.vm_network
}
output "subnet_id" {
value = data.azurerm_subnet.vm_subnet.id
}
resource "azurerm_network_interface" "ens124-01" {
name = var.vm_nic01[count.index]
count = length(var.vm_nic01)
location = var.rg_location
resource_group_name = var.resource_group01
ip_configuration {
name = "internal"
subnet_id = data.azurerm_subnet.vm_subnet.id
private_ip_address_allocation = "Static"
private_ip_address = "10.241.25.${count.index + 10}"
}
tags = var.vm_tags
}
output "private_ip01" {
value = length(azurerm_network_interface.ens124-01.*.private_ip_address)
}
# Fetch existing image
data "azurerm_image" "custom_docker_image" {
name_regex = var.packer_image
sort_descending = true
resource_group_name = var.resource_group_image
}
output "image_id" {
value = data.azurerm_image.custom_docker_image.id
}
# create and display an SSH key
resource "tls_private_key" "ssh" {
algorithm = "RSA"
rsa_bits = 4096
}
output "tls_private_key" {
value = tls_private_key.ssh.private_key_pem
sensitive = true
}
resource "azurerm_linux_virtual_machine" "main01" {
name = var.vm_name01[count.index]
count = length(var.vm_name01)
resource_group_name = var.resource_group01
location = var.rg_location
size = "standard_ds3_v2"
admin_username = var.username
admin_password = var.password
disable_password_authentication = true
network_interface_ids = ["${element(azurerm_network_interface.ens124-01.*.id, count.index)}"]
source_image_id = data.azurerm_image.custom_docker_image.id
computer_name = var.vm_name01[count.index]
admin_ssh_key {
username = var.ssh_username
public_key = tls_private_key.ssh.public_key_openssh
}
os_disk {
name = "disk-int-dock-0${count.index + 1}"
storage_account_type = "Standard_LRS"
caching = "ReadWrite"
}
tags = var.vm_tags
}
data "azurerm_resource_group" "rg-dock002" {
name = var.resource_group02
}
resource "azurerm_network_interface" "ens124-02" {
name = var.vm_nic02[count.index]
count = length(var.vm_nic02)
location = var.rg_location
resource_group_name = var.resource_group02
ip_configuration {
name = "internal"
subnet_id = data.azurerm_subnet.vm_subnet.id
private_ip_address_allocation = "Static"
private_ip_address = "10.241.25.${count.index + 20}"
}
tags = var.vm_tags
}
output "private_ip02" {
value = length(azurerm_network_interface.ens124-02.*.private_ip_address)
}
resource "azurerm_linux_virtual_machine" "main02" {
name = var.vm_name02[count.index]
count = length(var.vm_name02)
resource_group_name = var.resource_group02
location = var.rg_location
size = "standard_ds3_v2"
admin_username = var.username
admin_password = var.password
disable_password_authentication = true
network_interface_ids = ["${element(azurerm_network_interface.ens124-02.*.id, count.index)}"]
source_image_id = data.azurerm_image.custom_docker_image.id
computer_name = var.vm_name02[count.index]
admin_ssh_key {
username = var.ssh_username
public_key = tls_private_key.ssh.public_key_openssh
}
os_disk {
name = "disk-int-dock-0${count.index + 1}"
storage_account_type = "Standard_LRS"
caching = "ReadWrite"
}
tags = var.vm_tags
}````
As per your requirement you can use something like below :
provider "azurerm" {
features {}
}
variable "VM" {
default= {
vm1={
rg_name= "ajaytest",
vnet_name="ajay-vnet",
subnet_name="default",
nic_name = "ansumanVM-nic",
private_ip="10.0.0.10",
vm_name="ansumanVM"
},
vm2={
rg_name= "kartiktest",
vnet_name="kartik-vnet",
subnet_name="default",
nic_name="terraformVM-nic",
private_ip="10.0.0.20",
vm_name="terraformVM"
}
}
}
variable "username" {
default="ansuman"
}
variable "password" {
default="Password#1234!"
}
data "azurerm_shared_image_version" "example" {
name = "0.0.1"
image_name = "UbuntuwithNginxinstalled"
gallery_name = "ansumantestgallery"
resource_group_name = "ansumantest"
}
data "azurerm_resource_group" "rg-dock" {
for_each = var.VM
name = each.value["rg_name"]
}
# Fetch vm network
data "azurerm_virtual_network" "vm_network" {
for_each = var.VM
name = each.value["vnet_name"]
resource_group_name = each.value["rg_name"]
}
# Fetch vm subnet
data "azurerm_subnet" "vm_subnet" {
for_each = var.VM
name = each.value["subnet_name"]
resource_group_name = each.value["rg_name"]
virtual_network_name = each.value["vnet_name"]
}
resource "azurerm_network_interface" "ens124" {
for_each = var.VM
name = each.value["nic_name"]
location = data.azurerm_resource_group.rg-dock[each.key].location
resource_group_name = data.azurerm_resource_group.rg-dock[each.key].name
ip_configuration {
name = "internal"
subnet_id = data.azurerm_subnet.vm_subnet[each.key].id
private_ip_address_allocation = "Static"
private_ip_address = each.value["private_ip"]
}
}
# create and display an SSH key
resource "tls_private_key" "ssh" {
algorithm = "RSA"
rsa_bits = 4096
}
output "tls_private_key" {
value = tls_private_key.ssh.private_key_pem
sensitive = true
}
resource "azurerm_linux_virtual_machine" "main" {
for_each = var.VM
name = each.value["vm_name"]
resource_group_name = data.azurerm_resource_group.rg-dock[each.key].name
location = data.azurerm_resource_group.rg-dock[each.key].location
size = "standard_ds3_v2"
admin_username = var.username
admin_password = var.password
disable_password_authentication = true
network_interface_ids = [azurerm_network_interface.ens124[format("%s", each.key)].id]
source_image_id = data.azurerm_shared_image_version.example.id
computer_name = each.key
admin_ssh_key {
username = var.username
public_key = tls_private_key.ssh.public_key_openssh
}
os_disk {
name = "disk-int-dock-${each.key}"
storage_account_type = "Standard_LRS"
caching = "ReadWrite"
}
}
Note: The admin username and the ssh username must be the same due the below limitation :
And the location of the VM ,Vnet ,Image and other resources must be the same.
Output:
Related
I'm trying to deploy a simple infrastructure in Azure through Terraform, the infrastructure is made of an Application Gateway (with Web Application Firewall, so the WAF_v2 version) with two virtual machines in the backend.
At the beginning I have implemented the Application Gateway (Standard_v2) without the WAF, and worked properly, but when I have implemented the WAF, I got the following error after lunching "terraform init" command (see attached screenshot also):
Error: Failed to query available provider packages
│
│ Could not retrieve the list of available versions for provider hashicorp/example: provider registry registry.terraform.io does not have a provider named
│ registry.terraform.io/hashicorp/example
│
│ All modules should specify their required_providers so that external consumers will get the correct providers when using a module. To see which modules are currently depending
│ on hashicorp/example, run the following command:
│ terraform providers
So I run the command "terraform providers" as suggested by Terraform and got this:
Providers required by configuration:
.
├── provider[registry.terraform.io/hashicorp/azurerm] >= 2.97.0
├── provider[registry.terraform.io/hashicorp/example]
└── provider[registry.terraform.io/hashicorp/random]
In the following you can see the Terraform code of my infrastructure:
terraform {
required_version = ">=0.12"
required_providers {
azurerm = {
source = "hashicorp/azurerm"
version = ">=2.97.0"
}
}
}
provider "azurerm" {
features {}
}
resource "azurerm_resource_group" "rg1" {
name = "myResourceGroupAG"
location = "francecentral"
}
resource "example_wafpolicy" "exampleWAF" {
name = "example_wafpolicy_name"
resource_group_name = azurerm_resource_group.rg1.name
location = azurerm_resource_group.rg1.location
custom_rules {
name = "Rule1"
priority = 1
rule_type = "MatchRule"
match_conditions {
match_variables {
variable_name = "RemoteAddr"
}
operator = "IPMatch"
negation_condition = false
match_values = ["XX.XX.XX.XX"]
}
action = "Block"
}
policy_settings {
enabled = true
mode = "Prevention"
request_body_check = true
file_upload_limit_in_mb = 100
max_request_body_size_in_kb = 128
}
managed_rules {
managed_rule_set {
type = "OWASP"
version = "3.2"
}
}
}
resource "azurerm_virtual_network" "vnet1" {
name = "myVNet"
resource_group_name = azurerm_resource_group.rg1.name
location = azurerm_resource_group.rg1.location
address_space = ["10.21.0.0/16"]
}
resource "azurerm_subnet" "frontend" {
name = "myAGSubnet"
resource_group_name = azurerm_resource_group.rg1.name
virtual_network_name = azurerm_virtual_network.vnet1.name
address_prefixes = ["10.21.0.0/24"]
}
resource "azurerm_subnet" "backend" {
name = "myBackendSubnet"
resource_group_name = azurerm_resource_group.rg1.name
virtual_network_name = azurerm_virtual_network.vnet1.name
address_prefixes = ["10.21.1.0/24"]
}
resource "azurerm_public_ip" "pip1" {
name = "myAGPublicIPAddress"
resource_group_name = azurerm_resource_group.rg1.name
location = azurerm_resource_group.rg1.location
allocation_method = "Static"
sku = "Standard"
}
resource "azurerm_application_gateway" "network" {
name = "myAppGateway"
resource_group_name = azurerm_resource_group.rg1.name
location = azurerm_resource_group.rg1.location
sku {
name = "WAF_v2"
tier = "WAF_v2"
capacity = 2
}
gateway_ip_configuration {
name = "my-gateway-ip-configuration"
subnet_id = azurerm_subnet.frontend.id
}
frontend_port {
name = var.frontend_port_name
port = 80
}
frontend_ip_configuration {
name = var.frontend_ip_configuration_name
public_ip_address_id = azurerm_public_ip.pip1.id
}
backend_address_pool {
name = var.backend_address_pool_name
}
backend_http_settings {
name = var.http_setting_name
cookie_based_affinity = "Disabled"
port = 80
protocol = "Http"
request_timeout = 20
}
http_listener {
name = var.listener_name
frontend_ip_configuration_name = var.frontend_ip_configuration_name
frontend_port_name = var.frontend_port_name
protocol = "Http"
firewall_policy_id = example_wafpolicy.exampleWAF.id
}
request_routing_rule {
name = var.request_routing_rule_name
rule_type = "Basic"
priority = 25
http_listener_name = var.listener_name
backend_address_pool_name = var.backend_address_pool_name
backend_http_settings_name = var.http_setting_name
}
firewall_policy_id {
id = example_wafpolicy.exampleWAF.id
}
waf_configuration {
content{
enabled = lookup(waf_configuration.value,"enabled",true)
file_upload_limit_mb = lookup(waf_configuration.value,"file_upload_limit_mb",30)
firewall_mode = lookup(waf_configuration.value,"firewall_mode","Prevention")
max_request_body_size_kb = lookup(waf_configuration.value,"max_request_body_size_kb",128)
request_body_check = lookup(waf_configuration.value,"request_body_check",true)
rule_set_type = lookup(waf_configuration.value,"rule_set_type","OWASP")
rule_set_version = lookup(waf_configuration.value,"rule_set_version", "3.1")
}
}
}
resource "azurerm_network_interface" "nic" {
count = 2
name = "nic-${count.index+1}"
location = azurerm_resource_group.rg1.location
resource_group_name = azurerm_resource_group.rg1.name
ip_configuration {
name = "nic-ipconfig-${count.index+1}"
subnet_id = azurerm_subnet.backend.id
private_ip_address_allocation = "Dynamic"
}
}
resource "azurerm_network_interface_application_gateway_backend_address_pool_association" "nic-assoc01" {
count = 2
network_interface_id = azurerm_network_interface.nic[count.index].id
ip_configuration_name = "nic-ipconfig-${count.index+1}"
backend_address_pool_id = tolist(azurerm_application_gateway.network.backend_address_pool).0.id
}
resource "random_password" "password" {
length = 16
special = true
lower = true
upper = true
numeric = true
}
resource "azurerm_windows_virtual_machine" "vm" {
count = 2
name = "myVM${count.index+1}"
resource_group_name = azurerm_resource_group.rg1.name
location = azurerm_resource_group.rg1.location
size = "Standard_DS1_v2"
admin_username = "azureadmin"
admin_password = random_password.password.result
network_interface_ids = [
azurerm_network_interface.nic[count.index].id,
]
os_disk {
caching = "ReadWrite"
storage_account_type = "Standard_LRS"
}
source_image_reference {
publisher = "MicrosoftWindowsServer"
offer = "WindowsServer"
sku = "2019-Datacenter"
version = "latest"
}
}
resource "azurerm_virtual_machine_extension" "vm-extensions" {
count = 2
name = "vm${count.index+1}-ext"
virtual_machine_id = azurerm_windows_virtual_machine.vm[count.index].id
publisher = "Microsoft.Compute"
type = "CustomScriptExtension"
type_handler_version = "1.10"
settings = <<SETTINGS
{
"commandToExecute": "powershell Add-WindowsFeature Web-Server; powershell Add-Content -Path \"C:\\inetpub\\wwwroot\\Default.htm\" -Value $($env:computername)"
}
SETTINGS
}
In the following the script with the variables:
variable "backend_address_pool_name" {
default = "myBackendPool"
}
variable "frontend_port_name" {
default = "myFrontendPort"
}
variable "frontend_ip_configuration_name" {
default = "myAGIPConfig"
}
variable "http_setting_name" {
default = "myHTTPsetting"
}
variable "listener_name" {
default = "myListener"
}
variable "request_routing_rule_name" {
default = "myRoutingRule"
}
variable "redirect_configuration_name" {
default = "myRedirectConfig"
}
variable "example_wafpolicy_name" {
default = "myFirewallPolicy"
}
At the beginning of the code you can see match_values = ["XX.XX.XX.XX"], the IP address is set in this manner just for opening this question in Stackoverflow, normally in my code there is a normal IP address.
I would really appreciate your help to fix this error and in general to deploy an Application Gateway with WAF and two virtual machines in the backend in Azure through Terraform.
I have tried to search something online but it seems that this topic has never been opened by someone.
Issue was caused because of the naming convention which used in terraform code base "example_wafpolicy" and terraform provider.
Solution:
Need to replace with below mention resource tag
resource "azurerm_web_application_firewall_policy" "example" {
Replicated the same code base in local, please find below code snippet.
Main if file as follows:
resource "azurerm_resource_group" "rg1" {
name = "************"
location = "West Europe"
}
resource "azurerm_web_application_firewall_policy" "exampleWAF" {
name = "example_wafpolicy_name"
resource_group_name = azurerm_resource_group.rg1.name
location = azurerm_resource_group.rg1.location
custom_rules {
name = "Rule1"
priority = 1
rule_type = "MatchRule"
match_conditions {
match_variables {
variable_name = "RemoteAddr"
}
operator = "IPMatch"
negation_condition = false
match_values = ["192.168.1.0/24", "10.0.0.0/24"]
}
action = "Block"
}
policy_settings {
enabled = true
mode = "Prevention"
request_body_check = true
file_upload_limit_in_mb = 100
max_request_body_size_in_kb = 128
}
managed_rules {
managed_rule_set {
type = "OWASP"
version = "3.2"
}
}
}
resource "azurerm_virtual_network" "vnet1" {
name = "myVNet"
resource_group_name = azurerm_resource_group.rg1.name
location = azurerm_resource_group.rg1.location
address_space = ["10.21.0.0/16"]
}
resource "azurerm_subnet" "frontend" {
name = "myAGSubnet"
resource_group_name = azurerm_resource_group.rg1.name
virtual_network_name = azurerm_virtual_network.vnet1.name
address_prefixes = ["10.21.0.0/24"]
}
resource "azurerm_subnet" "backend" {
name = "myBackendSubnet"
resource_group_name = azurerm_resource_group.rg1.name
virtual_network_name = azurerm_virtual_network.vnet1.name
address_prefixes = ["10.21.1.0/24"]
}
resource "azurerm_public_ip" "pip1" {
name = "myAGPublicIPAddress"
resource_group_name = azurerm_resource_group.rg1.name
location = azurerm_resource_group.rg1.location
allocation_method = "Dynamic"
sku = "Basic"
}
locals {
backend_address_pool_name = "${azurerm_virtual_network.vnet1.name}-beap"
frontend_port_name = "${azurerm_virtual_network.vnet1.name}-feport"
frontend_ip_configuration_name = "${azurerm_virtual_network.vnet1.name}-feip"
http_setting_name = "${azurerm_virtual_network.vnet1.name}-be-htst"
listener_name = "${azurerm_virtual_network.vnet1.name}-httplstn"
request_routing_rule_name = "${azurerm_virtual_network.vnet1.name}-rqrt"
redirect_configuration_name = "${azurerm_virtual_network.vnet1.name}-rdrcfg"
}
resource "azurerm_application_gateway" "network" {
name = "example-appgateway"
resource_group_name = azurerm_resource_group.rg1.name
location = azurerm_resource_group.rg1.location
sku {
name = "Standard_Small"
tier = "Standard"
capacity = 2
}
gateway_ip_configuration {
name = "my-gateway-ip-configuration"
subnet_id = azurerm_subnet.frontend.id
}
frontend_port {
name = local.frontend_port_name
port = 80
}
frontend_ip_configuration {
name = local.frontend_ip_configuration_name
public_ip_address_id = azurerm_public_ip.pip1.id
}
backend_address_pool {
name = local.backend_address_pool_name
}
backend_http_settings {
name = local.http_setting_name
cookie_based_affinity = "Disabled"
path = "/path1/"
port = 80
protocol = "Http"
request_timeout = 60
}
http_listener {
name = local.listener_name
frontend_ip_configuration_name = local.frontend_ip_configuration_name
frontend_port_name = local.frontend_port_name
protocol = "Http"
}
request_routing_rule {
name = local.request_routing_rule_name
rule_type = "Basic"
http_listener_name = local.listener_name
backend_address_pool_name = local.backend_address_pool_name
backend_http_settings_name = local.http_setting_name
}
}
resource "azurerm_network_interface" "nic" {
count = 2
name = "nic-${count.index+1}"
location = azurerm_resource_group.rg1.location
resource_group_name = azurerm_resource_group.rg1.name
ip_configuration {
name = "nic-ipconfig-${count.index+1}"
subnet_id = azurerm_subnet.backend.id
private_ip_address_allocation = "Dynamic"
}
}
resource "azurerm_network_interface_application_gateway_backend_address_pool_association" "nic-assoc01" {
count = 2
network_interface_id = azurerm_network_interface.nic[count.index].id
ip_configuration_name = "nic-ipconfig-${count.index+1}"
backend_address_pool_id = tolist(azurerm_application_gateway.network.backend_address_pool).0.id
}
resource "random_password" "password" {
length = 16
special = true
lower = true
upper = true
numeric = true
}
resource "azurerm_windows_virtual_machine" "vm" {
count = 2
name = "myVM${count.index+1}"
resource_group_name = azurerm_resource_group.rg1.name
location = azurerm_resource_group.rg1.location
size = "Standard_DS1_v2"
admin_username = "azureadmin"
admin_password = random_password.password.result
network_interface_ids = [
azurerm_network_interface.nic[count.index].id,
]
os_disk {
caching = "ReadWrite"
storage_account_type = "Standard_LRS"
}
source_image_reference {
publisher = "MicrosoftWindowsServer"
offer = "WindowsServer"
sku = "2019-Datacenter"
version = "latest"
}
}
resource "azurerm_virtual_machine_extension" "vm-extensions" {
count = 2
name = "vm${count.index+1}-ext"
virtual_machine_id = azurerm_windows_virtual_machine.vm[count.index].id
publisher = "Microsoft.Compute"
type = "CustomScriptExtension"
type_handler_version = "1.10"
settings = <<SETTINGS
{
"commandToExecute": "powershell Add-WindowsFeature Web-Server; powershell Add-Content -Path \"C:\\inetpub\\wwwroot\\Default.htm\" -Value $($env:computername)"
}
SETTINGS
}
provider tf file as follows:
terraform {
required_version = "~>1.3.3"
required_providers {
azurerm = {
source = "hashicorp/azurerm"
version = ">=3.0.0"
}
}
}
provider "azurerm" {
features {}
skip_provider_registration = true
}
upon running terraform plan and apply
terraform plan
terraform apply -auto-approve
Plan as follows:
Apply as follows
Azure Portal verification:
I'm following Neal Shah's instructions for deploying multiple VMs with multiple managed disks (https://www.nealshah.dev/posts/2020/05/terraform-for-azure-deploying-multiple-vms-with-multiple-managed-disks/#deploying-multiple-vms-with-multiple-datadisks)
everything works fine except for the azurerm_virtual_machine_data_disk_attachment resource which fails with the following error
│ Error: Invalid index
│
│ on main.tf line 103, in resource "azurerm_virtual_machine_data_disk_attachment" "managed_disk_attach":
│ 103: virtual_machine_id = azurerm_linux_virtual_machine.vms[element(split("_", each.key), 1)].id
│ ├────────────────
│ │ azurerm_linux_virtual_machine.vms is tuple with 3 elements
│ │ each.key is "datadisk_dca0-apache-cassandra-node0_disk00"
│
│ The given key does not identify an element in this collection value: a number is required.
my code is below:
locals {
vm_datadiskdisk_count_map = { for k in toset(var.nodes) : k => var.data_disk_count }
luns = { for k in local.datadisk_lun_map : k.datadisk_name => k.lun }
datadisk_lun_map = flatten([
for vm_name, count in local.vm_datadiskdisk_count_map : [
for i in range(count) : {
datadisk_name = format("datadisk_%s_disk%02d", vm_name, i)
lun = i
}
]
])
}
# create resource group
resource "azurerm_resource_group" "resource_group" {
name = format("%s-%s", var.dca, var.name)
location = var.location
}
# create availability set
resource "azurerm_availability_set" "vm_availability_set" {
name = format("%s-%s-availability-set", var.dca, var.name)
location = azurerm_resource_group.resource_group.location
resource_group_name = azurerm_resource_group.resource_group.name
}
# create Security Group to access linux
resource "azurerm_network_security_group" "linux_vm_nsg" {
name = format("%s-%s-linux-vm-nsg", var.dca, var.name)
location = azurerm_resource_group.resource_group.location
resource_group_name = azurerm_resource_group.resource_group.name
security_rule {
name = "AllowSSH"
description = "Allow SSH"
priority = 100
direction = "Inbound"
access = "Allow"
protocol = "Tcp"
source_port_range = "*"
destination_port_range = "22"
source_address_prefix = "*"
destination_address_prefix = "*"
}
}
# associate the linux NSG with the subnet
resource "azurerm_subnet_network_security_group_association" "linux_vm_nsg_association" {
subnet_id = "${data.azurerm_subnet.subnet.id}"
network_security_group_id = azurerm_network_security_group.linux_vm_nsg.id
}
# create NICs for apache cassandra hosts
resource "azurerm_network_interface" "vm_nics" {
depends_on = [azurerm_subnet_network_security_group_association.linux_vm_nsg_association]
count = length(var.nodes)
name = format("%s-%s-nic${count.index}", var.dca, var.name)
location = azurerm_resource_group.resource_group.location
resource_group_name = azurerm_resource_group.resource_group.name
ip_configuration {
name = format("%s-%s-apache-cassandra-ip", var.dca, var.name)
subnet_id = "${data.azurerm_subnet.subnet.id}"
private_ip_address_allocation = "Dynamic"
}
}
# create apache cassandra VMs
resource "azurerm_linux_virtual_machine" "vms" {
count = length(var.nodes)
name = element(var.nodes, count.index)
location = azurerm_resource_group.resource_group.location
resource_group_name = azurerm_resource_group.resource_group.name
network_interface_ids = [element(azurerm_network_interface.vm_nics.*.id, count.index)]
availability_set_id = azurerm_availability_set.vm_availability_set.id
size = var.vm_size
admin_username = var.admin_username
disable_password_authentication = true
admin_ssh_key {
username = var.admin_username
public_key = var.ssh_pub_key
}
source_image_id = var.source_image_id
os_disk {
caching = "ReadWrite"
storage_account_type = var.storage_account_type
disk_size_gb = var.os_disk_size_gb
}
}
# create data disk(s) for VMs
resource "azurerm_managed_disk" "managed_disk" {
for_each= toset([for j in local.datadisk_lun_map : j.datadisk_name])
name= each.key
location = azurerm_resource_group.resource_group.location
resource_group_name = azurerm_resource_group.resource_group.name
storage_account_type = var.storage_account_type
create_option = "Empty"
disk_size_gb = var.disk_size_gb
}
resource "azurerm_virtual_machine_data_disk_attachment" "managed_disk_attach" {
for_each = toset([for j in local.datadisk_lun_map : j.datadisk_name])
managed_disk_id = azurerm_managed_disk.managed_disk[each.key].id
virtual_machine_id = azurerm_linux_virtual_machine.vms[element(split("_", each.key), 1)].id
lun = lookup(local.luns, each.key)
caching = "ReadWrite"
}
anyone know how to accomplish this? thanks!
I've tried several different approaches to this but have been unsuccessful so far, I was expecting it to work as described in Neal's post
I was able to get this working. However, I have not tested adding/removing nodes/disks yet. But this working to create multiple VMs with multiple data disks attached to each VM.
I use a variable file that I source to substitute the variables in the *.tf files.
variables.tf
variable "azure_subscription_id" {
type = string
description = "Azure Subscription ID"
default = ""
}
variable "dca" {
type = string
description = "datacenter [dca0|dca2|dca4|dca6]."
default = ""
}
variable "location" {
type = string
description = "Location of the resource group."
default = ""
}
variable "resource_group" {
type = string
description = "resource group name."
default = ""
}
variable "subnet_name" {
type = string
description = "subnet name"
default = ""
}
variable "vnet_name" {
type = string
description = "vnet name"
default = ""
}
variable "vnet_rg" {
type = string
description = "vnet resource group"
default = ""
}
variable "vm_size" {
type = string
description = "vm size"
default = ""
}
variable "os_disk_size_gb" {
type = string
description = "vm os disk size gb"
default = ""
}
variable "data_disk_size_gb" {
type = string
description = "vm data disk size gb"
default = ""
}
variable "admin_username" {
type = string
description = "admin user name"
default = ""
}
variable "ssh_pub_key" {
type = string
description = "public key for admin user"
default = ""
}
variable "source_image_id" {
type = string
description = "image id"
default = ""
}
variable "os_disk_storage_account_type" {
type = string
description = ""
default = ""
}
variable "data_disk_storage_account_type" {
type = string
description = ""
default = ""
}
variable "vm_list" {
type = map(object({
hostname = string
}))
default = {
vm0 ={
hostname = "${dca}-${name}-node-0"
},
vm1 = {
hostname = "${dca}-${name}-node-1"
}
vm2 = {
hostname = "${dca}-${name}-node-2"
}
}
}
variable "disks_per_instance" {
type = string
description = ""
default = ""
}
terraform.tfvars
# subscription
azure_subscription_id = "${azure_subscription_id}"
# name and location
resource_group = "${dca}-${name}"
location = "${location}"
dca = "${dca}"
# Network
subnet_name = "${subnet_name}"
vnet_name = "${dca}vnet"
vnet_rg = "th-${dca}-vnet"
# VM
vm_size = "${vm_size}"
os_disk_size_gb = "${os_disk_size_gb}"
os_disk_storage_account_type = "${os_disk_storage_account_type}"
source_image_id = "${source_image_id}"
# User/key info
admin_username = "${admin_username}"
ssh_pub_key = ${ssh_pub_key}
# data disk info
data_disk_storage_account_type = "${data_disk_storage_account_type}"
data_disk_size_gb = "${data_disk_size_gb}"
disks_per_instance= "${disks_per_instance}"
main.tf
# set locals for multi data disks
locals {
vm_datadiskdisk_count_map = { for k, query in var.vm_list : k => var.disks_per_instance }
luns = { for k in local.datadisk_lun_map : k.datadisk_name => k.lun }
datadisk_lun_map = flatten([
for vm_name, count in local.vm_datadiskdisk_count_map : [
for i in range(count) : {
datadisk_name = format("datadisk_%s_disk%02d", vm_name, i)
lun = i
}
]
])
}
# create resource group
resource "azurerm_resource_group" "resource_group" {
name = format("%s", var.resource_group)
location = var.location
}
# create data disk(s)
resource "azurerm_managed_disk" "managed_disk" {
for_each = toset([for j in local.datadisk_lun_map : j.datadisk_name])
name = each.key
location = azurerm_resource_group.resource_group.location
resource_group_name = azurerm_resource_group.resource_group.name
storage_account_type = var.data_disk_storage_account_type
create_option = "Empty"
disk_size_gb = var.data_disk_size_gb
}
# create availability set
resource "azurerm_availability_set" "vm_availability_set" {
name = format("%s-availability-set", var.resource_group)
location = azurerm_resource_group.resource_group.location
resource_group_name = azurerm_resource_group.resource_group.name
}
# create Security Group to access linux
resource "azurerm_network_security_group" "linux_vm_nsg" {
name = format("%s-linux-vm-nsg", var.resource_group)
location = azurerm_resource_group.resource_group.location
resource_group_name = azurerm_resource_group.resource_group.name
security_rule {
name = "AllowSSH"
description = "Allow SSH"
priority = 100
direction = "Inbound"
access = "Allow"
protocol = "Tcp"
source_port_range = "*"
destination_port_range = "22"
source_address_prefix = "*"
destination_address_prefix = "*"
}
}
# associate the linux NSG with the subnet
resource "azurerm_subnet_network_security_group_association" "linux_vm_nsg_association" {
subnet_id = "${data.azurerm_subnet.subnet.id}"
network_security_group_id = azurerm_network_security_group.linux_vm_nsg.id
}
# create NICs for vms
resource "azurerm_network_interface" "nics" {
depends_on = [azurerm_subnet_network_security_group_association.linux_vm_nsg_association]
for_each = var.vm_list
name = "${each.value.hostname}-nic"
location = azurerm_resource_group.resource_group.location
resource_group_name = azurerm_resource_group.resource_group.name
ip_configuration {
name = format("%s-proxy-ip", var.resource_group)
subnet_id = "${data.azurerm_subnet.subnet.id}"
private_ip_address_allocation = "Dynamic"
}
}
# create VMs
resource "azurerm_linux_virtual_machine" "vms" {
for_each = var.vm_list
name = each.value.hostname
location = azurerm_resource_group.resource_group.location
resource_group_name = azurerm_resource_group.resource_group.name
network_interface_ids = [azurerm_network_interface.nics[each.key].id]
availability_set_id = azurerm_availability_set.vm_availability_set.id
size = var.vm_size
source_image_id = var.source_image_id
custom_data = filebase64("cloud-init.sh")
admin_username = var.admin_username
disable_password_authentication = true
admin_ssh_key {
username = var.admin_username
public_key = var.ssh_pub_key
}
os_disk {
caching = "ReadWrite"
storage_account_type = var.os_disk_storage_account_type
disk_size_gb = var.os_disk_size_gb
}
}
# attache data disks vms
resource "azurerm_virtual_machine_data_disk_attachment" "managed_disk_attach" {
for_each = toset([for j in local.datadisk_lun_map : j.datadisk_name])
managed_disk_id = azurerm_managed_disk.managed_disk[each.key].id
virtual_machine_id = azurerm_linux_virtual_machine.vms[element(split("_", each.key), 1)].id
lun = lookup(local.luns, each.key)
caching = "ReadWrite"
}
I am trying to create multiple Azure VM and not able to assign VMs to different availability_set. Please see my code below:
Module "vm_dev":
terraform {
required_providers {
azurerm = {
source = "hashicorp/azurerm"
version = "2.82.0"
}
}
}
provider "azurerm" {
features {}
}
resource "azurerm_resource_group" "rg_dev" {
name = "MYORG_RG_DEV"
location = var.location
}
resource "azurerm_network_interface" "node_master" {
for_each = var.instances_master
name = "${var.hostname_prefix}-${each.key}-nic"
resource_group_name = azurerm_resource_group.rg_dev.name
location = azurerm_resource_group.rg_dev.location
internal_dns_name_label = "${var.hostname_prefix}-${each.key}"
ip_configuration {
name = "primary"
primary = true
subnet_id = var.subnet_id
private_ip_address = each.value.ip
private_ip_address_allocation = "Static"
private_ip_address_version = "IPv4"
}
}
resource "azurerm_linux_virtual_machine" "node_master" {
for_each = var.instances_master
name = "${var.hostname_prefix}-${each.key}"
computer_name = "${var.hostname_prefix}-${each.key}"
size = var.vm_size
resource_group_name = azurerm_resource_group.rg_dev.name
location = azurerm_resource_group.rg_dev.location
network_interface_ids = [azurerm_network_interface.node_master[each.key].id]
os_disk {
name = "${var.hostname_prefix}-${each.key}-disk-os"
storage_account_type = "StandardSSD_LRS"
caching = "ReadWrite"
}
source_image_reference {
publisher = "Canonical"
offer = "UbuntuServer"
sku = "18.04-LTS"
version = "latest"
}
admin_username = "myuser"
admin_ssh_key {
username = "myuser"
public_key = file("id.pub")
}
disable_password_authentication = true
}
resource "azurerm_network_interface" "node_data" {
for_each = var.instances_data
name = "${var.hostname_prefix}-${each.key}-nic"
resource_group_name = azurerm_resource_group.rg_dev.name
location = azurerm_resource_group.rg_dev.location
internal_dns_name_label = "${var.hostname_prefix}-${each.key}"
ip_configuration {
name = "primary"
primary = true
subnet_id = var.subnet_id
private_ip_address = each.value.ip
private_ip_address_allocation = "Static"
private_ip_address_version = "IPv4"
}
}
resource "azurerm_linux_virtual_machine" "node_data" {
for_each = var.instances_data
name = "${var.hostname_prefix}-${each.key}"
computer_name = "${var.hostname_prefix}-${each.key}"
size = var.vm_size
resource_group_name = azurerm_resource_group.rg_dev.name
location = azurerm_resource_group.rg_dev.location
network_interface_ids = [azurerm_network_interface.node_data[each.key].id]
os_disk {
name = "${var.hostname_prefix}-${each.key}-disk-os"
storage_account_type = "StandardSSD_LRS"
caching = "ReadWrite"
}
source_image_reference {
publisher = "Canonical"
offer = "UbuntuServer"
sku = "18.04-LTS"
version = "latest"
}
admin_username = "myuser"
admin_ssh_key {
username = "myuser"
public_key = file("id.pub")
}
disable_password_authentication = true
}
vm.tf:
module "vm_dev" {
source = "./vm_dev"
vm_size = "Standard_D4s_v3"
hostname_prefix = "myorg"
group_name_prefix = var.group_prefix
location = var.location
subnet_id = local.subnet_id
ssh_key = local.ssh_public_key
instances_master = {
"aa-elastic-master-0" = { ip = "10.0.100.1" }
"aa-elastic-master-1" = { ip = "10.0.100.2" }
"xx-elastic-master-0" = { ip = "10.0.99.1" }
"xx-elastic-master-1" = { ip = "10.0.99.2" }
}
instances_data = {
"aa-elastic-data-0" = { ip = "10.0.100.3" }
"aa-elastic-data-1" = { ip = "10.0.100.4" }
"aa-elastic-data-2" = { ip = "10.0.100.5" }
"xx-elastic-data-0" = { ip = "10.0.99.3" }
"xx-elastic-data-1" = { ip = "10.0.99.4" }
"xx-elastic-data-2" = { ip = "10.0.99.5" }
}
}
This works fine and I am able to create VMs. So far each VM is being created without assigning to availability_set. I would like to specify to which availability_set each VM belongs to, something like this:
instances_master = {
"aa-elastic-master-0" = { ip = "10.0.100.1", as = "azurerm_availability_set.as_aamaster.id" }
"aa-elastic-master-1" = { ip = "10.0.100.2", as = "azurerm_availability_set.as_aamaster.id" }
"xx-elastic-master-0" = { ip = "10.0.99.1", as = "azurerm_availability_set.as_xxmaster.id" }
"xx-elastic-master-1" = { ip = "10.0.99.2", as = "azurerm_availability_set.as_xxmaster.id" }
}
instances_data = {
"aa-elastic-data-0" = { ip = "10.0.100.3", as = "azurerm_availability_set.as_aadata.id" }
"aa-elastic-data-1" = { ip = "10.0.100.4", as = "azurerm_availability_set.as_aadata.id" }
"aa-elastic-data-2" = { ip = "10.0.100.5", as = "azurerm_availability_set.as_aadata.id" }
"xx-elastic-data-0" = { ip = "10.0.99.3", as = "azurerm_availability_set.as_xxdata.id" }
"xx-elastic-data-1" = { ip = "10.0.99.4", as = "azurerm_availability_set.as_xxdata.id" }
"xx-elastic-data-2" = { ip = "10.0.99.5", as = "azurerm_availability_set.as_xxdata.id" }
}
adding in module following code:
resource "azurerm_availability_set" "as_aamaster" {
name = "${var.hostname_prefix}-as-aamaster"
resource_group_name = azurerm_resource_group.rg_dev.name
location = azurerm_resource_group.rg_dev.location
managed = true
}
resource "azurerm_linux_virtual_machine" "node_master" {
for_each = var.instances_master
name = "${var.hostname_prefix}-${each.key}"
computer_name = "${var.hostname_prefix}-${each.key}"
size = var.vm_size
resource_group_name = azurerm_resource_group.rg_dev.name
location = azurerm_resource_group.rg_dev.location
availability_set_id = each.value.as
network_interface_ids = [azurerm_network_interface.node_master[each.key].id]
...
gives me error
Error: Cannot parse Azure ID: parse "azurerm_availability_set.as_aamaster.id": invalid URI for request
on vm_dev/main.tf line 72, in resource "azurerm_linux_virtual_machine" "node_master":
72: availability_set_id = each.value.as
Any advice is appreciated.
Thanks
I tested your code and it failed with the same error as below :
So , For Solution as Ivan Ignatiev has suggested you will have to use the below :
instances_master = {
"aa-elastic-master-0" = { ip = "10.0.2.1", as = "${azurerm_availability_set.as_aamaster.id}" }
"aa-elastic-master-1" = { ip = "10.0.2.2", as = "${azurerm_availability_set.as_aamaster.id}" }
"xx-elastic-master-0" = { ip = "10.0.2.3", as = "${azurerm_availability_set.as_xxmaster.id}" }
"xx-elastic-master-1" = { ip = "10.0.2.4", as = "${azurerm_availability_set.as_xxmaster.id}" }
}
instances_data = {
"aa-elastic-data-0" = { ip = "10.0.2.5", as = "${azurerm_availability_set.as_aadata.id}" }
"aa-elastic-data-1" = { ip = "10.0.2.6", as = "${azurerm_availability_set.as_aadata.id}" }
"aa-elastic-data-2" = { ip = "10.0.2.7", as = "${azurerm_availability_set.as_aadata.id}" }
"xx-elastic-data-0" = { ip = "10.0.2.8", as = "${azurerm_availability_set.as_xxdata.id}" }
"xx-elastic-data-1" = { ip = "10.0.2.9", as = "${azurerm_availability_set.as_xxdata.id}" }
"xx-elastic-data-2" = { ip = "10.0.2.10", as = "${azurerm_availability_set.as_xxdata.id}" }
}
main.tf
resource "azurerm_network_interface" "node_master" {
for_each = var.instances_master
name = "ansuman-${each.key}-nic"
resource_group_name = azurerm_resource_group.rg_dev.name
location = azurerm_resource_group.rg_dev.location
internal_dns_name_label = "ansuman-${each.key}"
ip_configuration {
name = "primary"
primary = true
subnet_id = azurerm_subnet.example.id
private_ip_address = each.value.ip
private_ip_address_allocation = "Static"
private_ip_address_version = "IPv4"
}
}
resource "azurerm_availability_set" "as_aamaster" {
name = "ansuman-as-aamaster"
resource_group_name = azurerm_resource_group.rg_dev.name
location = azurerm_resource_group.rg_dev.location
managed = true
}
resource "azurerm_availability_set" "as_xxmaster" {
name = "ansuman-as-xxmaster"
resource_group_name = azurerm_resource_group.rg_dev.name
location = azurerm_resource_group.rg_dev.location
managed = true
}
resource "azurerm_linux_virtual_machine" "node_master" {
for_each = var.instances_master
name = "ansuman-${each.key}"
computer_name = "ansuman-${each.key}"
size = "Standard_B1s"
resource_group_name = azurerm_resource_group.rg_dev.name
location = azurerm_resource_group.rg_dev.location
availability_set_id = each.value.as
network_interface_ids = [azurerm_network_interface.node_master[each.key].id]
os_disk {
name = "ansuman-${each.key}-disk-os"
storage_account_type = "StandardSSD_LRS"
caching = "ReadWrite"
}
source_image_reference {
publisher = "Canonical"
offer = "UbuntuServer"
sku = "18.04-LTS"
version = "latest"
}
admin_username = "myuser"
admin_ssh_key {
username = "myuser"
public_key = file("~/.ssh/id_rsa.pub")
}
disable_password_authentication = true
}
resource "azurerm_network_interface" "node_data" {
for_each = var.instances_data
name = "ansuman-${each.key}-nic"
resource_group_name = azurerm_resource_group.rg_dev.name
location = azurerm_resource_group.rg_dev.location
internal_dns_name_label = "ansuman-${each.key}"
ip_configuration {
name = "primary"
primary = true
subnet_id = azurerm_subnet.example.id
private_ip_address = each.value.ip
private_ip_address_allocation = "Static"
private_ip_address_version = "IPv4"
}
}
resource "azurerm_availability_set" "as_aadata" {
name = "ansuman-as-aadata"
resource_group_name = azurerm_resource_group.rg_dev.name
location = azurerm_resource_group.rg_dev.location
managed = true
}
resource "azurerm_availability_set" "as_xxdata" {
name = "ansuman-as-xxdata"
resource_group_name = azurerm_resource_group.rg_dev.name
location = azurerm_resource_group.rg_dev.location
managed = true
}
resource "azurerm_linux_virtual_machine" "node_data" {
for_each = var.instances_data
name = "ansuman-${each.key}"
computer_name = "ansuman-${each.key}"
size = "Standard_B1s"
resource_group_name = azurerm_resource_group.rg_dev.name
location = azurerm_resource_group.rg_dev.location
availability_set_id = each.value.as
network_interface_ids = [azurerm_network_interface.node_data[each.key].id]
os_disk {
name = "ansuman-${each.key}-disk-os"
storage_account_type = "StandardSSD_LRS"
caching = "ReadWrite"
}
source_image_reference {
publisher = "Canonical"
offer = "UbuntuServer"
sku = "18.04-LTS"
version = "latest"
}
admin_username = "myuser"
admin_ssh_key {
username = "myuser"
public_key = file("~/.ssh/id_rsa.pub")
}
disable_password_authentication = true
}
Output:
AnsumanBal-MT, this did not work for me, I have added comment above, but i was able to solve this via:
"aa-elastic-master-0" = { ip = "10.0.2.1", as = "0" }
"xx-elastic-master-0" = { ip = "10.0.2.3", as = "1" }
in module:
resource "azurerm_availability_set" "as_dev" {
count = 5
name = "${var.hostname_prefix}-dev-${element(var.availability_set_name, count.index)}-as"
resource_group_name = azurerm_resource_group.rg_dev.name
location = var.location
}
for azurerm_linux_virtual_machine added:
availability_set_id = azurerm_availability_set.as_dev[each.value.as].id
variable:
variable "availability_set_name" {
description = "Availability set name that the VMs will be created in"
type = list(any)
default = ["aamaster", "xxmaster", "aadata", ....]
}
I’m trying to create a VM in Azure using below config.
resource “azurerm_virtual_machine” “VM38” {
name = “VM38”
resource_group_name = data.azurerm_resource_group.myRG.name
location = data.azurerm_resource_group.myRG.location
vm_size = “Standard_F16s_v2”
delete_os_disk_on_termination = true
delete_data_disks_on_termination = true
os_profile {
computer_name = “vm38”
admin_username = “adminuser”
admin_password = “Password1234!”
custom_data = base64encode(data.cloudinit_config.hybrid_vm38_cloudinit_cfg.rendered)
}
os_profile_linux_config {
disable_password_authentication = false
}
storage_image_reference {
id = data.azurerm_image.my_image.id
}
depends_on = [aws_instance.vm12]
storage_os_disk {
name = “VMDisk”
create_option = “FromImage”
caching = “ReadWrite”
#disk_size_gb = 16
#os_type = “Linux”
#managed_disk_type = “Standard_LRS”
vhd_uri = var.vmVHDURI
}
network_interface_ids = [azurerm_network_interface.mgmtNwIntf.id, azurerm_network_interface.transportNwIntf.id]
}
When I execute terraform apply I’m getting below error…
Error: compute.VirtualMachinesClient#CreateOrUpdate: Failure sending request: StatusCode=0 – Original Error: autorest/azure: Service returned an error. Status= Code=“PropertyChangeNotAllowed” Message=“Changing property ‘osDisk.name’ is not allowed.” Target=“osDisk.name”
with azurerm_virtual_machine.VM38,
on az_virtual_machine.tf line 1, in resource “azurerm_virtual_machine” “VM38”:
1: resource “azurerm_virtual_machine” “VM38” {
Please let me know how to resolve this issue.
Terraform and Azure provider version details are given below:
Terraform v1.0.8
on linux_amd64
provider registry.terraform.io/hashicorp/azurerm v2.79.1
Thanks & Regards,
-Ravi
**In terraform.tfvars**
resourceGroupName = "myResourceGroup"
deviceImageName = "myDeviceImageName"
**In cloudinit_config.tf**
data "cloudinit_config" "hybrid_vm38_cloudinit_cfg" {
gzip = false
base64_encode = false
depends_on = [aws_instance.hybrid_vm12]
part {
filename = "cloud-config"
content_type = "text/cloud-config"
content = file("cloudinit/vm38_cloud_config.yaml")
}
part {
filename = "config-C8K.txt"
content_type = "text/cloud-boothook"
content = file("cloudinit/vm38_cloud_boothook.cfg")
}
}
**In az_resource_group.tf**
data "azurerm_resource_group" "vm38RG" {
name = var.resourceGroupName
}
**In az_image.tf**
data "azurerm_image" "deviceImage" {
name = var.deviceImageName
resource_group_name = data.azurerm_resource_group.vm38RG.name
}
**In az_virtual_network.tf**
resource "azurerm_virtual_network" "vm38VirtualNw" {
name = "vm38VirtualNw"
address_space = ["30.0.0.0/16"]
location = "eastus"
resource_group_name = data.azurerm_resource_group.vm38RG.name
tags = {
environment = "My virtual network"
}
}
**In az_subnet.tf**
resource "azurerm_subnet" "vm38MgmtSubnet" {
name = "vm38MgmtSubnet"
resource_group_name = data.azurerm_resource_group.vm38RG.name
virtual_network_name = azurerm_virtual_network.vm38VirtualNw.name
address_prefixes = ["30.0.11.0/24"]
}
resource "azurerm_subnet" "vm38TransportSubnet" {
name = "vm38TransportSubnet"
resource_group_name = data.azurerm_resource_group.vm38RG.name
virtual_network_name = azurerm_virtual_network.vm38VirtualNw.name
address_prefixes = ["30.0.12.0/24"]
}
**In az_network_interface.tf**
resource "azurerm_network_interface" "vm38MgmtNwIntf" {
name = "vm38MgmtNwIntf"
location = data.azurerm_resource_group.vm38RG.location
resource_group_name = data.azurerm_resource_group.vm38RG.name
ip_configuration {
name = "vm38MgmtPvtIP"
subnet_id = azurerm_subnet.vm38MgmtSubnet.id
private_ip_address_allocation = "Dynamic"
public_ip_address_id = azurerm_public_ip.vm38MgmtPublicIP.id
}
}
resource "azurerm_network_interface" "vm38TransportNwIntf" {
name = "vm38TransportNwIntf"
location = data.azurerm_resource_group.vm38RG.location
resource_group_name = data.azurerm_resource_group.vm38RG.name
ip_configuration {
name = "vm38TransportPvtIP"
subnet_id = azurerm_subnet.vm38TransportSubnet.id
private_ip_address_allocation = "Dynamic"
public_ip_address_id = azurerm_public_ip.vm38TransportPublicIP.id
}
}
**In az_virtual_machine.tf**
resource "azurerm_virtual_machine" "VM38" {
name = "VM38"
resource_group_name = data.azurerm_resource_group.vm38RG.name
location = data.azurerm_resource_group.vm38RG.location
vm_size = "Standard_F16s_v2"
delete_os_disk_on_termination = true
#delete_data_disks_on_termination = true
os_profile {
computer_name = "vm38"
admin_username = "adminuser"
admin_password = "Password1234!"
custom_data = base64encode(data.cloudinit_config.hybrid_vm38_cloudinit_cfg.rendered)
}
os_profile_linux_config {
disable_password_authentication = false
}
storage_image_reference {
id = data.azurerm_image.deviceImage.id
}
depends_on = [aws_instance.hybrid_vm12]
storage_os_disk {
name = "osDisk"
create_option = "FromImage"
caching = "ReadWrite"
#disk_size_gb = 16
#os_type = "Linux"
managed_disk_type = "Standard_LRS"
}
/*
storage_data_disk {
name = "vm38SecondaryDisk"
caching = "ReadWrite"
create_option = "Empty"
disk_size_gb = 2048
lun = 0
managed_disk_type = "Premium_LRS"
}
*/
network_interface_ids = [
azurerm_network_interface.vm38MgmtNwIntf.id,
azurerm_network_interface.vm38TransportNwIntf.id
]
}
You can't change the os_disk name while creating the VM. It should be "osdisk" or something starting with that.
I tested using the below code:
provider "azurerm" {
features {}
}
resource "azurerm_resource_group" "example" {
name = "ansuman-resources"
location = "West US 2"
}
resource "azurerm_virtual_network" "example" {
name = "ansuman-network"
address_space = ["10.0.0.0/16"]
location = "${azurerm_resource_group.example.location}"
resource_group_name = "${azurerm_resource_group.example.name}"
}
resource "azurerm_subnet" "example" {
name = "internal"
resource_group_name = "${azurerm_resource_group.example.name}"
virtual_network_name = "${azurerm_virtual_network.example.name}"
address_prefixes = ["10.0.2.0/24"]
}
resource "azurerm_network_interface" "example" {
name = "ansuman-nic"
location = "${azurerm_resource_group.example.location}"
resource_group_name = "${azurerm_resource_group.example.name}"
ip_configuration {
name = "testconfiguration1"
subnet_id = "${azurerm_subnet.example.id}"
private_ip_address_allocation = "Dynamic"
}
}
# we assume that this Custom Image already exists
data "azurerm_image" "custom" {
name = "ansumantestvm-image-20211007225625"
resource_group_name = "resourcegroup"
}
resource "azurerm_virtual_machine" "example" {
name = "ansuman-vm"
location = "${azurerm_resource_group.example.location}"
resource_group_name = "${azurerm_resource_group.example.name}"
network_interface_ids = ["${azurerm_network_interface.example.id}"]
vm_size = "Standard_F2"
# This means the OS Disk will be deleted when Terraform destroys the Virtual Machine
# NOTE: This may not be optimal in all cases.
delete_os_disk_on_termination = true
storage_image_reference {
id = "${data.azurerm_image.custom.id}"
}
storage_os_disk {
name = "osdisk"
caching = "ReadWrite"
create_option = "FromImage"
managed_disk_type = "Standard_LRS"
}
os_profile {
computer_name = "hostname"
admin_username = "testadmin"
admin_password = "Password1234!"
}
os_profile_windows_config {
}
}
Output:
Note: Please make sure while creating the image from the original VM , first generalize it . If its not generalized then VM created from the custom image will get stuck in creating state and will not be able to boot up.
If you want to change the osdisk name to something of your choice then as a solution try creating the managed os disk first from the image using create option "copy" or "import" and then attach the disk while creating the VM as creating managed disk from custom image is also not supported ,it can be only done for platform image or marketplace image . You can refer this GitHub issue and this Github issue.
Reference terraform code for similar issue to give custom name to osdisk created from platform image/ market place image which Charles Xu has done in this SO thread.
Terraform code is below
module "centos-vm-author-2" {
source = "terraform.automation.temp.com.au/temp/temp-linux-vm/azurerm"
version = "6.7.0"
location = var.resource_location
resource_group_name = var.resource_group_name_2
vm_count = "1"
tags = local.tags
size = var.vm_size
hostname_prefix = var.hostname_prefix
hostname_suffix_start_range = "491"
image_publisher = "OpenLogic"
image_offer = "Centos"
image_sku = "7_9"
subnet_id = var.auth_pub_subnet_id
admin_username = "azureadmin"
availability_set_id = azurerm_availability_set.aemfeature1authoras.id
patching_tags = local.patching_tags
ansible_vault_key = var.ansible_vault_key
log_to_loganalytics = false
ou_tags = local.ou_tags
os_disk_size = var.os_size_gb
os_disk_type = var.storage_account_type
server_access_memberships = ["CN=DSTDEVOPS,OU=DistributionGroups,OU=Groups,OU=Resources,DC=temp,DC=int"]
sudoers = ["%DSTDEVOPS"]
data_disks = [
[
{
disk_size_gb = var.disk_size_gb
storage_account_type = var.storage_account_type
caching = "ReadWrite"
create_option = "Empty"
source_resource_id = ""
write_accelerator_enabled = false
}
]
]
}
resource "null_resource" "centos-vm-author-ansible" {
provisioner "local-exec" {
command = <<EOF
ansible-playbook -i '${join(",", azurerm_network_interface.centos-vm-author-2.*.private_ip_address)},'-e ansible_user=${var.admin_username} -e "role_name=automate-author" main.yaml
EOF
}
depends_on = [
module.centos-vm-author-2
]
}
}
Basically I want to tell Ansible the Private IP onto which it should execute the role.
I am getting error like below,
Error: [0m[0m[1mReference to undeclared resource[0m
on main.tf line 236, in resource "null_resource" "centos-vm-author-ansible":
ansible-playbook -i '${join(",", [4mazurerm_network_interface.centos-vm-author-2.*.private_ip_address)},'-e ansible_user=${var.admin_username} -e "role_name=automate-author" main.yaml
A managed resource "azurerm_network_interface" "centos-vm-author-2" has not
been declared in the root module.
Sincerely appreciate any help to understand what is the issue and how to have it resolved.
P.S: The TF Module code is like below:
resource "azurerm_network_interface" "main" {
count = var.vm_count
name = "${format("${var.hostname_prefix}%04d", var.hostname_suffix_start_range + count.index, )}-nic"
location = var.location
resource_group_name = var.resource_group_name
enable_accelerated_networking = var.enable_accelerated_networking
ip_configuration {
name = "${format("${var.hostname_prefix}%04d", var.hostname_suffix_start_range + count.index, )}-ipconfig"
subnet_id = var.subnet_id
private_ip_address_allocation = var.private_ip_address_allocation
private_ip_address = var.private_ip_address
public_ip_address_id = var.enable_public_ip_address ? azurerm_public_ip.main[count.index].id : null
}
tags = var.tags
}
resource "azurerm_network_interface_backend_address_pool_association" "lbconf" {
count = var.backend_address_pool_id == null ? 0 : var.vm_count
network_interface_id = azurerm_network_interface.main[count.index].id
ip_configuration_name = azurerm_network_interface.main[count.index].ip_configuration[0].name
backend_address_pool_id = var.backend_address_pool_id
}
resource "azurerm_linux_virtual_machine" "main" {
count = var.vm_count
name = format("${var.hostname_prefix}%04d", var.hostname_suffix_start_range + count.index, )
location = var.location
resource_group_name = var.resource_group_name
admin_username = var.admin_username
admin_ssh_key {
username = var.admin_username
public_key = chomp(tls_private_key.bootstrap_private_key.public_key_openssh)
}
disable_password_authentication = var.disable_password_authentication
network_interface_ids = [azurerm_network_interface.main[count.index].id]
size = var.size
availability_set_id = var.availability_set_id
source_image_reference {
publisher = var.image_publisher
offer = var.image_offer
sku = var.image_sku
version = var.image_version
}
os_disk {
name = "${format("${var.hostname_prefix}%04d", var.hostname_suffix_start_range + count.index, )}-osdisk"
caching = "ReadWrite"
storage_account_type = var.os_disk_type
disk_size_gb = var.os_disk_size
}
dynamic "identity" {
for_each = var.identity
content {
type = identity.value["type"]
identity_ids = identity.value["type"] == "SystemAssigned" ? [] : identity.value["identity_ids"]
}
}
dynamic "plan" {
for_each = var.marketplace_image ? [1] : []
content {
name = var.image_sku
product = var.image_offer
publisher = var.image_publisher
}
}
# boot_diagnostics {
# storage_account_uri = var.boot_diagnostics_storage_uri
# }
tags = var.ou_tags == null ? merge(var.tags, var.patching_tags) : merge(var.tags, var.ou_tags, var.patching_tags)
}
To refer to your module, instead of:
azurerm_network_interface.centos-vm-author-2.*.private_ip_address
it should be:
module.centos-vm-author-2.private_ip_addresses