Terraform - Import Azure VMs to state file using modules

I'm creating VMs using the script below that begins with "# Script to create VM". The script is called from a higher-level directory so the VMs are created via modules; the call looks something like the code below starting with "#Template..". The problem is that we are missing state for a few VMs that were created during a previous run. I've tried importing a VM itself, but looking at the state file, the imported entry looks nothing like the ones already there that were created with the bottom script. Any help would be great.
#Template to call VM script below
module "<virtual_machine_name>" {
  source                = "./vm"
  virtual_machine_name  = "<virtual_machine_name>"
  resource_group_name   = "<resource_group_name>"
  availability_set_name = "<availability_set_name>"
  virtual_machine_size  = "<virtual_machine_size>"
  subnet_name           = "<subnet_name>"
  private_ip            = "<private_ip>"

  # Optional:
  # production     = true                    (default is false)
  # data_disk_name = ["<disk1>", "<disk2>"]
  # data_disk_size = ["50", "100"]           (size is in GB)
}
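For illustration, a filled-in call might look like this (the values here are hypothetical, just substituting the placeholders above):
module "vm01" {
  source                = "./vm"
  virtual_machine_name  = "vm01"
  resource_group_name   = "rg-app01"
  availability_set_name = "avset-app01"
  virtual_machine_size  = "Standard_DS1_v2"
  subnet_name           = "snet-app01"
  private_ip            = "10.0.1.10"
}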
# Script to create VM
data "azurerm_resource_group" "rgdata02" {
  name = "${var.resource_group_name}"
}

data "azurerm_subnet" "sndata02" {
  name                 = "${var.subnet_name}"
  resource_group_name  = "${var.core_resource_group_name}"
  virtual_network_name = "${var.virtual_network_name}"
}

data "azurerm_availability_set" "availsetdata02" {
  name                = "${var.availability_set_name}"
  resource_group_name = "${var.resource_group_name}"
}

data "azurerm_backup_policy_vm" "bkpoldata02" {
  name                = "${var.backup_policy_name}"
  recovery_vault_name = "${var.recovery_services_vault_name}"
  resource_group_name = "${var.core_resource_group_name}"
}

data "azurerm_log_analytics_workspace" "law02" {
  name                = "${var.log_analytics_workspace_name}"
  resource_group_name = "${var.core_resource_group_name}"
}
#===================================================================
# Create NIC
#===================================================================
resource "azurerm_network_interface" "vmnic02" {
  name                = "nic${var.virtual_machine_name}"
  location            = "${data.azurerm_resource_group.rgdata02.location}"
  resource_group_name = "${var.resource_group_name}"

  ip_configuration {
    name                          = "ipcnfg${var.virtual_machine_name}"
    subnet_id                     = "${data.azurerm_subnet.sndata02.id}"
    private_ip_address_allocation = "Static"
    private_ip_address            = "${var.private_ip}"
  }
}
#===================================================================
# Create VM with Availability Set
#===================================================================
resource "azurerm_virtual_machine" "vm02" {
  count                 = var.avail_set != "" ? 1 : 0
  depends_on            = [azurerm_network_interface.vmnic02]
  name                  = "${var.virtual_machine_name}"
  location              = "${data.azurerm_resource_group.rgdata02.location}"
  resource_group_name   = "${var.resource_group_name}"
  network_interface_ids = [azurerm_network_interface.vmnic02.id]
  vm_size               = "${var.virtual_machine_size}"
  availability_set_id   = "${data.azurerm_availability_set.availsetdata02.id}"
  tags                  = var.tags

  # This means the OS Disk will be deleted when Terraform destroys the Virtual Machine
  # NOTE: This may not be optimal in all cases.
  delete_os_disk_on_termination = true

  os_profile {
    computer_name  = "${var.virtual_machine_name}"
    admin_username = "__VMUSER__"
    admin_password = "__VMPWD__"
  }

  os_profile_linux_config {
    disable_password_authentication = false
  }

  storage_image_reference {
    id = "${var.image_id}"
  }

  storage_os_disk {
    name              = "${var.virtual_machine_name}osdisk"
    caching           = "ReadWrite"
    create_option     = "FromImage"
    managed_disk_type = "Premium_LRS"
    os_type           = "Linux"
  }

  boot_diagnostics {
    enabled     = true
    storage_uri = "${var.boot_diagnostics_uri}"
  }
}
#===================================================================
# Create VM without Availability Set
#===================================================================
resource "azurerm_virtual_machine" "vm03" {
  count                 = var.avail_set == "" ? 1 : 0
  depends_on            = [azurerm_network_interface.vmnic02]
  name                  = "${var.virtual_machine_name}"
  location              = "${data.azurerm_resource_group.rgdata02.location}"
  resource_group_name   = "${var.resource_group_name}"
  network_interface_ids = [azurerm_network_interface.vmnic02.id]
  vm_size               = "${var.virtual_machine_size}"
  # availability_set_id = "${data.azurerm_availability_set.availsetdata02.id}"
  tags                  = var.tags

  # This means the OS Disk will be deleted when Terraform destroys the Virtual Machine
  # NOTE: This may not be optimal in all cases.
  delete_os_disk_on_termination = true

  os_profile {
    computer_name  = "${var.virtual_machine_name}"
    admin_username = "__VMUSER__"
    admin_password = "__VMPWD__"
  }

  os_profile_linux_config {
    disable_password_authentication = false
  }

  storage_image_reference {
    id = "${var.image_id}"
  }

  storage_os_disk {
    name              = "${var.virtual_machine_name}osdisk"
    caching           = "ReadWrite"
    create_option     = "FromImage"
    managed_disk_type = "Premium_LRS"
    os_type           = "Linux"
  }

  boot_diagnostics {
    enabled     = true
    storage_uri = "${var.boot_diagnostics_uri}"
  }
}
#===================================================================
# Set Monitoring and Log Analytics Workspace
#===================================================================
resource "azurerm_virtual_machine_extension" "oms_mma02" {
  count                      = var.bootstrap ? 1 : 0
  name                       = "${var.virtual_machine_name}-OMSExtension"
  # vm02 is created with count, so the single instance must be referenced by index
  virtual_machine_id         = "${azurerm_virtual_machine.vm02[0].id}"
  publisher                  = "Microsoft.EnterpriseCloud.Monitoring"
  type                       = "OmsAgentForLinux"
  type_handler_version       = "1.8"
  auto_upgrade_minor_version = true

  settings = <<SETTINGS
    {
      "workspaceId" : "${data.azurerm_log_analytics_workspace.law02.workspace_id}"
    }
SETTINGS

  protected_settings = <<PROTECTED_SETTINGS
    {
      "workspaceKey" : "${data.azurerm_log_analytics_workspace.law02.primary_shared_key}"
    }
PROTECTED_SETTINGS
}
#===================================================================
# Associate VM to Backup Policy
#===================================================================
resource "azurerm_backup_protected_vm" "vm02" {
  count               = var.bootstrap ? 1 : 0
  resource_group_name = "${var.core_resource_group_name}"
  recovery_vault_name = "${var.recovery_services_vault_name}"
  source_vm_id        = "${azurerm_virtual_machine.vm02[0].id}"
  backup_policy_id    = "${data.azurerm_backup_policy_vm.bkpoldata02.id}"
}

From your description, it sounds like terraform import isn't entirely clear to you, so let me show what it means.
When you want to import pre-existing resources, you first need to write Terraform configuration that matches how the existing resources are configured; only then can those resources be imported into the state file.
Another current caveat is that only a single resource can be imported into the state file at a time.
When you want to import resources into a module, I assume a folder structure like this:
testingimportfolder
├── main.tf
├── terraform.tfstate
├── terraform.tfstate.backup
└── module
    └── main.tf
And the main.tf file in the testingimportfolder folder sets the module block like this:
module "importlab" {
  source = "./module"
  ...
}
After you finish importing all the resources into the state file, the output of the command terraform state list looks like this:
module.importlab.azurerm_network_security_group.nsg
module.importlab.azurerm_resource_group.rg
module.importlab.azurerm_virtual_network.vnet
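For example, importing a pre-existing resource group into that module address would look something like this (a sketch; the subscription ID and resource group name are placeholders):
terraform import module.importlab.azurerm_resource_group.rg /subscriptions/<subscription_id>/resourceGroups/<resource_group_name>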
All the resource names take the form module.<module_name>.azurerm_xxx.<resource_name>. If you use a module inside a module, I assume a folder structure like this:
importmodules
├── main.tf
└── modules
    └── vm
        ├── main.tf
        └── module
            └── main.tf
And the file importmodules/modules/vm/main.tf looks like this:
module "azurevm" {
  source = "./module"
  ...
}
Then, after you finish importing all the resources into the state file, the output of the command terraform state list looks like this:
module.vm.module.azurevm.azurerm_network_interface.example
Yes, that is just like what you have got. The state file stores your existing resources under the module paths you reference, one by one, so you need to plan your code and modules carefully and clearly, or you will confuse yourself.
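Applied to the layout in your question, importing one of the missing VMs targets the module-qualified address from your template (a sketch; the angle-bracket values are placeholders, and the [0] index is needed because the vm02 resource uses count):
terraform import 'module.<virtual_machine_name>.azurerm_virtual_machine.vm02[0]' /subscriptions/<subscription_id>/resourceGroups/<resource_group_name>/providers/Microsoft.Compute/virtualMachines/<virtual_machine_name>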

Related

Terraform resource changes configuration items every time it's deployed

I have worked with both the AWS and Azure providers in terraform and both times I have experienced an issue with "toggling" configuration items.
My terraform resources look like this:
resource "azurerm_resource_group" "sample" {
name = "sample"
location = "uksouth"
}
resource "azurerm_storage_account" "sample" {
name = "samplestackoverflow"
resource_group_name = azurerm_resource_group.sample.name
location = azurerm_resource_group.sample.location
account_tier = "Standard"
account_replication_type = "LRS"
min_tls_version = "TLS1_2"
}
resource "azurerm_service_plan" "sample" {
name = "sample"
resource_group_name = azurerm_resource_group.sample.name
location = azurerm_resource_group.sample.location
os_type = "Linux"
sku_name = "Y1"
}
resource "azurerm_linux_function_app" "sample" {
name = "samplestackoverflow"
resource_group_name = azurerm_resource_group.sample.name
location = azurerm_resource_group.sample.location
storage_account_name = azurerm_storage_account.sample.name
storage_account_access_key = azurerm_storage_account.sample.primary_access_key
service_plan_id = azurerm_service_plan.sample.id
https_only = true
client_certificate_mode = "Required"
functions_extension_version = "~4"
site_config {
application_stack {
python_version = "3.8"
}
}
}
Now, the issue itself is that every time I run terraform apply and there are changes to be made (for example, changing https_only from true to false), the site_config item is removed. If I then run terraform apply again immediately, the site_config that disappeared is re-added. The output looks like this:
  ~ site_config {
        # (33 unchanged attributes hidden)

      + application_stack {
          + python_version              = "3.8"
          + use_dotnet_isolated_runtime = false
        }
    }
As I mentioned, this also happens with other providers and resources (I remember it happening to me with AWS API Gateway too). I can of course work around it by running terraform apply twice every time, but I was wondering if there is something else that can be done here?
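One possible mitigation (a sketch, not a confirmed fix for this provider behavior) is to have Terraform ignore drift on the affected nested block, at the cost of it no longer tracking real changes there:
resource "azurerm_linux_function_app" "sample" {
  # ... arguments as above ...

  lifecycle {
    # Hedge against the provider dropping/re-adding the nested block on
    # unrelated changes; real application_stack edits then require removing
    # this entry first.
    ignore_changes = [site_config[0].application_stack]
  }
}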

Is version mandatory while creating an Azure VM using terraform?

I have been working with Terraform for the last three weeks, trying to use it to create self-hosted GitHub Actions runners in our Azure account.
We have a shared Windows VM image in an Azure Compute Gallery that I'm planning to use as the base image for the GA runner. I have noticed that these shared Windows VM images do not generally have any version attached to them; they just have a publisher, offer, and SKU.
I also verified by creating a new image from a VM, to check whether somebody had missed attaching the version, but no, shared images really do not have a version attached.
Well, they do have versions, but the version is not attached to the image definition the way it is for Microsoft platform images.
Example of a shared image: (screenshot omitted)
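(An aside, not from the original post: if the Azure CLI is available, the versions that do exist for a shared image definition can be listed like this; the gallery and image names below are the ones that appear later in this question.)
az sig image-version list \
  --resource-group gi-rg \
  --gallery-name golden_image_gallery \
  --gallery-image-definition Windows-2019_base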
Now, I found that in Terraform, runners can be created using both the azurerm_windows_virtual_machine and azurerm_virtual_machine resources.
I used both of them to test runner creation; below is the Terraform code used:
data "azurerm_shared_image" "win19_gold_image" {
provider = azurerm.gi
name = "Windows-2019_base"
gallery_name = data.azurerm_shared_image_gallery.cap_win_gold_image_gallery.name
resource_group_name = "gi-rg"
}
resource "azurerm_virtual_machine" "win_runners_gold_image_based" {
provider = azurerm.og
name = "ga-win-gold-1"
location = "East US"
count = "1" # if I need to increase the number of VMs.
resource_group_name = data.azurerm_resource_group.dts_rg.name
network_interface_ids = [azurerm_network_interface.azure_win_runner_gold_nic[count.index].id,]
vm_size = "Standard_D4ads_v5"
delete_os_disk_on_termination = true
delete_data_disks_on_termination = true
storage_image_reference {
publisher = data.azurerm_shared_image.win19_gold_image.identifier[0].publisher
offer = data.azurerm_shared_image.win19_gold_image.identifier[0].offer
sku = data.azurerm_shared_image.win19_gold_image.identifier[0].sku
# Here I get the error: Error: compute.VirtualMachinesClient#CreateOrUpdate: Failure sending request: StatusCode=400 -- Original Error: Code="InvalidParameter" Message="The value of parameter imageReference.version is invalid." Target="imageReference.version"
}
storage_os_disk {
name = "ga-win-gold-os-disk-1"
caching = "None"
create_option = "FromImage"
managed_disk_type = "StandardSSD_LRS"
}
os_profile {
computer_name = "ga-win-gold-1"
admin_username = "svc"
admin_password = var.WINDOWS_ADMIN_PASS
}
os_profile_windows_config {
enable_automatic_upgrades = true
provision_vm_agent = true
}
storage_data_disk {
name = "ga-win-gold-data-disk-1"
caching = "None"
create_option = "Empty"
disk_size_gb = var.disk_size_gb
lun = 0
managed_disk_type = "StandardSSD_LRS"
}
}
OR
data "azurerm_shared_image" "win19_gold_image" {
provider = azurerm.gi
name = "Windows-2019_base"
gallery_name = data.azurerm_shared_image_gallery.cap_win_gold_image_gallery.name
resource_group_name = "gi-rg"
}
resource "azurerm_windows_virtual_machine" "azure_win_runner" {
provider = azurerm.og
name = "vm-github-actions-win-${count.index}"
resource_group_name = data.azurerm_resource_group.dts_rg.name
location = "East US"
size = var.windows-vm-size
count = "${var.number_of_win_az_instances}"
network_interface_ids = [
azurerm_network_interface.azure_win_runner_nic[count.index].id,
]
computer_name = "vm-ga-win-${count.index}"
admin_username = var.windows-admin-username
admin_password = var.WINDOWS_ADMIN_PASS
os_disk {
name = "vm-github-actions-win-${count.index}-os-disk"
caching = "None"
storage_account_type = "StandardSSD_LRS"
}
source_image_reference {
publisher = data.azurerm_shared_image.win19_gold_image.identifier[0].publisher
offer = data.azurerm_shared_image.win19_gold_image.identifier[0].offer
sku = data.azurerm_shared_image.win19_gold_image.identifier[0].sku
version = data.azurerm_shared_image.win19_gold_image.identifier[0].version # says this object does not have a version attached to it.
# or version = "latest" or any other correct version string will throw error at time of apply that such a version does not exist.
}
enable_automatic_updates = true
provision_vm_agent = true
}
If I'm using azurerm_virtual_machine and I omit version in storage_image_reference, I receive the error:
Error: compute.VirtualMachinesClient#CreateOrUpdate: Failure sending request: StatusCode=400 -- Original Error: Code="InvalidParameter" Message="The value of parameter imageReference.version is invalid." Target="imageReference.version"
And if I add the version, then I receive the error:
Error: Unsupported attribute.
This object does not have an attribute named "version".
When using azurerm_windows_virtual_machine, if I remove the version argument Terraform complains that version is required; when given a string such as 1.0.0 or latest, terraform apply complains that such a version does not exist; and if I pull the version from data.azurerm_shared_image.cap_win19_gold_image, it complains that this object does not have a version.
I am confused as to how to use shared images for VM creation using Terraform if version is mandatory yet not available for Azure shared images. Please advise on what I am missing.
Any help would be appreciated.
Thanks,
Sekhar
It seems that to get a version of the image, you need to use another resource [1] and another data source [2]:
data "azurerm_image" "win19_gold_image" {
name = "Windows-2019_base"
resource_group_name = "gi-rg"
}
resource "azurerm_shared_image_version" "win19_gold_image" {
name = "0.0.1"
gallery_name = data.azurerm_shared_image.win19_gold_image.gallery_name
image_name = data.azurerm_shared_image.win19_gold_image.name
resource_group_name = data.azurerm_shared_image.win19_gold_image.resource_group_name
location = data.azurerm_shared_image.win19_gold_image.location
managed_image_id = data.azurerm_image.win19_gold_image.id
}
And then in the source_image_reference block in the azurerm_windows_virtual_machine resource:
source_image_reference {
  publisher = data.azurerm_shared_image.win19_gold_image.identifier[0].publisher
  offer     = data.azurerm_shared_image.win19_gold_image.identifier[0].offer
  sku       = data.azurerm_shared_image.win19_gold_image.identifier[0].sku
  version   = azurerm_shared_image_version.win19_gold_image.name
}
It seems the name argument is actually the version of the image [3]:
name - (Required) The version number for this Image Version, such as 1.0.0. Changing this forces a new resource to be created.
[1] https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/data-sources/shared_image_version
[2] https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/data-sources/image
[3] https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/shared_image_version#name
Hi all who come across this question,
I found the solution to my issue. All I had to do was define an azurerm_shared_image_version data source and then use source_image_id in azurerm_windows_virtual_machine in place of the source_image_reference {} block.
Below is what I did:
data "azurerm_shared_image_gallery" "win_gold_image_gallery" {
provider = azurerm.gi
name = "golden_image_gallery"
resource_group_name = "gi-rg"
}
data "azurerm_shared_image" "win19_gold_image" {
provider = azurerm.gi
name = "Windows-2019_base"
gallery_name = data.azurerm_shared_image_gallery.win_gold_image_gallery.name
resource_group_name = data.azurerm_shared_image_gallery.win_gold_image_gallery.resource_group_name
}
data "azurerm_shared_image_version" "win19_gold_image_version" {
provider = azurerm.gi
name = "latest" # "recent" is also a tag to use the most recent image version
image_name = data.azurerm_shared_image.win19_gold_image.name
gallery_name = data.azurerm_shared_image.win19_gold_image.gallery_name
resource_group_name = data.azurerm_shared_image.win19_gold_image.resource_group_name
}
resource "azurerm_windows_virtual_machine" "azure_win_gi_runner" {
provider = azurerm.dep
name = "vm-github-actions-win-gi-${count.index}"
resource_group_name = data.azurerm_resource_group.dts_rg.name
location = "East US"
size = var.windows-vm-size
count = "${var.number_of_win_gi_az_instances}"
network_interface_ids = [
azurerm_network_interface.azure_win_gi_runner_nic[count.index].id,
]
computer_name = "ga-win-gi-${count.index}"
admin_username = var.windows-admin-username
admin_password = var.WINDOWS_ADMIN_PASS
os_disk {
name = "vm-github-actions-win-gi-${count.index}-os-disk"
caching = "None"
storage_account_type = "StandardSSD_LRS"
}
source_image_id = data.azurerm_shared_image_version.win19_gold_image_version.id
# This is the thing I was missing.
enable_automatic_updates = true
provision_vm_agent = true
tags = {
whichVM = var.gh_windows_runner
environment = var.environment
}
}

Terraform Azure Configure VM Backup Policy Fails

I am trying to create a backup policy and enable backup while provisioning an Azure VM using Terraform (Terraform version 1.1.13, Azure provider 2.90.0). Terraform fails to enable backup with the error below.
Error: waiting for the Azure Backup Protected VM "VM;iaasvmcontainerv2;Test-product-cloud-infra;arulazurebkup-vm" to be true (Resource Group "Test-Product-Cloud-Infra") to provision: context deadline exceeded
│
│ with azurerm_backup_protected_vm.backup,
│ on main.tf line 176, in resource "azurerm_backup_protected_vm" "backup":
│ 176: resource "azurerm_backup_protected_vm" "backup" {
│
Terraform Scripts
resource "azurerm_backup_policy_vm" "example" {
name = "Test-backup-policy"
resource_group_name = "Test-Product-Cloud-Infra"
recovery_vault_name = "backuptest"
backup {
frequency = "Daily"
time = "23:00"
}
retention_daily {
count = 7
}
}
resource "azurerm_backup_protected_vm" "backup" {
resource_group_name = "Test-Product-Cloud-Infra"
recovery_vault_name = "backuptest"
source_vm_id = azurerm_virtual_machine.example.id
backup_policy_id = azurerm_backup_policy_vm.example.id
depends_on = [azurerm_virtual_machine.example,
azurerm_virtual_machine_extension.example,
azurerm_backup_policy_vm.example]
}
When I check the error for the backup job in the Azure portal, I find the entry below (screenshot omitted).
On further troubleshooting, I get the below when enabling backup in the CLI (screenshot omitted).
You are getting the error because you are using a Recovery Services Vault that is not in the same location as the VM.
I tested the same as below: I created the VM in West US while the existing Recovery Services Vault was in East US, and I got the same error (screenshot omitted).
To solve the issue, you have to use the same location for all the resources as the Recovery Services Vault, i.e. in my case the same as the resource group (East US):
resource "azurerm_virtual_machine" "main" {
name = "ansuman-vm"
location = data.azurerm_resource_group.example.location
resource_group_name = data.azurerm_resource_group.example.name
network_interface_ids = [azurerm_network_interface.example.id]
vm_size = "Standard_DS1_v2"
# Uncomment this line to delete the OS disk automatically when deleting the VM
# delete_os_disk_on_termination = true
# Uncomment this line to delete the data disks automatically when deleting the VM
# delete_data_disks_on_termination = true
storage_image_reference {
publisher = "Canonical"
offer = "UbuntuServer"
sku = "16.04-LTS"
version = "latest"
}
storage_os_disk {
name = "myosdisk1"
caching = "ReadWrite"
create_option = "FromImage"
managed_disk_type = "Standard_LRS"
}
os_profile {
computer_name = "hostname"
admin_username = "testadmin"
admin_password = "Password1234!"
}
os_profile_linux_config {
disable_password_authentication = false
}
}
data "azurerm_recovery_services_vault" "example" {
name = "recoveryvaultansuman"
resource_group_name = data.azurerm_resource_group.example.name
}
resource "azurerm_backup_policy_vm" "example" {
name = "ansuman-recovery-vault-policy"
resource_group_name = data.azurerm_resource_group.example.name
recovery_vault_name = data.azurerm_recovery_services_vault.example.name
backup {
frequency = "Daily"
time = "23:00"
}
retention_daily {
count = 7
}
}
resource "azurerm_backup_protected_vm" "vm1" {
resource_group_name = data.azurerm_resource_group.example.name
recovery_vault_name = data.azurerm_recovery_services_vault.example.name
source_vm_id = azurerm_virtual_machine.main.id
backup_policy_id = azurerm_backup_policy_vm.example.id
}
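To make the location constraint explicit, the VM location could even be derived from the vault itself (a sketch; it assumes the data source above and the location attribute exported by the azurerm_recovery_services_vault data source):
resource "azurerm_virtual_machine" "main" {
  name     = "ansuman-vm"
  # Pin the VM to the vault's region so backup cannot fail on a location mismatch.
  location = data.azurerm_recovery_services_vault.example.location
  # ... remaining arguments as in the block above ...
}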

Error: Reference to undeclared module on modules/site_recovery/main.tf

I have created resource, network, and compute modules in Terraform, and now want to pass the VM id output to the site recovery module. Here are the files I am using currently.
Continuing from the subject line: in resource "azurerm_site_recovery_replicated_vm" "vm-replication" I set source_vm_id = module.compute.vm_id.
This is the directory structure I am following currently:
.
├── main.tf
└── modules
    ├── compute
    │   ├── main.tf
    │   ├── outputs.tf_bk
    │   ├── variable.tf
    │   └── variable.tfvars
    ├── network
    │   ├── main.tf
    │   ├── variable.tf
    │   └── variable.tfvars
    ├── resource
    │   ├── main.tf
    │   ├── variable.tf
    │   └── variable.tfvars
    └── site_recovery
        ├── main.tf
        ├── variable.tf
        └── variable.tfvars
Root module main.tf file:
#Select provider
provider "azurerm" {
  subscription_id = "xxxxxxxxxxxxxxxxxxxxxxxx"
  version         = "~> 2.4"
  features {}
}

module "resource" {
  source              = "./modules/resource"
  resource_group_name = "devops_primary"
  location            = "southeastasia"
}

module "network" {
  source              = "./modules/network"
  virtual_network     = "primaryvnet"
  subnet              = "primarysubnet"
  address_space       = "192.168.0.0/16"
  address_prefix      = "192.168.1.0/24"
  public_ip           = "backendvmpip"
  location            = "southeastasia"
  primary_nic         = "backendvmnic"
  primary_ip_conf     = "backendvm"
  resource_group_name = "module.resource.primary_group_name"
}

module "compute" {
  source = "./modules/compute"
  #resource_group_name    = "devops_primary"
  #location               = "southeastasia"
  vm_name                 = "backendvm-primary"
  vm_size                 = "standard_d2s_v3"
  vm_storage_od_disk_name = "backend-vm-os-disk-primary"
  computer_name           = "backendserver"
  username                = "terraform"
  ssh_key_path            = "/home/terraform/.ssh/authorized_keys"
  keys_data               = "~/.ssh/id_rsa.pub"
  sa_name                 = "primarysa"
  disk_name               = "backenddisk_primary"
}

module "site_recovery" {
  source = "./modules/site_recovery"
  #resource_group_name = "devops_primary"
  #location            = "southeastasia"
  sec_resource_group  = "devops_secondary"
  recovery_vault_name = "recovery-vault"
  primary_fabric      = "devops_primary-fabric"
  seconday_fabric     = "devops_secondary-fabric"
  primary_container   = "primary-protection-container"
  secondary_container = "secondary-protection-container"
  policy_name         = "policy"
  container_mapping   = "container-mapping"
  replicated_vm       = "backendvm-replication"
}
compute main.tf:
#Create VM in Primary resource
resource "azurerm_virtual_machine" "primary" {
  name                  = "var.vm_name"
  location              = "module.resource.azurerm_resource_group.primary.location"
  resource_group_name   = "module.resource.azurerm_resource_group.primary.name"
  vm_size               = "var.vm_size"
  network_interface_ids = ["module.resource.azurerm_network_interface.primary.id"]

  storage_os_disk {
    name              = "var.vm_storage_od_disk_name"
    os_type           = "Linux"
    caching           = "ReadWrite"
    create_option     = "FromImage"
    managed_disk_type = "Premium_LRS"
  }

  storage_image_reference {
    publisher = "Canonical"
    offer     = "UbuntuServer"
    sku       = "18.04-LTS"
    version   = "latest"
  }

  os_profile {
    computer_name  = "var.computer_name"
    admin_username = "var.username"
  }

  os_profile_linux_config {
    disable_password_authentication = true
    ssh_keys {
      path     = "/home/terraform/.ssh/authorized_keys"
      key_data = file("~/.ssh/id_rsa.pub")
    }
  }

  tags = {
    environment = "Test"
  }
}

output "vm_ids" {
  description = "Virtual machine ids created."
  value       = azurerm_virtual_machine.primary.id
  #depends_on = [azurerm_virtual_machine.primary.primary]
}
site recovery main.tf:
#Create Site Recovery Replicated VM
resource "azurerm_site_recovery_replicated_vm" "vm-replication" {
name = var.replicated_vm
resource_group_name = azurerm_resource_group.secondary.name
recovery_vault_name = azurerm_recovery_services_vault.vault.name
source_recovery_fabric_name = azurerm_site_recovery_fabric.primary.name
#source_vm_id = "module.compute.azurerm_virtual_machine.primary.id"
source_vm_id = module.compute.vm_ids
recovery_replication_policy_id = azurerm_site_recovery_replication_policy.policy.id
source_recovery_protection_container_name = azurerm_site_recovery_protection_container.primary.name
target_resource_group_id = azurerm_resource_group.secondary.id
target_recovery_fabric_id = azurerm_site_recovery_fabric.secondary.id
target_recovery_protection_container_id = azurerm_site_recovery_protection_container.secondary.id
managed_disk {
disk_id = "[module.resource.azurerm_virtual_machine.primary.storage_os_disk[0].managed_disk_id]"
staging_storage_account_id = "module.resource.azurerm_storage_account.primary.id"
target_resource_group_id = azurerm_resource_group.secondary.id
target_disk_type = "Premium_LRS"
target_replica_disk_type = "Premium_LRS"
}
managed_disk {
disk_id = "[module.resource.azurerm_managed_disk.primary.id]"
staging_storage_account_id = "[module.resource.azurerm_storage_account.primary.id]"
target_resource_group_id = azurerm_resource_group.secondary.id
target_disk_type = "Premium_LRS"
target_replica_disk_type = "Premium_LRS"
}
depends_on = ["module.compute.vm_ids"]
}
I used depends_on for the input to the site_recovery module. Again, could you please suggest how I can output the managed disk ids and OS disk ids from the compute module and use them as input in the site recovery module?
For the error
Error: Reference to undeclared module on modules/site_recovery/main.tf
it means the referenced module is not declared in the calling module.
To call a module means to include the contents of that module in the configuration with specific values for its input variables. Modules are called from within other modules using module blocks. You need to add a module block in the configuration .tf file where you want to call that module. See calling a child module.
It seems that there are no module blocks declared in your site_recovery and compute main.tf files, so you cannot reference resource-module attributes such as module.resource.azurerm_resource_group.primary.location, module.resource.azurerm_managed_disk.primary.id, and so on.
Given your directory structure, you can use input variables to pass one module's output into another module. The correct expression for a module output is module.<MODULE NAME>.<OUTPUT NAME>.
To output the VM id and managed disk id from the compute module:
output "azurerm_vm_id" {
value = azurerm_virtual_machine.primary.id
}
output "primary_os_disk_id" {
value = azurerm_virtual_machine.primary.storage_os_disk[0].managed_disk_id
}
The main.tf in the root directory
module "vm" {
source = "./modules/vm"
vm_name = "backendvm-primary"
vm_size = "standard_d2s_v3"
vm_storage_od_disk_name = "backend-vm-os-disk-primary"
computer_name = "backendserver"
username = "terraform"
nic_ids = module.network.primary_nic_id
resource_group_name = module.resource.rg_name
location = module.resource.rg_location
#ssh_key_path = "/home/terraform/.ssh/authorized_keys"
#keys_data = "~/.ssh/id_rsa.pub"
}
module "site_recovery" {
source = "./modules/site_recovery"
resource_group_name = module.resource.rg_name
location = module.resource.rg_location
sec_resource_group = "nancy_secondary"
sec_location = "eastus"
recovery_vault_name = "recovery-vault"
primary_fabric = "devops_primary-fabric"
seconday_fabric = "devops_secondary-fabric"
primary_container = "primary-protection-container"
secondary_container = "secondary-protection-container"
policy_name = "policy"
container_mapping = "container-mapping"
replicated_vm = "backendvm-replication"
source_vm_id = module.vm.azurerm_vm_id
primary_os_disk_id = module.vm.primary_os_disk_id
}
The Site Recovery main.tf file
#Create Site Recovery Replicated VM
resource "azurerm_site_recovery_replicated_vm" "vm-replication" {
  depends_on                                = [var.vm_depends_on]
  name                                      = var.replicated_vm
  resource_group_name                       = azurerm_resource_group.secondary.name
  recovery_vault_name                       = azurerm_recovery_services_vault.vault.name
  source_recovery_fabric_name               = azurerm_site_recovery_fabric.primary.name
  source_vm_id                              = var.source_vm_id
  recovery_replication_policy_id            = azurerm_site_recovery_replication_policy.policy.id
  source_recovery_protection_container_name = azurerm_site_recovery_protection_container.primary.name
  target_resource_group_id                  = azurerm_resource_group.secondary.id
  target_recovery_fabric_id                 = azurerm_site_recovery_fabric.secondary.id
  target_recovery_protection_container_id   = azurerm_site_recovery_protection_container.secondary.id

  managed_disk {
    disk_id                    = var.primary_os_disk_id
    staging_storage_account_id = azurerm_storage_account.primary.id
    target_resource_group_id   = azurerm_resource_group.secondary.id
    target_disk_type           = "Premium_LRS"
    target_replica_disk_type   = "Premium_LRS"
  }
}
In fact, in the azurerm_site_recovery_replicated_vm block there is already an implicit dependency through source_vm_id: it relies on the source Azure VM. If you want the Terraform depends_on meta-argument to accept a list of resources together with a module, you can refer to this thread - Terraform depends_on with modules - and this document.
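For completeness, the site_recovery module then needs matching input variables declared, something like this (a sketch; the types are assumed, and vm_depends_on follows the any-typed pattern from the thread linked above):
variable "source_vm_id" {
  type = string
}

variable "primary_os_disk_id" {
  type = string
}

# Accepts any value; it exists purely so callers can express an explicit dependency.
variable "vm_depends_on" {
  type    = any
  default = null
}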

Forcing azurerm extension to wait until vm is deployed

I am running into an issue that is preventing my use of Terraform at the moment and wanted to see if anyone has seen the same behavior. I am using count to deploy multiple VMs along with a DSC extension for each VM.
Because I need the DSC extension to run on the first machine before running on the second machine, I attempted to use the depends_on property for the extension, but due to the way I am using interpolation for machine naming, it fails because interpolation is not supported in depends_on.
Does anyone know a way around this? I have also tested pushing the machine names into a data resource, but once again I need the depends_on property to support interpolation.
resource "azurerm_virtual_machine" "Server" {
name = "${format("${var.customerProject}${var.environment}${var.machineAcronyms["Server"]}%02d", count.index + 1)}"
location = "${var.location}"
resource_group_name = "${azurerm_resource_group.rg.name}"
network_interface_ids = ["${element(azurerm_network_interface.Server_NIC.*.id, count.index)}"]
vm_size = "${var.Server_Specs["ServerType"]}"
count = "${var.Server_Specs["Number_of_Machines"]}"
storage_image_reference {
publisher = "${var.Server_Specs["Image_Publisher"]}"
offer = "${var.Server_Specs["Image_Offer"]}"
sku = "${var.Server_Specs["Image_sku"]}"
version = "${var.Server_Specs["Image_Version"]}"
}
plan {
name = "${var.Server_Specs["Plan_Name"]}"
publisher = "${var.Server_Specs["Plan_Publisher"]}"
product = "${var.Server_Specs["Plan_Product"]}"
}
os_profile {
computer_name = "${format("${var.customerProject}${var.environment}${var.machineAcronyms["Server"]}%02d", count.index + 1)}"
admin_username = "${var.AdminCredentials["Username"]}"
admin_password = "${var.AdminCredentials["Password"]}"
}
os_profile_windows_config {
provision_vm_agent = "true"
}
}
resource "azurerm_virtual_machine_extension" "Server_DSC" {
name = "${format("${var.customerProject}${var.environment}${var.machineAcronyms["Server"]}%02d", count.index + 1)}-dsc"
location = "${var.location}"
resource_group_name = "${azurerm_resource_group.rg.name}"
virtual_machine_name = "${format("${var.customerProject}${var.environment}${var.machineAcronyms["Server"]}%02d", count.index + 1)}"
publisher = "Microsoft.Powershell"
type = "DSC"
type_handler_version = "${var.dsc_extension}"
auto_upgrade_minor_version = true
depends_on = ["azurerm_storage_share.fileShare"]
count = "${var.Server_Specs["Number_of_Machines"]}"
settings = <<SETTINGS
{
"configuration": {
"url": "${var.resourceStore["fileShareUrl"]}${var.resourceStore["dscArchiveName"]}${var.azureCredentials["storageKey"]}",
"function": "contenthostingha",
"script": "contenthostingha.ps1"
},
"configurationArguments": {
"ExternalDNS": "${var.externalDNS}",
"NumberOfMachines": "${var.Server_Specs["Number_of_Machines"]}",
"AzureFileUrl": "azurerm_storage_share.fileShare.url",
"AzureFileShareKey": "${azurerm_storage_account.storageAccount.secondary_access_key}"
}
}
SETTINGS
protected_settings = <<PROTECTED_SETTINGS
{
"configurationArguments": {}
}
PROTECTED_SETTINGS
}
I haven't tried it, but you might duplicate the Server_DSC resource (e.g. Server_DSC_0 and Server_DSC_nth): the _0 copy would use a fixed index 0 instead of count, and the nth copy's instance count would be reduced by one, e.g. using a local variable that subtracts one from the original variable. A sketch of that idea follows.
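Something like this (hedged: untested, kept in the question's 0.11-era syntax, and the settings/protected_settings bodies are assumed to be the same as in the Server_DSC resource above):
locals {
  # One fewer instance in the "nth" resource; machine 1 gets its own copy below.
  dsc_nth_count = "${var.Server_Specs["Number_of_Machines"] - 1}"
}

resource "azurerm_virtual_machine_extension" "Server_DSC_0" {
  name                       = "${format("${var.customerProject}${var.environment}${var.machineAcronyms["Server"]}%02d", 1)}-dsc"
  location                   = "${var.location}"
  resource_group_name        = "${azurerm_resource_group.rg.name}"
  virtual_machine_name       = "${format("${var.customerProject}${var.environment}${var.machineAcronyms["Server"]}%02d", 1)}"
  publisher                  = "Microsoft.Powershell"
  type                       = "DSC"
  type_handler_version       = "${var.dsc_extension}"
  auto_upgrade_minor_version = true
  depends_on                 = ["azurerm_storage_share.fileShare"]
  # settings / protected_settings identical to Server_DSC above
}

resource "azurerm_virtual_machine_extension" "Server_DSC_nth" {
  count                      = "${local.dsc_nth_count}"
  name                       = "${format("${var.customerProject}${var.environment}${var.machineAcronyms["Server"]}%02d", count.index + 2)}-dsc"
  location                   = "${var.location}"
  resource_group_name        = "${azurerm_resource_group.rg.name}"
  virtual_machine_name       = "${format("${var.customerProject}${var.environment}${var.machineAcronyms["Server"]}%02d", count.index + 2)}"
  publisher                  = "Microsoft.Powershell"
  type                       = "DSC"
  type_handler_version       = "${var.dsc_extension}"
  auto_upgrade_minor_version = true
  # Static references are allowed in depends_on, which is the point of splitting the resource:
  depends_on                 = ["azurerm_virtual_machine_extension.Server_DSC_0", "azurerm_storage_share.fileShare"]
  # settings / protected_settings identical to Server_DSC above
}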
