Error: host for provisioner cannot be empty - linux

I am working on a main.tf file that creates a virtual machine in Azure and runs commands on it with remote-exec. In the same file I would also like to generate the SSH key and save it as a .pem file so that I can access the Linux VM.
main.tf file:
# Configure the Microsoft Azure Provider
terraform {
required_providers {
azurerm = {
source = "hashicorp/azurerm"
version = "~>2.0"
}
}
}
provider "azurerm" {
features {}
subscription_id = var.subscription_id
client_id = var.client_id
client_secret = var.client_secret
tenant_id = var.tenant_id
}
# Create a resource group if it doesn't exist
resource "azurerm_resource_group" "myterraformgroup" {
name = var.resource_group
location = var.resource_group_location
tags = {
environment = "Terraform Demo"
}
}
# Create virtual network
resource "azurerm_virtual_network" "myterraformnetwork" {
name = "myVnet"
address_space = ["10.0.0.0/16"]
location = "eastus"
resource_group_name = azurerm_resource_group.myterraformgroup.name
tags = {
environment = "Terraform Demo"
}
}
# Create subnet
resource "azurerm_subnet" "myterraformsubnet" {
name = "mySubnet"
resource_group_name = azurerm_resource_group.myterraformgroup.name
virtual_network_name = azurerm_virtual_network.myterraformnetwork.name
address_prefixes = ["10.0.1.0/24"]
}
# Create public IPs
resource "azurerm_public_ip" "myterraformpublicip" {
name = "myPublicIP"
location = "eastus"
resource_group_name = azurerm_resource_group.myterraformgroup.name
allocation_method = "Dynamic"
tags = {
environment = "Terraform Demo"
}
}
# Create Network Security Group and rule
resource "azurerm_network_security_group" "myterraformnsg" {
name = "myNetworkSecurityGroup"
location = "eastus"
resource_group_name = azurerm_resource_group.myterraformgroup.name
security_rule {
name = "SSH"
priority = 1001
direction = "Inbound"
access = "Allow"
protocol = "Tcp"
source_port_range = "*"
destination_port_range = "22"
source_address_prefix = "*"
destination_address_prefix = "*"
}
tags = {
environment = "Terraform Demo"
}
}
# Create network interface
resource "azurerm_network_interface" "myterraformnic" {
name = "myNIC"
location = "eastus"
resource_group_name = azurerm_resource_group.myterraformgroup.name
ip_configuration {
name = "myNicConfiguration"
subnet_id = azurerm_subnet.myterraformsubnet.id
private_ip_address_allocation = "Dynamic"
public_ip_address_id = azurerm_public_ip.myterraformpublicip.id
}
tags = {
environment = "Terraform Demo"
}
}
# Connect the security group to the network interface
resource "azurerm_network_interface_security_group_association" "example" {
network_interface_id = azurerm_network_interface.myterraformnic.id
network_security_group_id = azurerm_network_security_group.myterraformnsg.id
}
# Generate random text for a unique storage account name
resource "random_id" "randomId" {
keepers = {
# Generate a new ID only when a new resource group is defined
resource_group = azurerm_resource_group.myterraformgroup.name
}
byte_length = 8
}
# Create storage account for boot diagnostics
resource "azurerm_storage_account" "mystorageaccount" {
name = "diag${random_id.randomId.hex}"
resource_group_name = azurerm_resource_group.myterraformgroup.name
location = "eastus"
account_tier = "Standard"
account_replication_type = "LRS"
tags = {
environment = "Terraform Demo"
}
}
# Create (and display) an SSH key
resource "tls_private_key" "example_ssh" {
algorithm = "RSA"
rsa_bits = 2048
}
output "tls_private_key" {
value = tls_private_key.example_ssh.private_key_pem
sensitive = true
}
# Create virtual machine
resource "azurerm_linux_virtual_machine" "myterraformvm" {
name = "myVM"
location = "eastus"
resource_group_name = azurerm_resource_group.myterraformgroup.name
network_interface_ids = [azurerm_network_interface.myterraformnic.id]
size = "Standard_DS1_v2"
os_disk {
name = "myOsDisk"
caching = "ReadWrite"
storage_account_type = "Premium_LRS"
}
source_image_reference {
publisher = "Canonical"
offer = "UbuntuServer"
sku = "18.04-LTS"
version = "latest"
}
computer_name = "myvm"
admin_username = "azureuser"
disable_password_authentication = true
admin_ssh_key {
username = "azureuser"
public_key = tls_private_key.example_ssh.public_key_openssh
}
boot_diagnostics {
storage_account_uri = azurerm_storage_account.mystorageaccount.primary_blob_endpoint
}
tags = {
environment = "Terraform Demo"
}
}
resource "null_resource" "execute" {
connection {
type = "ssh"
agent = false
user = "azureuser"
host = azurerm_public_ip.myterraformpublicip.ip_address
private_key = tls_private_key.example_ssh.private_key_pem
}
provisioner "file" {
source = "./config"
destination = "~/"
}
provisioner "remote-exec" {
inline = [
"chmod 755 ~/scripts/*",
"sudo sh ~/scripts/foreman_prerequisite_config.sh",
]
}
depends_on = [azurerm_linux_virtual_machine.myterraformvm]
}
I am facing the error below when running terraform apply:
null_resource.execute: Provisioning with 'file'...
╷
│ Error: file provisioner error
│
│   with null_resource.execute,
│   on main.tf line 184, in resource "null_resource" "execute":
│  184: provisioner "file" {
│
│ host for provisioner cannot be empty
Please help me to resolve this issue. Thanks in advance!

According to the Azure provider documentation [1], when the public IP allocation type is Dynamic, you should use the data source to get the IP address:
data "azurerm_public_ip" "myterraformpublicip" {
name = azurerm_public_ip.myterraformpublicip.name
resource_group_name = azurerm_linux_virtual_machine.myterraformvm.resource_group_name
}
Then, in the host argument of the null_resource you should set the following:
host = data.azurerm_public_ip.myterraformpublicip.ip_address
However, this might not fix your issue, as it seems there is a problem with this version of the Azure provider for Linux VMs [2]:
In this release there's a known issue where the public_ip_address and public_ip_addresses fields may not be fully populated for Dynamic Public IP's.
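If you run into that issue, a simpler workaround (a sketch, not taken from the linked documentation) is to make the allocation static, so the address is assigned as soon as the public IP resource is created and azurerm_public_ip.myterraformpublicip.ip_address is already populated when the provisioner needs it:
resource "azurerm_public_ip" "myterraformpublicip" {
  name                = "myPublicIP"
  location            = "eastus"
  resource_group_name = azurerm_resource_group.myterraformgroup.name
  # "Static" addresses are allocated at creation time, unlike "Dynamic"
  allocation_method   = "Static"
  tags = {
    environment = "Terraform Demo"
  }
}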
The second part of the question was related to generating an SSH key which can be used later on to access a VM. In your question you have this code:
resource "tls_private_key" "example_ssh" {
algorithm = "RSA"
rsa_bits = 4096
}
output "tls_private_key" {
value = tls_private_key.example_ssh.private_key_pem
sensitive = true
}
Based on the answer you linked in the comments [3], the output is not needed. Instead, a local_file resource can be used to write the private key to a file in the same directory:
resource "tls_private_key" "example_ssh" {
algorithm = "RSA"
rsa_bits = 4096
}
resource "local_file" "private_key_file" {
content = tls_private_key.example_ssh.private_key_pem
filename = "${path.root}/private-key.pem"
}
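If you intend to use the generated file with ssh -i, it is also worth restricting its permissions, because ssh refuses private keys that are readable by other users. A minimal sketch, assuming a recent version of the hashicorp/local provider (which supports the file_permission argument):
resource "local_file" "private_key_file" {
  content         = tls_private_key.example_ssh.private_key_pem
  filename        = "${path.root}/private-key.pem"
  # ssh rejects keys with open permissions, so lock the file down
  file_permission = "0600"
}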
Then, in the null_resource, you should add the following:
resource "null_resource" "execute" {
connection {
type = "ssh"
agent = false
user = "azureuser"
host = data.azurerm_public_ip.myterraformpublicip.ip_address
private_key = "${path.root}/private-key.pem"
}
provisioner "file" {
source = "./config"
destination = "~/"
}
provisioner "remote-exec" {
inline = [
"chmod 755 ~/scripts/*",
"sudo sh ~/scripts/foreman_prerequisite_config.sh",
]
}
}
depends_on = [azurerm_linux_virtual_machine.myterraformvm]
}
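With the pieces above, terraform apply writes private-key.pem next to your configuration, so (as an illustration) you can also reach the VM manually with ssh -i private-key.pem azureuser@<public-ip>.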
Note that you probably should not use the tls_private_key resource for production environments [4]:
The private key generated by this resource will be stored unencrypted in your Terraform state file. Use of this resource for production deployments is not recommended. Instead, generate a private key file outside of Terraform and distribute it securely to the system where Terraform will be run.
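If you generate the key pair outside of Terraform (for example with ssh-keygen), the configuration only ever needs the public half. A sketch, with an illustrative key path:
admin_ssh_key {
  username   = "azureuser"
  # pathexpand() resolves the leading ~; the path itself is just an example
  public_key = file(pathexpand("~/.ssh/id_rsa.pub"))
}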
[1] https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/data-sources/public_ip#example-usage-retrieve-the-dynamic-public-ip-of-a-new-vm
[2] https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/linux_virtual_machine#:~:text=In%20this%20release%20there%27s%20a%20known%20issue%20where%20the%20public_ip_address%20and%20public_ip_addresses%20fields%20may%20not%20be%20fully%20populated%20for%20Dynamic%20Public%20IP%27s.
[3] https://stackoverflow.com/a/67379867/8343484
[4] https://registry.terraform.io/providers/hashicorp/tls/latest/docs/resources/private_key

Related

A managed resource "azurerm_network_interface" "myterraformnic" has not been declared in module.vm. Azure

I have created two modules:
1. Network
2. VM
In network.tf I have added the network configuration for Azure, and in vm.tf I am trying to create a virtual machine. When calling both modules from main.tf I get this error:
on ..\modules\vm\vm.tf line 20, in resource "azurerm_windows_virtual_machine" "myterraformvm":
20: network_interface_ids = [azurerm_network_interface.myterraformnic.id]
A managed resource "azurerm_network_interface" "myterraformnic" has not been
declared in module.vm.
This is the code of vm.tf
# Resource Group
resource "azurerm_resource_group" "myterraformgroup" {
name = "myResourceGroup"
location = "eastus"
}
resource "azurerm_image" "example" {
# (resource arguments)
name = "WINDOWS2019C"
location = "eastus"
resource_group_name = azurerm_resource_group.myterraformgroup.name
}
# Virtual Machine Properties
resource "azurerm_windows_virtual_machine" "myterraformvm" {
name = "myVM"
location = "eastus"
resource_group_name = azurerm_resource_group.myterraformgroup.name
network_interface_ids = [azurerm_network_interface.myterraformnic.id]
size = "Standard_DS1_v2"
admin_username = "XXXXXXXXXXXXX"
admin_password = "XXXXXXXXXXXXXX"
os_disk {
name = "myOsDisk"
caching = "ReadWrite"
storage_account_type = "Premium_LRS"
}
source_image_id = "/subscriptions/XXXXXXXXXXXXXXXXXXX/soft.Compute/images/XXXXXXXX"
}
This is the code of network.tf
#Create virtual network
resource "azurerm_resource_group" "myterraformgroup" {
name = "myResourceGroup"
location = "eastus"
}
resource "azurerm_virtual_network" "myterraformnetwork" {
name = "myVnet"
address_space = ["10.0.0.0/16"]
location = "eastus"
resource_group_name = azurerm_resource_group.myterraformgroup.name
tags = {
environment = "Terraform Demo"
}
}
# Create subnet
resource "azurerm_subnet" "myterraformsubnet" {
name = "mySubnet"
resource_group_name = azurerm_resource_group.myterraformgroup.name
virtual_network_name = azurerm_virtual_network.myterraformnetwork.name
address_prefixes = ["10.0.1.0/24"]
}
# Create public IPs
resource "azurerm_public_ip" "myterraformpublicip" {
name = "myPublicIP"
location = "eastus"
resource_group_name = azurerm_resource_group.myterraformgroup.name
allocation_method = "Dynamic"
tags = {
environment = "Terraform Demo"
}
}
# Create Network Security Group and rule
resource "azurerm_network_security_group" "myterraformnsg" {
name = "myNetworkSecurityGroup"
location = "eastus"
resource_group_name = azurerm_resource_group.myterraformgroup.name
security_rule {
name = "HTTP"
priority = 1001
direction = "Inbound"
access = "Allow"
protocol = "Tcp"
source_port_range = "*"
destination_port_range = "*"
source_address_prefix = "*"
destination_address_prefix = "*"
}
tags = {
environment = "Terraform Demo"
}
}
# Create network interface
resource "azurerm_network_interface" "myterraformnic" {
name = "myNIC"
location = "eastus"
resource_group_name = azurerm_resource_group.myterraformgroup.name
ip_configuration {
name = "myNicConfiguration"
subnet_id = azurerm_subnet.myterraformsubnet.id
private_ip_address_allocation = "Dynamic"
public_ip_address_id = azurerm_public_ip.myterraformpublicip.id
}
tags = {
environment = "Terraform Demo"
}
}
# Connect the security group to the network interface
resource "azurerm_network_interface_security_group_association" "example" {
network_interface_id = azurerm_network_interface.myterraformnic.id
network_security_group_id = azurerm_network_security_group.myterraformnsg.id
}
# Generate random text for a unique storage account name
resource "random_id" "randomId" {
keepers = {
# Generate a new ID only when a new resource group is defined
resource_group = azurerm_resource_group.myterraformgroup.name
}
byte_length = 8
}
main.tf
provider "azurerm" {
# The "feature" block is required for AzureRM provider 2.x.
# If you're using version 1.x, the "features" block is not allowed.
version = "~>2.0"
features {}
}
module "modules" {
source = "../modules/network"
}
module "vm" {
source = "../modules/vm"
}
Directory Structure:
$ ls
dev/ modules/
$ ls dev/
main.tf
$ ls modules/
network/ vm/
Since the network interface resource is defined in the Network module and you want to reference it from the VM module, you need to declare output values in the Network module (the child module) to selectively export values for the calling module to pass on.
In this case:
main.tf
module "network" {
  source = "../modules/network"
}
module "vm" {
  source = "../modules/vm"
  vm_nic = module.network.nic
}
vm.tf
...
variable "vm_nic" {} # declare this in the VM module, e.g. in a variables.tf next to vm.tf
...
resource "azurerm_windows_virtual_machine" "myterraformvm" {
  name                  = "myVM"
  location              = "eastus"
  resource_group_name   = azurerm_resource_group.myterraformgroup.name
  network_interface_ids = [var.vm_nic] # change this
network.tf
output "nic" {
  value = azurerm_network_interface.myterraformnic.id
}
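For clarity, the vm_nic variable can also carry an explicit type and description (a sketch; the description text is mine):
variable "vm_nic" {
  type        = string
  description = "ID of the network interface created by the network module"
}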
For more information, read https://www.terraform.io/docs/language/modules/syntax.html#calling-a-child-module

Trouble sshing into Azure Linux Virtual Machine

I followed the following guide to set up a Linux Virtual Machine using Terraform:
https://learn.microsoft.com/en-us/azure/developer/terraform/create-linux-virtual-machine-with-infrastructure
Everything was successfully created in Azure. I am having trouble with the last step: being able to SSH into the virtual machine. I use the following command in Windows PowerShell:
ssh azureuser@public_ip_here
It gives me the following error:
azureuser@52.186.144.190: Permission denied (publickey).
I've also tried using the RDP file from the Azure portal, downloading it and importing it into the RDP client, but that fails with an error as well.
Things I've tried:
- Using the normal ssh command as above
- Putting the private key in a .pem file, assigning it restricted permissions, and passing it in with ssh -i; this doesn't work either
- Using the RDP file downloaded from the Azure portal (error described above)
- Running the test connection feature for the virtual machine in the Azure portal; it shows the connection as successful, but I'm still not able to access the VM
I'm wondering if I have to configure something in the Azure portal to allow myself to SSH into the VM.
My main.tf code is:
provider "azurerm" {
# The "feature" block is required for AzureRM provider 2.x.
# If you're using version 1.x, the "features" block is not allowed.
version = "~>2.0"
features {}
}
resource "azurerm_resource_group" "myterraformgroup" {
name = "myResourceGroup"
location = "eastus"
tags = {
environment = "Terraform Demo"
}
}
resource "azurerm_virtual_network" "myterraformnetwork" {
name = "myVnet"
address_space = ["10.0.0.0/16"]
location = "eastus"
resource_group_name = azurerm_resource_group.myterraformgroup.name
tags = {
environment = "Terraform Demo"
}
}
resource "azurerm_subnet" "myterraformsubnet" {
name = "mySubnet"
resource_group_name = azurerm_resource_group.myterraformgroup.name
virtual_network_name = azurerm_virtual_network.myterraformnetwork.name
address_prefixes = ["10.0.1.0/24"]
}
resource "azurerm_public_ip" "myterraformpublicip" {
name = "myPublicIP"
location = "eastus"
resource_group_name = azurerm_resource_group.myterraformgroup.name
allocation_method = "Dynamic"
tags = {
environment = "Terraform Demo"
}
}
resource "azurerm_network_security_group" "myterraformnsg" {
name = "myNetworkSecurityGroup"
location = "eastus"
resource_group_name = azurerm_resource_group.myterraformgroup.name
security_rule {
name = "SSH"
priority = 1001
direction = "Inbound"
access = "Allow"
protocol = "Tcp"
source_port_range = "*"
destination_port_range = "22"
source_address_prefix = "*"
destination_address_prefix = "*"
}
tags = {
environment = "Terraform Demo"
}
}
resource "azurerm_network_interface" "myterraformnic" {
name = "myNIC"
location = "eastus"
resource_group_name = azurerm_resource_group.myterraformgroup.name
ip_configuration {
name = "myNicConfiguration"
subnet_id = azurerm_subnet.myterraformsubnet.id
private_ip_address_allocation = "Dynamic"
public_ip_address_id = azurerm_public_ip.myterraformpublicip.id
}
tags = {
environment = "Terraform Demo"
}
}
resource "azurerm_network_interface_security_group_association" "example" {
network_interface_id = azurerm_network_interface.myterraformnic.id
network_security_group_id = azurerm_network_security_group.myterraformnsg.id
}
resource "random_id" "randomId" {
keepers = {
# Generate a new ID only when a new resource group is defined
resource_group = azurerm_resource_group.myterraformgroup.name
}
byte_length = 8
}
resource "azurerm_storage_account" "mystorageaccount" {
name = "diag${random_id.randomId.hex}"
resource_group_name = azurerm_resource_group.myterraformgroup.name
location = "eastus"
account_tier = "Standard"
account_replication_type = "LRS"
tags = {
environment = "Terraform Demo"
}
}
resource "tls_private_key" "example_ssh" {
algorithm = "RSA"
rsa_bits = 4096
}
output "tls_private_key" { value = tls_private_key.example_ssh.private_key_pem }
resource "azurerm_linux_virtual_machine" "myterraformvm" {
name = "myVM"
location = "eastus"
resource_group_name = azurerm_resource_group.myterraformgroup.name
network_interface_ids = [azurerm_network_interface.myterraformnic.id]
size = "Standard_DS1_v2"
os_disk {
name = "myOsDisk"
caching = "ReadWrite"
storage_account_type = "Premium_LRS"
}
source_image_reference {
publisher = "Canonical"
offer = "UbuntuServer"
sku = "18.04-LTS"
version = "latest"
}
computer_name = "myvm"
admin_username = "azureuser"
disable_password_authentication = true
admin_ssh_key {
username = "azureuser"
public_key = tls_private_key.example_ssh.public_key_openssh
}
boot_diagnostics {
storage_account_uri = azurerm_storage_account.mystorageaccount.primary_blob_endpoint
}
tags = {
environment = "Terraform Demo"
}
}
Any help/pointers would be greatly appreciated!
After my validation, you can save the private key from the output to a file named key.pem in your home directory, for example C:\Users\username\ on Windows 10 or /home/username/ on Linux.
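Since the output holds the key contents, one way to write it to that file (purely as an illustration, and assuming Terraform 0.14 or later for the -raw flag) is to run terraform output -raw tls_private_key > key.pem from the configuration directory.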
Then you can access the Azure VM with the following command in the shell:
ssh -i "C:\Users\username\key.pem" azureuser@23.x.x.x
In addition, the private key generated by tls_private_key will be stored unencrypted in your Terraform state file. It's recommended to generate a private key file outside of Terraform and distribute it securely to the system where Terraform will be run.
You can use ssh-keygen in PowerShell in Windows 10 to create the key pair on the client machine. The key pair is saved into the directory C:\Users\username\.ssh.
For example, you can then pass the public key to the Azure VM with the Terraform file function:
admin_ssh_key {
  username   = "azureuser"
  public_key = file("C:\\Users\\someusername\\.ssh\\id_rsa.pub")
  # previously: tls_private_key.example_ssh.public_key_openssh
}
First, create the key:
ssh-keygen -t rsa -b 2048 -C email@example.com
Second, add the path of the public key:
admin_ssh_key {
  username   = "azureuser"
  public_key = file("C:\\Users\\someusername\\.ssh\\id_rsa.pub")
}
Finally, log in:
ssh -i "C:\Users\someusername\.ssh\id_rsa" azureuser@20.x.x.x

Terraform: SSH authentication failed (user#:22): ssh: handshake failed

I wrote some Terraform code to create a new VM and want to execute a command on it via remote-exec but it throws an SSH connection error:
Error: timeout - last error: SSH authentication failed (admin@:22): ssh: handshake failed: ssh: unable to authenticate, attempted methods [none publickey], no supported methods remain.
My Terraform code:
# Create a resource group if it doesn’t exist
resource "azurerm_resource_group" "rg" {
name = "${var.deployment}-mp-rg"
location = "${var.azure_environment}"
tags = {
environment = "${var.deployment}"
}
}
# Create virtual network
resource "azurerm_virtual_network" "vnet" {
name = "${var.deployment}-mp-vnet"
address_space = ["10.0.0.0/16"]
location = "${var.azure_environment}"
resource_group_name = "${azurerm_resource_group.rg.name}"
tags = {
environment = "${var.deployment}"
}
}
# Create subnet
resource "azurerm_subnet" "subnet" {
name = "${var.deployment}-mp-subnet"
resource_group_name = "${azurerm_resource_group.rg.name}"
virtual_network_name = "${azurerm_virtual_network.vnet.name}"
address_prefix = "10.0.1.0/24"
}
# Create public IPs
resource "azurerm_public_ip" "publicip" {
name = "${var.deployment}-mp-publicip"
location = "${var.azure_environment}"
resource_group_name = "${azurerm_resource_group.rg.name}"
allocation_method = "Dynamic"
tags = {
environment = "${var.deployment}"
}
}
# Create Network Security Group and rule
resource "azurerm_network_security_group" "nsg" {
name = "${var.deployment}-mp-nsg"
location = "${var.azure_environment}"
resource_group_name = "${azurerm_resource_group.rg.name}"
security_rule {
name = "SSH"
priority = 1001
direction = "Inbound"
access = "Allow"
protocol = "Tcp"
source_port_range = "*"
destination_port_range = "22"
source_address_prefix = "*"
destination_address_prefix = "*"
}
tags = {
environment = "${var.deployment}"
}
}
# Create network interface
resource "azurerm_network_interface" "nic" {
name = "${var.deployment}-mp-nic"
location = "${var.azure_environment}"
resource_group_name = "${azurerm_resource_group.rg.name}"
network_security_group_id = "${azurerm_network_security_group.nsg.id}"
ip_configuration {
name = "${var.deployment}-mp-nicconfiguration"
subnet_id = "${azurerm_subnet.subnet.id}"
private_ip_address_allocation = "Dynamic"
public_ip_address_id = "${azurerm_public_ip.publicip.id}"
}
tags = {
environment = "${var.deployment}"
}
}
# Generate random text for a unique storage account name
resource "random_id" "randomId" {
keepers = {
# Generate a new ID only when a new resource group is defined
resource_group = "${azurerm_resource_group.rg.name}"
}
byte_length = 8
}
# Create storage account for boot diagnostics
resource "azurerm_storage_account" "storageaccount" {
name = "diag${random_id.randomId.hex}"
resource_group_name = "${azurerm_resource_group.rg.name}"
location = "${var.azure_environment}"
account_tier = "Standard"
account_replication_type = "LRS"
tags = {
environment = "${var.deployment}"
}
}
# Create virtual machine
resource "azurerm_virtual_machine" "vm" {
name = "${var.deployment}-mp-vm"
location = "${var.azure_environment}"
resource_group_name = "${azurerm_resource_group.rg.name}"
network_interface_ids = ["${azurerm_network_interface.nic.id}"]
vm_size = "Standard_DS1_v2"
storage_os_disk {
name = "${var.deployment}-mp-disk"
caching = "ReadWrite"
create_option = "FromImage"
managed_disk_type = "Premium_LRS"
}
storage_image_reference {
publisher = "Canonical"
offer = "UbuntuServer"
sku = "16.04-LTS"
version = "latest"
}
os_profile {
computer_name = "${var.deployment}-mp-ansible"
admin_username = "${var.ansible_user}"
}
os_profile_linux_config {
disable_password_authentication = true
ssh_keys {
path = "/home/${var.ansible_user}/.ssh/authorized_keys"
key_data = "${var.public_key}"
}
}
boot_diagnostics {
enabled = "true"
storage_uri = "${azurerm_storage_account.storageaccount.primary_blob_endpoint}"
}
tags = {
environment = "${var.deployment}"
}
}
resource "null_resource" "ssh_connection" {
connection {
host = "${azurerm_public_ip.publicip.ip_address}"
type = "ssh"
private_key = "${file(var.private_key)}"
port = 22
user = "${var.ansible_user}"
agent = false
timeout = "1m"
}
provisioner "remote-exec" {
inline = ["sudo apt-get -qq install python"]
}
}
I have tried to SSH into the new VM manually with admin@xx.xx.xx.xx:22 and it works. Looking at the error message, I then output the parameter ${azurerm_public_ip.publicip.ip_address}, but it is null, so I think this is why the SSH authentication failed, though I don't know why it is null. If I want the Terraform script to SSH into the server, how can I modify the code?
Your issue is that Terraform has built a dependency graph in which the only dependency of null_resource.ssh_connection is the azurerm_public_ip.publicip resource, so it starts trying to connect before the instance has been created.
This in itself isn't a massive issue, as the provisioner would normally retry in case SSH isn't yet available, but the connection details are determined as soon as the null resource starts. And with the azurerm_public_ip set to an allocation_method of Dynamic, it won't get its IP address until after it has been attached to a resource:
Note Dynamic Public IP Addresses aren't allocated until they're assigned to a resource (such as a Virtual Machine or a Load Balancer) by design within Azure - more information is available below.
There are a few different ways you can solve this. You could make the null_resource depend on the azurerm_virtual_machine.vm resource, either via interpolation or via depends_on:
resource "null_resource" "ssh_connection" {
connection {
host = "${azurerm_public_ip.publicip.ip_address}"
type = "ssh"
private_key = "${file(var.private_key)}"
port = 22
user = "${var.ansible_user}"
agent = false
timeout = "1m"
}
provisioner "remote-exec" {
inline = [
"echo ${azurerm_virtual_machine.vm.id}",
"sudo apt-get -qq install python",
]
}
}
or
resource "null_resource" "ssh_connection" {
depends_on = ["azurerm_virtual_machine.vm"]
connection {
host = "${azurerm_public_ip.publicip.ip_address}"
type = "ssh"
private_key = "${file(var.private_key)}"
port = 22
user = "${var.ansible_user}"
agent = false
timeout = "1m"
}
provisioner "remote-exec" {
inline = ["sudo apt-get -qq install python"]
}
}
A better approach here would be to run the provisioner as part of the azurerm_virtual_machine.vm resource instead of a null_resource. The usual reasons to launch a provisioner from a null_resource are that you need to wait until something else has happened to a resource (such as attaching a disk) or that there is no appropriate resource to attach it to, and neither really applies here. So instead of your existing null_resource, you'd move the provisioner into the azurerm_virtual_machine.vm resource:
resource "azurerm_virtual_machine" "vm" {
# ...
provisioner "remote-exec" {
connection {
host = "${azurerm_public_ip.publicip.ip_address}"
type = "ssh"
private_key = "${file(var.private_key)}"
port = 22
user = "${var.ansible_user}"
agent = false
timeout = "1m"
}
inline = ["sudo apt-get -qq install python"]
}
}
For many resources this also allows you to refer to the outputs of the resource you are provisioning by using the self keyword. Unfortunately, the azurerm_virtual_machine resource doesn't easily expose the IP address of the VM, because the address is determined by the network interfaces set via network_interface_ids.
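One workaround (a sketch in the same pre-0.12 syntax, mirroring the "retrieve the dynamic public IP of a new VM" pattern from the azurerm documentation) is to read the address back through a data source that implicitly depends on the VM, so it is populated by the time the provisioner connects:
# reads the public IP only after the VM exists, because it references the VM resource
data "azurerm_public_ip" "provisioning" {
  name                = "${azurerm_public_ip.publicip.name}"
  resource_group_name = "${azurerm_virtual_machine.vm.resource_group_name}"
}
# then, inside the connection block:
# host = "${data.azurerm_public_ip.provisioning.ip_address}"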

The azurerm_virtual_machine is not displaying in the Terraform plan

I am using an existing template from GitHub and have modified it a little bit. When I run terraform.exe plan, it says it will deploy 4 resources (NIC, NSG, VM-SA, and resource group).
I am trying to deploy a VM and have it joined to an existing VNet.
I have removed the NIC to see if it would add the Windows VM for deployment, and it does not.
main.tf
# Configure the Microsoft Azure Provider
provider "azurerm" {
subscription_id = "************************************"
tenant_id = "************************************"
client_id = "************************************"
client_secret = "************************************"
}
module "os" {
source = "./os"
vm_os_simple = "${var.vm_os_simple}"
}
resource "azurerm_resource_group" "vm" {
name = "${var.resource_group_name}"
location = "${var.location}"
tags = "${var.tags}"
}
resource "random_id" "vm-sa" {
keepers = {
vm_hostname = "${var.vm_hostname}"
}
byte_length = 6
}
resource "azurerm_network_security_group" "nsg" {
name = "${var.network_security_group}"
location = "${var.location}"
resource_group_name = "${var.resource_group_name}"
}
resource "azurerm_storage_account" "vm-sa" {
count = "${var.boot_diagnostics == "true" ? 1 : 0}"
name = "bootdiag${lower(random_id.vm-sa.hex)}"
resource_group_name = "${azurerm_resource_group.vm.name}"
location = "${var.location}"
account_tier = "${element(split("_", var.boot_diagnostics_sa_type),0)}"
account_replication_type = "${element(split("_", var.boot_diagnostics_sa_type),1)}"
tags = "${var.tags}"
}
resource "azurerm_virtual_machine" "vm-windows" {
count = "${((var.is_windows_image == "true" || contains(list("${var.vm_os_simple}","${var.vm_os_offer}"), "Windows")) && var.data_disk == "false") ? var.nb_instances : 0}"
name = "${var.vm_hostname}${count.index}"
location = "${var.location}"
resource_group_name = "${azurerm_resource_group.vm.name}"
vm_size = "${var.vm_size}"
network_interface_ids = ["${element(azurerm_network_interface.vm.*.id, count.index)}"]
delete_os_disk_on_termination = "${var.delete_os_disk_on_termination}"
storage_image_reference {
id = "${var.vm_os_id}"
publisher = "${var.vm_os_id == "" ? coalesce(var.vm_os_publisher, module.os.calculated_value_os_publisher) : ""}"
offer = "${var.vm_os_id == "" ? coalesce(var.vm_os_offer, module.os.calculated_value_os_offer) : ""}"
sku = "${var.vm_os_id == "" ? coalesce(var.vm_os_sku, module.os.calculated_value_os_sku) : ""}"
version = "${var.vm_os_id == "" ? var.vm_os_version : ""}"
}
storage_os_disk {
name = "osdisk-${var.vm_hostname}-${count.index}"
create_option = "FromImage"
caching = "ReadWrite"
managed_disk_type = "${var.storage_account_type}"
}
os_profile {
computer_name = "${var.vm_hostname}${count.index}"
admin_username = "${var.admin_username}"
admin_password = "${var.admin_password}"
}
tags = "${var.tags}"
os_profile_windows_config {
provision_vm_agent = true
}
boot_diagnostics {
enabled = "${var.boot_diagnostics}"
storage_uri = "${var.boot_diagnostics == "true" ? join(",", azurerm_storage_account.vm-sa.*.primary_blob_endpoint) : "" }"
}
}
#refer to a subnet
data "azurerm_subnet" "test" {
name = "SubnetName"
virtual_network_name = "VNetName"
resource_group_name = "VNetresourceGroupName"
}
resource "azurerm_network_interface" "vm" {
count = "${var.nb_instances}"
name = "nic-${var.vm_hostname}-${count.index}"
location = "${azurerm_resource_group.vm.location}"
resource_group_name = "${azurerm_resource_group.vm.name}"
network_security_group_id = "${azurerm_network_security_group.nsg.id}"
ip_configuration {
name = "ipconfig${count.index}"
subnet_id = "${data.azurerm_subnet.test.id}"
private_ip_address_allocation = "Dynamic"
}
tags = "${var.tags}"
}
The expected result is to deploy a VM, storage account, network security group, and NIC joined to an existing VNet.
To create an Azure VM through Terraform, you can see all the steps in Create a complete Linux virtual machine infrastructure in Azure with Terraform. It describes a Linux VM, but you can change the image to Windows and replace os_profile_linux_config with os_profile_windows_config.
For an existing VNet, you can use a Terraform data source to reference the VNet, as you already do:
data "azurerm_subnet" "existing" {
name = "SubnetName"
virtual_network_name = "VNetName"
resource_group_name = "VNetresourceGroupName"
}
resource "azurerm_network_interface" "myterraformnic" {
name = "myNIC"
location = "eastus"
resource_group_name = "${azurerm_resource_group.myterraformgroup.name}"
network_security_group_id = "${azurerm_network_security_group.myterraformnsg.id}"
ip_configuration {
name = "myNicConfiguration"
subnet_id = "${data.azurerm_subnet.existing.id}"
private_ip_address_allocation = "Dynamic"
public_ip_address_id = "${azurerm_public_ip.myterraformpublicip.id}"
}
tags {
environment = "Terraform Demo"
}
}
The whole Terraform code is below; you can change the VM details as you want.
# Configure the Microsoft Azure Provider
provider "azurerm" {
subscription_id = "xxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"
client_id = "xxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"
client_secret = "xxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"
tenant_id = "xxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"
}
# Create a resource group if it doesn’t exist
resource "azurerm_resource_group" "myterraformgroup" {
name = "myResourceGroup"
location = "eastus"
tags {
environment = "Terraform Demo"
}
}
# the existing subnet of the virtual network
data "azurerm_subnet" "existing" {
name = "SubnetName"
virtual_network_name = "VNetName"
resource_group_name = "VNetresourceGroupName"
}
# Create public IPs
resource "azurerm_public_ip" "myterraformpublicip" {
name = "myPublicIP"
location = "eastus"
resource_group_name = "${azurerm_resource_group.myterraformgroup.name}"
allocation_method = "Dynamic"
tags {
environment = "Terraform Demo"
}
}
# Create Network Security Group and rule
resource "azurerm_network_security_group" "myterraformnsg" {
name = "myNetworkSecurityGroup"
location = "eastus"
resource_group_name = "${azurerm_resource_group.myterraformgroup.name}"
security_rule {
name = "RDP"
priority = 1001
direction = "Inbound"
access = "Allow"
protocol = "Tcp"
source_port_range = "*"
destination_port_range = "3306"
source_address_prefix = "*"
destination_address_prefix = "*"
}
tags {
environment = "Terraform Demo"
}
}
# Create network interface
resource "azurerm_network_interface" "myterraformnic" {
name = "myNIC"
location = "eastus"
resource_group_name = "${azurerm_resource_group.myterraformgroup.name}"
network_security_group_id = "${azurerm_network_security_group.myterraformnsg.id}"
ip_configuration {
name = "myNicConfiguration"
subnet_id = "${data.azurerm_subnet.existing.id}"
private_ip_address_allocation = "Dynamic"
public_ip_address_id = "${azurerm_public_ip.myterraformpublicip.id}"
}
tags {
environment = "Terraform Demo"
}
}
# Generate random text for a unique storage account name
resource "random_id" "randomId" {
keepers = {
# Generate a new ID only when a new resource group is defined
resource_group = "${azurerm_resource_group.myterraformgroup.name}"
}
byte_length = 8
}
# Create storage account for boot diagnostics
resource "azurerm_storage_account" "mystorageaccount" {
name = "diag${random_id.randomId.hex}"
resource_group_name = "${azurerm_resource_group.myterraformgroup.name}"
location = "eastus"
account_tier = "Standard"
account_replication_type = "LRS"
tags {
environment = "Terraform Demo"
}
}
# Create virtual machine
resource "azurerm_virtual_machine" "myterraformvm" {
name = "myVM"
location = "eastus"
resource_group_name = "${azurerm_resource_group.myterraformgroup.name}"
network_interface_ids = ["${azurerm_network_interface.myterraformnic.id}"]
vm_size = "Standard_DS1_v2"
storage_os_disk {
name = "myOsDisk"
caching = "ReadWrite"
create_option = "FromImage"
managed_disk_type = "Premium_LRS"
}
storage_image_reference {
publisher = "MicrosoftWindowsServer"
offer = "WindowsServer"
sku = "2016-Datacenter-Server-Core-smalldisk"
version = "latest"
}
os_profile {
computer_name = "myvm"
admin_username = "azureuser"
admin_password = "Passwd#!1234"
}
os_profile_windows_config {
provision_vm_agent = true
}
boot_diagnostics {
enabled = "true"
storage_uri = "${azurerm_storage_account.mystorageaccount.primary_blob_endpoint}"
}
tags {
environment = "Terraform Demo"
}
}
For more details, see Azure Virtual Machine in Terraform.

Provisioning a Windows VM in Azure with WinRM port (5986) open

I'm trying to provision a Windows VM on Azure with Terraform with port 5986 open to allow WinRM access. The provisioning of the VM itself works.
I'm stuck on opening the port with Terraform during provisioning. Any ideas?
You can follow the Terraform script below to create a Windows Server 2016 Datacenter VM and open the default RDP port 3389 and port 5986 in the NSG. It works for me.
Terraform v0.11.8
+ provider.azurerm v1.14.0
+ provider.random v2.0.0
The full sample
variable "resourcename" {
default = "myResourceGroup"
}
# Configure the Microsoft Azure Provider
provider "azurerm" {
subscription_id = "xxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"
client_id = "xxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"
client_secret = "xxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"
tenant_id = "xxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"
}
# Create a resource group if it doesn’t exist
resource "azurerm_resource_group" "myterraformgroup" {
name = "myResourceGroup"
location = "eastus"
tags {
environment = "Terraform Demo"
}
}
# Create virtual network
resource "azurerm_virtual_network" "myterraformnetwork" {
name = "myVnet"
address_space = ["10.0.0.0/16"]
location = "eastus"
resource_group_name = "${azurerm_resource_group.myterraformgroup.name}"
tags {
environment = "Terraform Demo"
}
}
# Create subnet
resource "azurerm_subnet" "myterraformsubnet" {
name = "mySubnet"
resource_group_name = "${azurerm_resource_group.myterraformgroup.name}"
virtual_network_name = "${azurerm_virtual_network.myterraformnetwork.name}"
address_prefix = "10.0.1.0/24"
}
# Create public IPs
resource "azurerm_public_ip" "myterraformpublicip" {
name = "myPublicIP"
location = "eastus"
resource_group_name = "${azurerm_resource_group.myterraformgroup.name}"
public_ip_address_allocation = "dynamic"
tags {
environment = "Terraform Demo"
}
}
# Create Network Security Group and rule
resource "azurerm_network_security_group" "myterraformnsg" {
name = "myNetworkSecurityGroup"
location = "eastus"
resource_group_name = "${azurerm_resource_group.myterraformgroup.name}"
security_rule {
name = "RDP"
priority = 1001
direction = "Inbound"
access = "Allow"
protocol = "Tcp"
source_port_range = "*"
destination_port_range = "3389"
source_address_prefix = "*"
destination_address_prefix = "*"
}
security_rule {
name = "WinRM"
priority = 998
direction = "Inbound"
access = "Allow"
protocol = "Tcp"
source_port_range = "*"
destination_port_range = "5986"
source_address_prefix = "*"
destination_address_prefix = "*"
}
tags {
environment = "Terraform Demo"
}
}
# Create network interface
resource "azurerm_network_interface" "myterraformnic" {
name = "myNIC"
location = "eastus"
resource_group_name = "${azurerm_resource_group.myterraformgroup.name}"
network_security_group_id = "${azurerm_network_security_group.myterraformnsg.id}"
ip_configuration {
name = "myNicConfiguration"
subnet_id = "${azurerm_subnet.myterraformsubnet.id}"
private_ip_address_allocation = "dynamic"
public_ip_address_id = "${azurerm_public_ip.myterraformpublicip.id}"
}
tags {
environment = "Terraform Demo"
}
}
# Generate random text for a unique storage account name
resource "random_id" "randomId" {
keepers = {
# Generate a new ID only when a new resource group is defined
resource_group = "${azurerm_resource_group.myterraformgroup.name}"
}
byte_length = 8
}
# Create storage account for boot diagnostics
resource "azurerm_storage_account" "mystorageaccount" {
name = "diag${random_id.randomId.hex}"
resource_group_name = "${azurerm_resource_group.myterraformgroup.name}"
location = "eastus"
account_tier = "Standard"
account_replication_type = "LRS"
tags {
environment = "Terraform Demo"
}
}
# Create virtual machine
resource "azurerm_virtual_machine" "myterraformvm" {
name = "myVM"
location = "eastus"
resource_group_name = "${azurerm_resource_group.myterraformgroup.name}"
network_interface_ids = ["${azurerm_network_interface.myterraformnic.id}"]
vm_size = "Standard_DS1_v2"
storage_os_disk {
name = "myOsDisk"
caching = "ReadWrite"
create_option = "FromImage"
managed_disk_type = "Premium_LRS"
}
storage_image_reference {
publisher = "MicrosoftWindowsServer"
offer = "WindowsServer"
sku = "2016-Datacenter"
version = "latest"
}
os_profile {
computer_name = "myvm"
admin_username = "azureuser"
admin_password = "Password1234!"
}
os_profile_windows_config {
enable_automatic_upgrades = false
}
boot_diagnostics {
enabled = "true"
storage_uri = "${azurerm_storage_account.mystorageaccount.primary_blob_endpoint}"
}
tags {
environment = "Terraform Demo"
}
}
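Opening 5986 in the NSG only lets the traffic through; the VM itself also needs a WinRM HTTPS listener. As a sketch that is not part of the sample above, the legacy azurerm_virtual_machine resource can request one through a winrm block inside os_profile_windows_config; the certificate URL here is an illustrative placeholder for a certificate that would normally live in Key Vault and be installed via os_profile_secrets:
os_profile_windows_config {
  enable_automatic_upgrades = false
  winrm {
    protocol        = "HTTPS"
    # placeholder only; point this at the Key Vault secret holding the WinRM certificate
    certificate_url = "<key-vault-certificate-secret-url>"
  }
}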
