Executing a shell script in 3 Azure VMs using Terraform (azurerm)

The code below creates 3 Ubuntu Linux Azure VMs. At the same time, I want to execute a shell script on these 3 VMs. For this I am using the code below, but I am getting the following error. Basically, I am using filebase64 to pass the script, but it is not working. Could anyone please check it and let me know the issue?
# Resource-1: Azure Resource Group
resource "azurerm_resource_group" "myrg" {
  name     = "${var.resource_group}"
  location = "${var.location}"
}

# Create Virtual Network
resource "azurerm_virtual_network" "myvnet" {
  name                = "myvnet-1"
  address_space       = ["10.0.0.0/16"]
  location            = azurerm_resource_group.myrg.location
  resource_group_name = azurerm_resource_group.myrg.name
}

# Create Subnet
resource "azurerm_subnet" "mysubnet" {
  name                 = "mysubnet-1"
  resource_group_name  = azurerm_resource_group.myrg.name
  virtual_network_name = azurerm_virtual_network.myvnet.name
  address_prefixes     = ["10.0.2.0/24"]
}

# Create Bastion host subnet
resource "azurerm_subnet" "Azure_Bastion_Subnet" {
  name                 = "AzureBastionSubnet"
  resource_group_name  = azurerm_resource_group.myrg.name
  virtual_network_name = azurerm_virtual_network.myvnet.name
  address_prefixes     = ["10.0.3.0/24"]
}

# Create Azure Public IP Address
resource "azurerm_public_ip" "mypublicip" {
  count               = "${var.instance_count}"
  name                = "mypublicip-${count.index}"
  resource_group_name = azurerm_resource_group.myrg.name
  location            = azurerm_resource_group.myrg.location
  allocation_method   = "Static"
  sku                 = "Standard"
  domain_name_label   = "app1-vm-${count.index}-${random_string.myrandom.id}"
}

resource "azurerm_network_interface" "myvmnic" {
  count               = "${var.instance_count}"
  name                = "vmnic-${count.index}"
  location            = azurerm_resource_group.myrg.location
  resource_group_name = azurerm_resource_group.myrg.name

  ip_configuration {
    name                          = "internal"
    subnet_id                     = azurerm_subnet.mysubnet.id
    private_ip_address_allocation = "Dynamic"
    public_ip_address_id          = element(azurerm_public_ip.mypublicip[*].id, count.index)
  }
}

resource "azurerm_network_security_group" "linux-nsg" {
  name                = "acceptanceTestSecurityGroup1"
  location            = azurerm_resource_group.myrg.location
  resource_group_name = azurerm_resource_group.myrg.name

  security_rule {
    name                       = "Allowssh"
    priority                   = 150
    direction                  = "Inbound"
    access                     = "Allow"
    protocol                   = "*"
    source_port_range          = "*"
    destination_port_range     = "*"
    source_address_prefix      = "*"
    destination_address_prefix = "*"
  }

  security_rule {
    name                       = "Allow"
    priority                   = 100
    direction                  = "Outbound"
    access                     = "Allow"
    protocol                   = "*"
    source_port_range          = "*"
    destination_port_range     = "*"
    source_address_prefix      = "*"
    destination_address_prefix = "*"
  }
}

resource "azurerm_subnet_network_security_group_association" "example" {
  subnet_id                 = azurerm_subnet.mysubnet.id
  network_security_group_id = azurerm_network_security_group.linux-nsg.id
}

data "template_file" "config" {
  template = file("${path.module}/script.sh")
}

# Resource: Azure Linux Virtual Machine
resource "azurerm_linux_virtual_machine" "mylinuxvm" {
  count                 = "${var.instance_count}"
  name                  = "mylinuxvm-${count.index}"
  computer_name         = "zookeeper-${count.index}" # Hostname of the VM
  resource_group_name   = azurerm_resource_group.myrg.name
  location              = azurerm_resource_group.myrg.location
  size                  = "Standard_DS1_v2"
  admin_username        = "useradmin"
  #admin_password       = "Solr#12345"
  network_interface_ids = [element(azurerm_network_interface.myvmnic[*].id, count.index)]

  custom_data                     = filebase64(data.template_file.config.rendered)
  disable_password_authentication = true

  admin_ssh_key {
    username   = "useradmin"
    public_key = file("${path.module}/ssh-keys/terraform-azure.pub")
  }

  os_disk {
    name                 = "osdisk${count.index}"
    caching              = "ReadWrite"
    storage_account_type = "Standard_LRS"
  }

  source_image_reference {
    publisher = "Canonical"
    offer     = "UbuntuServer"
    sku       = "16.04-LTS"
    version   = "latest"
  }

  connection {
    type        = "ssh"
    host        = self.public_ip_address
    user        = self.admin_username
    private_key = file("${path.module}/ssh-keys/terraform-azure.pem")
  }
}

resource "azurerm_public_ip" "bastion_ip" {
  name                = "bastion_ip"
  location            = azurerm_resource_group.myrg.location
  resource_group_name = azurerm_resource_group.myrg.name
  allocation_method   = "Static"
  sku                 = "Standard"
}

resource "azurerm_bastion_host" "bastion_test" {
  name                = "bastion-test"
  location            = azurerm_resource_group.myrg.location
  resource_group_name = azurerm_resource_group.myrg.name

  ip_configuration {
    name                 = "bastion-configuration"
    subnet_id            = azurerm_subnet.Azure_Bastion_Subnet.id
    public_ip_address_id = azurerm_public_ip.bastion_ip.id
  }
}
Error: Invalid function argument
on main.tf line 125, in resource "azurerm_linux_virtual_machine" "mylinuxvm":
125: custom_data = filebase64(data.template_file.config.rendered)
├────────────────
│ while calling filebase64(path)
│ data.template_file.config.rendered is "#!/bin/sh\nhost=$(hostname)\nif [ \"$host\" = \"zookeeper-0\" ]\nthen\nmkdir -p
~/zookeeper/zk-server-1\nmkdir -p ~/zookeeper/data/zk1\nmkdir -p ~/zookeeper/log/zk1\n\nelif [ \"$host\" = \"zookeeper-1\"
]\nthen\nmkdir -p ~/zookeeper/zk-server-2\nmkdir -p ~/zookeeper/data/zk2\nmkdir -p ~/zookeeper/log/zk2\necho \"2\" >
~/zookeeper/data/zk2/myid\n\nelif [ \"$host\" = \"zookeeper-2\" ]\nthen\nmkdir -p ~/zookeeper/zk-server-3\nmkdir -p
~/zookeeper/data/zk3\nmkdir -p ~/zookeeper/log/zk3\necho \"3\" > ~/zookeeper/data/zk3/myid\nfi\n\n"
Invalid value for "path" parameter: no file exists at
"#!/bin/sh\nhost=$(hostname)\nif [ \"$host\" = \"zookeeper-0\" ]\nthen\nmkdir
-p ~/zookeeper/zk-server-1\nmkdir -p ~/zookeeper/data/zk1\nmkdir -p
~/zookeeper/log/zk1\n\nelif [ \"$host\" = \"zookeeper-1\" ]\nthen\nmkdir -p
~/zookeeper/zk-server-2\nmkdir -p ~/zookeeper/data/zk2\nmkdir -p
~/zookeeper/log/zk2\necho \"2\" > ~/zookeeper/data/zk2/myid\n\nelif [
\"$host\" = \"zookeeper-2\" ]\nthen\nmkdir -p ~/zookeeper/zk-server-3\nmkdir
-p ~/zookeeper/data/zk3\nmkdir -p ~/zookeeper/log/zk3\necho \"3\" >
~/zookeeper/data/zk3/myid\nfi\n\n"; this function works only with files that
are distributed as part of the configuration source code, so if this file
will be created by a resource in this configuration you must instead obtain
this result from an attribute of that resource.
(The same error is repeated for each of the three VM instances.)
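For readability, the script.sh shown escaped in the error output decodes to:
#!/bin/sh
host=$(hostname)
if [ "$host" = "zookeeper-0" ]
then
mkdir -p ~/zookeeper/zk-server-1
mkdir -p ~/zookeeper/data/zk1
mkdir -p ~/zookeeper/log/zk1

elif [ "$host" = "zookeeper-1" ]
then
mkdir -p ~/zookeeper/zk-server-2
mkdir -p ~/zookeeper/data/zk2
mkdir -p ~/zookeeper/log/zk2
echo "2" > ~/zookeeper/data/zk2/myid

elif [ "$host" = "zookeeper-2" ]
then
mkdir -p ~/zookeeper/zk-server-3
mkdir -p ~/zookeeper/data/zk3
mkdir -p ~/zookeeper/log/zk3
echo "3" > ~/zookeeper/data/zk3/myid
fi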

The problem is that filebase64() expects a path as its argument and reads the file itself, but here it is being passed the already-rendered contents of the script, which it then tries to interpret as a path.

Delete this block

data "template_file" "config" {
  template = file("${path.module}/script.sh")
}

and replace the code below

# Resource: Azure Linux Virtual Machine
resource "azurerm_linux_virtual_machine" "mylinuxvm" {
  [...
  custom_data = filebase64(data.template_file.config.rendered)
  ...]
}

with

# Resource: Azure Linux Virtual Machine
resource "azurerm_linux_virtual_machine" "mylinuxvm" {
  [...
  custom_data = filebase64("${path.module}/script.sh")
  ...]
}
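If you ever do need to interpolate Terraform values into the script before encoding it, an equivalent pattern is to render and encode in one expression; a minimal sketch using the built-in templatefile() and base64encode() functions (the empty map is where template variables would go):

# Resource: Azure Linux Virtual Machine
resource "azurerm_linux_virtual_machine" "mylinuxvm" {
  [...
  custom_data = base64encode(templatefile("${path.module}/script.sh", {}))
  ...]
}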
For reference, your plan output would then look like this:
# azurerm_linux_virtual_machine.mylinuxvm[0] will be created
+ resource "azurerm_linux_virtual_machine" "mylinuxvm" {
+ admin_username = "useradmin"
+ allow_extension_operations = true
+ computer_name = "zookeeper-0"
+ custom_data = (sensitive value)
+ disable_password_authentication = true
+ extensions_time_budget = "PT1H30M"
+ id = (known after apply)
+ location = "westeurope"
+ max_bid_price = -1
+ name = "mylinuxvm-0"
+ network_interface_ids = (known after apply)
+ patch_assessment_mode = "ImageDefault"
+ patch_mode = "ImageDefault"
+ platform_fault_domain = -1
+ priority = "Regular"
+ private_ip_address = (known after apply)
+ private_ip_addresses = (known after apply)
+ provision_vm_agent = true
+ public_ip_address = (known after apply)
+ public_ip_addresses = (known after apply)
+ resource_group_name = "rg-kv-stackoverflow"
+ size = "Standard_DS1_v2"
+ virtual_machine_id = (known after apply)
[...]
Edit:
The script I used to validate is below. Please also check whether your script itself works properly; you can verify this by checking the /var/log/cloud-init-output.log file on the VM.
#!/bin/bash
printf "Hello World"
mkdir -p /tmp/user-data
Logs from cloud-init-output.log
Cloud-init v. 21.1-19-gbad84ad4-0ubuntu1~16.04.2 running 'modules:config' at Tue, 10 Jan 2023 10:59:16 +0000. Up 40.04 seconds.
Hello WorldCloud-init v. 21.1-19-gbad84ad4-0ubuntu1~16.04.2 running 'modules:final' at Tue, 10 Jan 2023 10:59:32 +0000. Up 56.18 seconds.
Execution of the script: the user-data directory is created.
azureadmin@zookeeper-0:~$ ls -l /tmp
total 8
drwx------ 3 root root 4096 Jan 10 10:58 systemd-private-1ff50958c212495f8f02a9d123bf4cac-systemd-timesyncd.service-Wcs89v
drwxr-xr-x 2 root root 4096 Jan 10 10:59 user-data
Additional background
Custom data is only made available to the VM during first startup or setup, which is called provisioning. If your machine was already created and you want to run custom data against it afterwards, you have to take additional steps.
Refer to https://learn.microsoft.com/en-us/azure/virtual-machines/custom-data
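For an already-provisioned VM, one common option is the Custom Script Extension, which runs a script post-deployment. A minimal sketch, attaching to the first VM of this configuration (the resource name and the inline "script" setting, which takes a base64-encoded script body, are illustrative here):

resource "azurerm_virtual_machine_extension" "post_provision" {
  name                 = "post-provision-script"
  virtual_machine_id   = azurerm_linux_virtual_machine.mylinuxvm[0].id
  publisher            = "Microsoft.Azure.Extensions"
  type                 = "CustomScript"
  type_handler_version = "2.0"

  # "script" takes the base64-encoded script body
  settings = <<SETTINGS
{
  "script": "${filebase64("${path.module}/script.sh")}"
}
SETTINGS
}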

Related

Terraform Azure VM doesn't recreate the instance even after changing custom_data

I am trying to provision an Azure virtual machine and I need to install kubectl on it, so I am using a bash script and passing it to the VM's custom_data section.
Everything works fine: it provisions the VM and installs kubectl.
But the problem is, if I make some modifications to the bash script and do terraform apply, it shows no changes, as it does not detect that I changed the bash script.
Here's my code snippet. Can someone help me understand this?
locals {
  admin_username = "myjumphost"
  admin_password = "mypassword!610542"
}

resource "azurerm_virtual_machine" "vm" {
  name                             = var.vm_name
  location                         = var.location
  resource_group_name              = var.rg_name
  network_interface_ids            = var.nic_id
  vm_size                          = var.vm_size
  delete_os_disk_on_termination    = true
  delete_data_disks_on_termination = true

  storage_image_reference {
    publisher = var.storage_image_reference.publisher
    offer     = var.storage_image_reference.offer
    sku       = var.storage_image_reference.sku
    version   = var.storage_image_reference.version
  }

  storage_os_disk {
    name              = var.storage_os_disk.name
    caching           = var.storage_os_disk.caching
    create_option     = var.storage_os_disk.create_option
    managed_disk_type = var.storage_os_disk.managed_disk_type
  }

  os_profile {
    computer_name  = var.vm_name
    admin_username = local.admin_username
    admin_password = local.admin_password
    custom_data    = file("${path.module}/${var.custom_data_script}")
  }

  os_profile_linux_config {
    disable_password_authentication = true

    ssh_keys {
      key_data = file("${path.module}/${var.ssh_public_key}")
      path     = "/home/${local.admin_username}/.ssh/authorized_keys"
    }
  }

  tags = merge(var.common_tags)
}
and my script install.sh:
#!/bin/bash
# install Kubectl
curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl"
curl -LO "https://dl.k8s.io/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl.sha256"
sudo install -o root -g root -m 0755 kubectl /usr/local/bin/kubectl
kubectl version --client --output=yaml > /tmp/kubectl_version.yaml
# Install Azure CLI
curl -sL https://aka.ms/InstallAzureCLIDeb | sudo bash
Try the refresh command; it is backwards compatible. Usually it won't be necessary, because plan executes the same refresh. Most of the confusion here comes from the bash or PowerShell script being run as a cloud-init script on the backend; we can cross-check that in the activity logs.
terraform apply -refresh-only -auto-approve
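If the goal is to have the VM rebuilt whenever the script changes, you can also force replacement of the resource explicitly (available in Terraform v0.15.2 and later):

terraform apply -replace="azurerm_virtual_machine.vm"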
I have tried to replicate the same with the code base mentioned below.
Main tf file as follows:
resource "azurerm_resource_group" "example" {
name = "v-swarna-mindtree"
location = "Germany West Central"
}
data "azuread_client_config" "current" {}
resource "azurerm_virtual_network" "puvnet" {
name = "Public_VNET"
resource_group_name = azurerm_resource_group.example.name
location = "Germany West Central"
address_space = ["10.19.0.0/16"]
dns_servers = ["10.19.0.4", "10.19.0.5"]
}
resource "azurerm_subnet" "osubnet" {
name = "Outer_Subnet"
resource_group_name = azurerm_resource_group.example.name
address_prefixes = ["10.19.1.0/24"]
virtual_network_name = azurerm_virtual_network.puvnet.name
}
resource "azurerm_network_interface" "main" {
name = "testdemo"
location = "Germany West Central"
resource_group_name = azurerm_resource_group.example.name
ip_configuration {
name = "testconfiguration1"
subnet_id = azurerm_subnet.osubnet.id
private_ip_address_allocation = "Dynamic"
}
}
resource "azurerm_virtual_machine" "main" {
name = "vmjumphost"
location = "Germany West Central"
resource_group_name = azurerm_resource_group.example.name
network_interface_ids = [azurerm_network_interface.main.id]
//vm_size = "Standard_A1_v2"
vm_size ="Standard_DS2_v2"
storage_image_reference {
offer = "0001-com-ubuntu-server-focal"
publisher = "Canonical"
sku = "20_04-lts-gen2"
version = "latest"
}
storage_os_disk {
name = "myosdisk2"
caching = "ReadWrite"
create_option = "FromImage"
managed_disk_type = "Standard_LRS"
}
os_profile {
computer_name = "vm-swarnademo"
admin_username = "testadmin"
admin_password = "Password1234!"
// custom_data = file("${path.module}/${var.custom_data_script}")
custom_data = file("install.sh")
}
os_profile_linux_config {
disable_password_authentication = false
# ssh_keys {
# key_data = file("${path.module}/${var.ssh_public_key}")
# path = "/home/${local.admin_username}/.ssh/authorized_keys"
# }
}
tags = {
environment = "staging"
}
}
install.sh file as follows:
#!/bin/bash
# install Kubectl
curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl"
curl -LO "https://dl.k8s.io/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl.sha256"
sudo install -o root -g root -m 0755 kubectl /usr/local/bin/kubectl
kubectl version --client --output=yaml > /tmp/kubectl_version.yaml
#testing by adding command -start
#sudo apt-get -y update
#testing by adding command -End
#Install Azure CLI
curl -sL https://aka.ms/InstallAzureCLIDeb | sudo bash
I then updated the script with the command block below:
#testing by adding command -start
#sudo apt-get -y update
#testing by adding command -End
Step 1:
On running plan and apply, all resources were created in the portal.
Step 2:
Updated the script by enabling the commented command:
#testing by adding command -start
sudo apt-get -y update
#testing by adding command -End
Upon running plan and apply again, it refreshed the state of the virtual machine and picked up all the changes.
Verification from Activity Log:

Terraform azurerm_storage_share_directory does not work with file share 'NFS'

We created an Azure storage account with the intention of creating an 'Azure File' to be mounted using NFS (default is SMB). Below is the Terraform code which creates a storage account, a file share and a private endpoint to the file share so that it can be mounted using NFS.
resource "azurerm_storage_account" "az_file_sa" {
name = "abcdxxxyyyzzz"
resource_group_name = local.resource_group_name
location = var.v_region
account_tier = "Premium"
account_kind = "FileStorage"
account_replication_type = "LRS"
enable_https_traffic_only = false
}
resource "azurerm_storage_share" "file_share" {
name = "fileshare"
storage_account_name = azurerm_storage_account.az_file_sa.name
quota = 100
enabled_protocol = "NFS"
depends_on = [ azurerm_storage_account.az_file_sa ]
}
resource "azurerm_private_endpoint" "fileshare-endpoint" {
name = "fileshare-endpoint"
location = var.v_region
resource_group_name = local.resource_group_name
subnet_id = azurerm_subnet.subnet2.id
private_service_connection {
name = "fileshare-endpoint-connection"
private_connection_resource_id = azurerm_storage_account.az_file_sa.id
is_manual_connection = false
subresource_names = [ "file" ]
}
depends_on = [ azurerm_storage_share.file_share ]
}
This works fine. Now, if we try to create a directory on this file share using below Terraform code
resource "azurerm_storage_share_directory" "xxx" {
name = "dev"
share_name = "fileshare"
storage_account_name = "abcdxxxyyyzzz"
}
the error we get is:
│ Error: checking for presence of existing Directory "dev" (File Share "fileshare" / Storage Account "abcdxxxyyyzzz" / Resource Group "RG_XXX_YO"): directories.Client#Get: Failure sending request: StatusCode=0 -- Original Error: Get "https://abcdxxxyyyzzz.file.core.windows.net/fileshare/dev?restype=directory": read tcp 192.168.1.3:61175->20.60.179.37:443: read: connection reset by peer
Clearly, this share is not accessible over public https endpoint.
Is there a way to create a directory using 'azurerm_storage_share_directory' when file share is of type 'NFS'?
We were able to mount NFS on a Linux VM (in the same virtual network) using below code where 10.10.2.4 is private IP of the NFS fileshare endpoint.
sudo mkdir -p /mount/abcdxxxyyyzzz/fileshare
sudo mount -t nfs 10.10.2.4:/abcdxxxyyyzzz/fileshare /mount/abcdxxxyyyzzz/fileshare -o vers=4,minorversion=1,sec=sys
regards, Yogesh
Full Terraform files:
vnet.tf
resource "azurerm_virtual_network" "vnet" {
name = "yogimogi-vnet"
address_space = ["10.10.0.0/16"]
location = local.region
resource_group_name = local.resource_group_name
depends_on = [ azurerm_resource_group.rg ]
}
resource "azurerm_subnet" "subnet1" {
name = "yogimogi-vnet-subnet1"
resource_group_name = local.resource_group_name
virtual_network_name = azurerm_virtual_network.vnet.name
address_prefixes = ["10.10.1.0/24"]
service_endpoints = ["Microsoft.Storage"]
}
resource "azurerm_subnet" "subnet2" {
name = "yogimogi-vnet-subnet2"
resource_group_name = local.resource_group_name
virtual_network_name = azurerm_virtual_network.vnet.name
address_prefixes = ["10.10.2.0/24"]
service_endpoints = ["Microsoft.Storage"]
}
main.tf
resource "azurerm_resource_group" "rg" {
name = local.resource_group_name
location = local.region
tags = {
description = "Resource group for some testing, Yogesh KETKAR"
createdBy = "AutomationEdge"
createDate = "UTC time: ${timestamp()}"
}
}
resource "azurerm_storage_account" "sa" {
name = local.storage_account_name
resource_group_name = local.resource_group_name
location = local.region
account_tier = "Premium"
account_kind = "FileStorage"
account_replication_type = "LRS"
enable_https_traffic_only = false
depends_on = [ azurerm_resource_group.rg ]
}
resource "azurerm_storage_share" "file_share" {
name = "fileshare"
storage_account_name = azurerm_storage_account.sa.name
quota = 100
enabled_protocol = "NFS"
depends_on = [ azurerm_storage_account.sa ]
}
resource "azurerm_storage_account_network_rules" "network_rule" {
storage_account_id = azurerm_storage_account.sa.id
default_action = "Allow"
ip_rules = ["127.0.0.1"]
virtual_network_subnet_ids = [azurerm_subnet.subnet2.id, azurerm_subnet.subnet1.id]
bypass = ["Metrics"]
}
resource "azurerm_private_endpoint" "fileshare-endpoint" {
name = "fileshare-endpoint"
location = local.region
resource_group_name = local.resource_group_name
subnet_id = azurerm_subnet.subnet2.id
private_service_connection {
name = "fileshare-endpoint-connection"
private_connection_resource_id = azurerm_storage_account.sa.id
is_manual_connection = false
subresource_names = [ "file" ]
}
depends_on = [ azurerm_storage_share.file_share ]
}
resource "azurerm_storage_share_directory" "d1" {
name = "d1"
share_name = azurerm_storage_share.file_share.name
storage_account_name = azurerm_storage_account.sa.name
depends_on = [ azurerm_storage_share.file_share, azurerm_private_endpoint.fileshare-endpoint ]
}
The error is:
╷
│ Error: checking for presence of existing Directory "d1" (File Share "fileshare" / Storage Account "22xdkkdkdkdkdkdkdx22" / Resource Group "RG_Central_US_YOGIMOGI"): directories.Client#Get: Failure sending request: StatusCode=0 -- Original Error: Get
"https://22xdkkdkdkdkdkdkdx22.file.core.windows.net/fileshare/d1?restype=directory": read tcp 10.41.7.110:54240->20.209.18.37:443: read: connection reset by peer
│
│ with azurerm_storage_share_directory.d1,
│ on main.tf line 60, in resource "azurerm_storage_share_directory" "d1":
│ 60: resource "azurerm_storage_share_directory" "d1" {
│
╵
I tried to reproduce the same setup with a private endpoint and NFS enabled, and got errors because no network rule is created when NFS is enabled.
As the virtual network provides access control for NFS, after creating the vnet you must configure a virtual network rule for the file share to be accessible.
resource "azurerm_virtual_network" "example" {
name = "ka-vnet"
address_space = ["10.0.0.0/16"]
location = data.azurerm_resource_group.example.location
resource_group_name = data.azurerm_resource_group.example.name
// tags = local.common_tags
}
resource "azurerm_subnet" "storage" {
name = "ka-subnet"
resource_group_name = data.azurerm_resource_group.example.name
virtual_network_name = azurerm_virtual_network.example.name
address_prefixes = ["10.0.2.0/24"]
}
resource "azurerm_storage_account" "az_file_sa" {
name = "kaabdx"
resource_group_name = data.azurerm_resource_group.example.name
location = data.azurerm_resource_group.example.location
account_tier = "Premium"
account_kind = "FileStorage"
account_replication_type = "LRS"
enable_https_traffic_only = false
//provide network rules
network_rules {
default_action = "Allow"
ip_rules = ["127.0.0.1/24"]
//23.45.1.0/24
virtual_network_subnet_ids = ["${azurerm_subnet.storage.id }"]
}
}
resource "azurerm_private_endpoint" "fileshare-endpoint" {
name = "fileshare-endpoint"
location = data.azurerm_resource_group.example.location
resource_group_name = data.azurerm_resource_group.example.name
subnet_id = azurerm_subnet.storage.id
private_service_connection {
name = "fileshare-endpoint-connection"
private_connection_resource_id = azurerm_storage_account.az_file_sa.id
is_manual_connection = false
subresource_names = [ "file" ]
}
depends_on = [ azurerm_storage_share.file_share ]
}
resource "azurerm_storage_share" "file_share" {
name = "fileshare"
storage_account_name = azurerm_storage_account.az_file_sa.name
quota = 100
enabled_protocol = "NFS"
depends_on = [ azurerm_storage_account.az_file_sa ]
}
resource "azurerm_storage_share_directory" "mynewfileshare" {
name = "kadev"
share_name = azurerm_storage_share.file_share.name
storage_account_name = azurerm_storage_account.az_file_sa.name
}
Regarding the error that you got:
Error: checking for presence of existing Directory ... directories.Client#Get: Failure sending request: StatusCode=0 -- Original Error: Get "https://abcdxxxyyyzzz.file.core.windows.net/fileshare/dev?restype=directory": read tcp 192.168.1.3:61175->20.60.179.37:443: read: connection reset by peer
Please note that:
VNet peering alone will not give access to the file share. Virtual network peering with the virtual network hosting the private endpoint gives NFS share access to clients in the peered virtual networks, but each virtual network or subnet must be individually added to the allowlist.
A "checking for presence of existing Directory" error also occurs if Terraform has not been initialized. Run terraform init and then try terraform plan and terraform apply.
References:
Cannot create azurerm_storage_container in azurerm_storage_account that uses network_rules · GitHub
NFS Azure file share problems | learn.microsoft.com

Terraform script for provisioning azure Virtual machine scale set along with custom data or cloud init

I would like to know the Terraform script for provisioning an Azure virtual machine scale set along with custom data or cloud-init.
I tried many ways to run my script against the VMSS, but it's not working. As per my understanding, during provisioning of the VMSS I should run some shell scripts so that it installs the necessary agents (New Relic) on all VMSS instances.
I'm looking for a Terraform script for a VMSS along with a custom data or cloud-init configuration.
I used this one a while ago:
resource "azurerm_linux_virtual_machine_scale_set" "jumpserver" {
name = "${local.prefix}-jumpservers-vmss"
resource_group_name = azurerm_resource_group.deployment.name
location = azurerm_resource_group.deployment.location
sku = "Standard_B2s"
instances = 2
overprovision = false
single_placement_group = false
admin_username = "adminuser"
admin_password = azurerm_key_vault_secret.vmsecret.value
disable_password_authentication = false
custom_data = base64encode(data.local_file.cloudinit.content)
source_image_reference {
publisher = "canonical"
offer = "0001-com-ubuntu-server-focal"
sku = "20_04-lts"
version = "latest"
}
os_disk {
storage_account_type = "Standard_LRS"
caching = "ReadWrite"
}
network_interface {
name = "${local.prefix}-jumpserver-vmss-nic"
primary = true
ip_configuration {
name = "${local.prefix}-jumpserver-vmss-ipconfig"
primary = true
subnet_id = azurerm_subnet.jumpservers_vmss.id
}
}
boot_diagnostics {
storage_account_uri = null
}
}
# Data template cloud-init bootstrapping file used by the VMSS
data "local_file" "cloudinit" {
filename = "${path.module}/cloudinit.conf"
}
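As a side note, the local_file data source indirection is not strictly required; under the same assumptions the file could also be read and encoded directly with the built-in filebase64() function:

custom_data = filebase64("${path.module}/cloudinit.conf")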
cloudinit.conf
#cloud-config
bootcmd:
  - mkdir -p /etc/systemd/system/walinuxagent.service.d
  - echo "[Unit]\nAfter=cloud-final.service" > /etc/systemd/system/walinuxagent.service.d/override.conf
  - sed "s/After=multi-user.target//g" /lib/systemd/system/cloud-final.service > /etc/systemd/system/cloud-final.service
  - systemctl daemon-reload
package_update: true
package_upgrade: true
# Add external package sources, e.g. for Microsoft packages and Kubernetes
apt:
  preserve_sources_list: true
  sources_list: |
    deb $MIRROR $RELEASE main restricted
    deb-src $MIRROR $RELEASE main restricted
    deb $PRIMARY $RELEASE universe restricted
    deb $SECURITY $RELEASE-security multiverse
  sources:
    microsoft-azurecli.list:
      source: "deb https://packages.microsoft.com/repos/azure-cli focal main"
      key: |
        -----BEGIN PGP PUBLIC KEY BLOCK-----
        Version: GnuPG v1.4.7 (GNU/Linux)

        mQENBFYxWIwBCADAKoZhZlJxGNGWzqV+1OG1xiQeoowKhssGAKvd+buXCGISZJwT
        LXZqIcIiLP7pqdcZWtE9bSc7yBY2MalDp9Liu0KekywQ6VVX1T72NPf5Ev6x6DLV
        7aVWsCzUAF+eb7DC9fPuFLEdxmOEYoPjzrQ7cCnSV4JQxAqhU4T6OjbvRazGl3ag
        OeizPXmRljMtUUttHQZnRhtlzkmwIrUivbfFPD+fEoHJ1+uIdfOzZX8/oKHKLe2j
        H632kvsNzJFlROVvGLYAk2WRcLu+RjjggixhwiB+Mu/A8Tf4V6b+YppS44q8EvVr
        M+QvY7LNSOffSO6Slsy9oisGTdfE39nC7pVRABEBAAG0N01pY3Jvc29mdCAoUmVs
        ZWFzZSBzaWduaW5nKSA8Z3Bnc2VjdXJpdHlAbWljcm9zb2Z0LmNvbT6JATUEEwEC
        AB8FAlYxWIwCGwMGCwkIBwMCBBUCCAMDFgIBAh4BAheAAAoJEOs+lK2+EinPGpsH
        /32vKy29Hg51H9dfFJMx0/a/F+5vKeCeVqimvyTM04C+XENNuSbYZ3eRPHGHFLqe
        MNGxsfb7C7ZxEeW7J/vSzRgHxm7ZvESisUYRFq2sgkJ+HFERNrqfci45bdhmrUsy
        7SWw9ybxdFOkuQoyKD3tBmiGfONQMlBaOMWdAsic965rvJsd5zYaZZFI1UwTkFXV
        KJt3bp3Ngn1vEYXwijGTa+FXz6GLHueJwF0I7ug34DgUkAFvAs8Hacr2DRYxL5RJ
        XdNgj4Jd2/g6T9InmWT0hASljur+dJnzNiNCkbn9KbX7J/qK1IbR8y560yRmFsU+
        NdCFTW7wY0Fb1fWJ+/KTsC4=
        =J6gs
        -----END PGP PUBLIC KEY BLOCK-----
    microsoft-prod.list:
      source: "deb https://packages.microsoft.com/ubuntu/20.04/prod focal main"
      key: |
        -----BEGIN PGP PUBLIC KEY BLOCK-----
        Version: GnuPG v1.4.7 (GNU/Linux)

        mQENBFYxWIwBCADAKoZhZlJxGNGWzqV+1OG1xiQeoowKhssGAKvd+buXCGISZJwT
        LXZqIcIiLP7pqdcZWtE9bSc7yBY2MalDp9Liu0KekywQ6VVX1T72NPf5Ev6x6DLV
        7aVWsCzUAF+eb7DC9fPuFLEdxmOEYoPjzrQ7cCnSV4JQxAqhU4T6OjbvRazGl3ag
        OeizPXmRljMtUUttHQZnRhtlzkmwIrUivbfFPD+fEoHJ1+uIdfOzZX8/oKHKLe2j
        H632kvsNzJFlROVvGLYAk2WRcLu+RjjggixhwiB+Mu/A8Tf4V6b+YppS44q8EvVr
        M+QvY7LNSOffSO6Slsy9oisGTdfE39nC7pVRABEBAAG0N01pY3Jvc29mdCAoUmVs
        ZWFzZSBzaWduaW5nKSA8Z3Bnc2VjdXJpdHlAbWljcm9zb2Z0LmNvbT6JATUEEwEC
        AB8FAlYxWIwCGwMGCwkIBwMCBBUCCAMDFgIBAh4BAheAAAoJEOs+lK2+EinPGpsH
        /32vKy29Hg51H9dfFJMx0/a/F+5vKeCeVqimvyTM04C+XENNuSbYZ3eRPHGHFLqe
        MNGxsfb7C7ZxEeW7J/vSzRgHxm7ZvESisUYRFq2sgkJ+HFERNrqfci45bdhmrUsy
        7SWw9ybxdFOkuQoyKD3tBmiGfONQMlBaOMWdAsic965rvJsd5zYaZZFI1UwTkFXV
        KJt3bp3Ngn1vEYXwijGTa+FXz6GLHueJwF0I7ug34DgUkAFvAs8Hacr2DRYxL5RJ
        XdNgj4Jd2/g6T9InmWT0hASljur+dJnzNiNCkbn9KbX7J/qK1IbR8y560yRmFsU+
        NdCFTW7wY0Fb1fWJ+/KTsC4=
        =J6gs
        -----END PGP PUBLIC KEY BLOCK-----
    kubernetes:
      source: "deb http://apt.kubernetes.io/ kubernetes-xenial main"
      keyid: 7F92E05B31093BEF5A3C2D38FEEA9169307EA071
# Install packages via apt. To add packages it might be required to add additional sources above.
packages:
  - unzip
  - git
  - wget
  - curl
  - apt-transport-https
  - software-properties-common
  - powershell
  - azure-cli
  - npm
  - docker.io
  - packages-microsoft-prod
  - dotnet-sdk-6.0
  - kubectl
# Install latest version of azcopy (can not be installed via apt)
runcmd:
  # Download AzCopy and extract archive
  - wget https://aka.ms/downloadazcopy-v10-linux
  - tar -xvf downloadazcopy-v10-linux
  # Move AzCopy to the destination
  - sudo cp ./azcopy_linux_amd64_*/azcopy /usr/bin/
  # Allow execution for all users
  - sudo chmod +x /usr/bin/azcopy
# create the docker group
groups:
  - docker
# Add default auto created user to docker group
system_info:
  default_user:
    groups: [docker]
final_message: "The system is finally up, after $UPTIME seconds"
Below is the solution I was finally able to accomplish via Terraform using custom data.
terraform {
  required_version = ">=0.12"

  required_providers {
    azurerm = {
      source  = "hashicorp/azurerm"
      version = "~>2.0"
    }
  }
}

provider "azurerm" {
  features {}
}

resource "azurerm_resource_group" "vmss" {
  name     = var.resource_group_name
  location = var.location
  tags     = var.tags
}

resource "random_string" "fqdn" {
  length  = 6
  special = false
  upper   = false
  number  = false
}

resource "azurerm_virtual_network" "vmss" {
  name                = "vmss-vnet"
  address_space       = ["10.0.0.0/16"]
  location            = var.location
  resource_group_name = azurerm_resource_group.vmss.name
  tags                = var.tags
}

resource "azurerm_subnet" "vmss" {
  name                 = "vmss-subnet"
  resource_group_name  = azurerm_resource_group.vmss.name
  virtual_network_name = azurerm_virtual_network.vmss.name
  address_prefixes     = ["10.0.2.0/24"]
}

resource "azurerm_public_ip" "vmss" {
  name                = "vmss-public-ip"
  location            = var.location
  resource_group_name = azurerm_resource_group.vmss.name
  allocation_method   = "Static"
  domain_name_label   = random_string.fqdn.result
  tags                = var.tags
}

resource "azurerm_virtual_machine_scale_set" "vmss" {
  name                = "vmscaleset"
  location            = var.location
  resource_group_name = azurerm_resource_group.vmss.name
  upgrade_policy_mode = "Manual"

  sku {
    name     = "Standard_DS1_v2"
    tier     = "Standard"
    capacity = 2
  }

  storage_profile_image_reference {
    publisher = "Canonical"
    offer     = "UbuntuServer"
    sku       = "16.04-LTS"
    version   = "latest"
  }

  storage_profile_os_disk {
    name              = ""
    caching           = "ReadWrite"
    create_option     = "FromImage"
    managed_disk_type = "Standard_LRS"
  }

  os_profile {
    computer_name_prefix = "vmlab"
    admin_username       = var.admin_user
    admin_password       = var.admin_password
    // This is the key line: it passes the custom data to the VMSS, so that
    // during each VM spin-up the script is automatically invoked and executed.
    custom_data          = file("test.sh")
  }

  os_profile_linux_config {
    disable_password_authentication = false
  }

  network_profile {
    name    = "terraformnetworkprofile"
    primary = true

    ip_configuration {
      name      = "IPConfiguration"
      subnet_id = azurerm_subnet.vmss.id
      #load_balancer_backend_address_pool_ids = [azurerm_lb_backend_address_pool.bpepool.id]
      primary   = true
    }
  }

  tags = var.tags
}

Read file and save output to local_file

I'm trying to read the content of a file on an azurerm_linux_virtual_machine and save it to a local_file so that an Ansible playbook can reference it later. Currently the .tf looks like this:
resource "azurerm_linux_virtual_machine" "vm" {
name = myvm
location = myzone
resource_group_name = azurerm_resource_group.azureansibledemo.name
network_interface_ids = [azurerm_network_interface.myterraformnic.id]
size = "Standard_DS1_v2"
os_disk {
name = "storage"
caching = "ReadWrite"
storage_account_type = "Premium_LRS"
}
source_image_reference {
publisher = "Canonical"
offer = "UbuntuServer"
sku = "16.04-LTS"
version = "latest"
}
computer_name = myvm
admin_username = "azureuser"
disable_password_authentication = true
custom_data = base64encode(file("telnet.sh"))
admin_ssh_key {
username = "azureuser"
public_key = tls_private_key.ansible_ssh_key.public_key_openssh
}
boot_diagnostics {
storage_account_uri = azurerm_storage_account.mystorageaccount.primary_blob_endpoint
}
}
output "myoutput" {
value = file("/tmp/output.yml")
}
resource "local_file" "testoutput" {
content = <<-DOC
${file("/tmp/output.yml")}
DOC
filename = "test.yml"
}
But when I run terraform plan I get the following error:
Error: Invalid function argument
on main.tf line 181, in resource "local_file" "testoutput":
181: ${file("/tmp/output.yml")}
Invalid value for "path" parameter: no file exists at /tmp/output.yml; this
function works only with files that are distributed as part of the
configuration source code, so if this file will be created by a resource in
this configuration you must instead obtain this result from an attribute of
that resource.
The output myoutput is fine and returns no errors; this only occurs when I add in the resource local_file. Is there a way to get the output of a file into a local_file?
Copying remote files to local is not supported by Terraform.
The workaround is to use scp in a local-exec provisioner, as shown here. For example:

provisioner "local-exec" {
  command = "scp -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -i ${var.openstack_keypair} ubuntu@${openstack_networking_floatingip_v2.wr_manager_fip.address}:~/client.token ."
}
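For the Azure setup in this question, the equivalent might look like the sketch below (assuming the VM has a public IP, the SSH private key is at ~/.ssh/id_rsa, and telnet.sh writes /tmp/output.yml on the VM; the null_resource name and trigger wiring are illustrative):

resource "null_resource" "fetch_output" {
  # Re-run the copy whenever the VM is replaced
  triggers = {
    vm_id = azurerm_linux_virtual_machine.vm.id
  }

  # Copy the remote file next to the configuration after the VM exists
  provisioner "local-exec" {
    command = "scp -o StrictHostKeyChecking=no -i ~/.ssh/id_rsa azureuser@${azurerm_linux_virtual_machine.vm.public_ip_address}:/tmp/output.yml ${path.module}/test.yml"
  }
}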

SSH connection to Azure VM with Terraform

I have successfully created a VM as part of a Resource Group on Azure using Terraform. The next step is to ssh into the new machine and run a few commands. For that, I have created a provisioner as part of the VM resource and set up an SSH connection:
resource "azurerm_virtual_machine" "helloterraformvm" {
name = "terraformvm"
location = "West US"
resource_group_name = "${azurerm_resource_group.helloterraform.name}"
network_interface_ids = ["${azurerm_network_interface.helloterraformnic.id}"]
vm_size = "Standard_A0"
storage_image_reference {
publisher = "Canonical"
offer = "UbuntuServer"
sku = "14.04.2-LTS"
version = "latest"
}
os_profile {
computer_name = "hostname"
user = "some_user"
password = "some_password"
}
os_profile_linux_config {
disable_password_authentication = false
}
provisioner "remote-exec" {
inline = [
"sudo apt-get install docker.io -y"
]
connection {
type = "ssh"
user = "some_user"
password = "some_password"
}
}
}
If I run "terraform apply", it seems to get into an infinite loop trying to ssh unsuccessfully, repeating this log over and over:
azurerm_virtual_machine.helloterraformvm (remote-exec): Connecting to remote host via SSH...
azurerm_virtual_machine.helloterraformvm (remote-exec): Host:
azurerm_virtual_machine.helloterraformvm (remote-exec): User: testadmin
azurerm_virtual_machine.helloterraformvm (remote-exec): Password: true
azurerm_virtual_machine.helloterraformvm (remote-exec): Private key: false
azurerm_virtual_machine.helloterraformvm (remote-exec): SSH Agent: true
I'm sure I'm doing something wrong, but I don't know what it is :(
EDIT:
I have tried setting up this machine without the provisioner, and I can SSH to it with no problems using the given username/password. However, I need to look up the host name in the Azure portal because I don't know how to retrieve it from Terraform. It's suspicious that the "Host:" line in the log is empty, so I wonder if it has anything to do with that?
UPDATE:
I've tried with different things like indicating the host name in the connection with
host = "${azurerm_public_ip.helloterraformip.id}"
and
host = "${azurerm_public_ip.helloterraformips.ip_address}"
as indicated in the docs, but with no success.
I've also tried using ssh-keys instead of password, but same result - infinite loop of connection tries, with no clear error message as of why it's not connecting.
I have managed to make this work. I changed several things:
- Gave the name of the host to the connection.
- Configured SSH keys properly; they need to be unencrypted.
- Took the connection element out of the provisioner element.
Here's the full working Terraform file, replacing the data like SSH keys, etc.:
# Configure Azure provider
provider "azurerm" {
  subscription_id = "${var.azure_subscription_id}"
  client_id       = "${var.azure_client_id}"
  client_secret   = "${var.azure_client_secret}"
  tenant_id       = "${var.azure_tenant_id}"
}

# create a resource group if it doesn't exist
resource "azurerm_resource_group" "rg" {
  name     = "sometestrg"
  location = "ukwest"
}

# create virtual network
resource "azurerm_virtual_network" "vnet" {
  name                = "tfvnet"
  address_space       = ["10.0.0.0/16"]
  location            = "ukwest"
  resource_group_name = "${azurerm_resource_group.rg.name}"
}

# create subnet
resource "azurerm_subnet" "subnet" {
  name                 = "tfsub"
  resource_group_name  = "${azurerm_resource_group.rg.name}"
  virtual_network_name = "${azurerm_virtual_network.vnet.name}"
  address_prefix       = "10.0.2.0/24"
  #network_security_group_id = "${azurerm_network_security_group.nsg.id}"
}

# create public IPs
resource "azurerm_public_ip" "ip" {
  name                         = "tfip"
  location                     = "ukwest"
  resource_group_name          = "${azurerm_resource_group.rg.name}"
  public_ip_address_allocation = "dynamic"
  domain_name_label            = "sometestdn"

  tags {
    environment = "staging"
  }
}

# create network interface
resource "azurerm_network_interface" "ni" {
  name                = "tfni"
  location            = "ukwest"
  resource_group_name = "${azurerm_resource_group.rg.name}"

  ip_configuration {
    name                          = "ipconfiguration"
    subnet_id                     = "${azurerm_subnet.subnet.id}"
    private_ip_address_allocation = "static"
    private_ip_address            = "10.0.2.5"
    public_ip_address_id          = "${azurerm_public_ip.ip.id}"
  }
}

# create storage account
resource "azurerm_storage_account" "storage" {
  name                = "someteststorage"
  resource_group_name = "${azurerm_resource_group.rg.name}"
  location            = "ukwest"
  account_type        = "Standard_LRS"

  tags {
    environment = "staging"
  }
}

# create storage container
resource "azurerm_storage_container" "storagecont" {
  name                  = "vhd"
  resource_group_name   = "${azurerm_resource_group.rg.name}"
  storage_account_name  = "${azurerm_storage_account.storage.name}"
  container_access_type = "private"
  depends_on            = ["azurerm_storage_account.storage"]
}

# create virtual machine
resource "azurerm_virtual_machine" "vm" {
  name                  = "sometestvm"
  location              = "ukwest"
  resource_group_name   = "${azurerm_resource_group.rg.name}"
  network_interface_ids = ["${azurerm_network_interface.ni.id}"]
  vm_size               = "Standard_A0"

  storage_image_reference {
    publisher = "Canonical"
    offer     = "UbuntuServer"
    sku       = "16.04-LTS"
    version   = "latest"
  }

  storage_os_disk {
    name          = "myosdisk"
    vhd_uri       = "${azurerm_storage_account.storage.primary_blob_endpoint}${azurerm_storage_container.storagecont.name}/myosdisk.vhd"
    caching       = "ReadWrite"
    create_option = "FromImage"
  }

  os_profile {
    computer_name  = "testhost"
    admin_username = "testuser"
    admin_password = "Password123"
  }

  os_profile_linux_config {
    disable_password_authentication = false

    ssh_keys = [{
      path     = "/home/testuser/.ssh/authorized_keys"
      key_data = "ssh-rsa xxx email@something.com"
    }]
  }

  connection {
    host        = "sometestdn.ukwest.cloudapp.azure.com"
    user        = "testuser"
    type        = "ssh"
    private_key = "${file("~/.ssh/id_rsa_unencrypted")}"
    timeout     = "1m"
    agent       = true
  }

  provisioner "remote-exec" {
    inline = [
      "sudo apt-get update",
      "sudo apt-get install docker.io -y",
      "git clone https://github.com/somepublicrepo.git",
      "cd Docker-sample",
      "sudo docker build -t mywebapp .",
      "sudo docker run -d -p 5000:5000 mywebapp"
    ]
  }

  tags {
    environment = "staging"
  }
}
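To avoid looking up the host name in the Azure portal (as mentioned in the question's edit), the public IP resource's attributes can also be exported; a small sketch under the same configuration:

output "vm_fqdn" {
  value = "${azurerm_public_ip.ip.fqdn}"
}

Since domain_name_label is set, the fqdn attribute should resolve to something like sometestdn.ukwest.cloudapp.azure.com after apply.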
According to your description, Azure Custom Script Extension is an option for you.
The Custom Script Extension downloads and executes scripts on Azure
virtual machines. This extension is useful for post deployment
configuration, software installation, or any other configuration /
management task.
Remove the provisioner "remote-exec" and use the below instead:
resource "azurerm_virtual_machine_extension" "helloterraformvm" {
name = "hostname"
location = "West US"
resource_group_name = "${azurerm_resource_group.helloterraformvm.name}"
virtual_machine_name = "${azurerm_virtual_machine.helloterraformvm.name}"
publisher = "Microsoft.OSTCExtensions"
type = "CustomScriptForLinux"
type_handler_version = "1.2"
settings = <<SETTINGS
{
"commandToExecute": "apt-get install docker.io -y"
}
SETTINGS
}
Note: Command is executed by root user, don't use sudo.
For more information, please refer to this link: azurerm_virtual_machine_extension.
For a list of possible extensions, you can use the Azure CLI command az vm extension image list -o table
Update: The above example only supports a single command. If you need multiple commands, for example to install Docker on your VM, you need:
apt-get update
apt-get install docker.io -y
Save it as a file named script.sh and upload it to an Azure Storage account or GitHub (the file should be public). Modify the Terraform file like below:
settings = <<SETTINGS
{
  "fileUris": ["https://gist.githubusercontent.com/Walter-Shui/dedb53f71da126a179544c91d267cdce/raw/bb3e4d90e3291530570eca6f4ff7981fdcab695c/script.sh"],
  "commandToExecute": "sh script.sh"
}
SETTINGS
