I want to deploy an AKS cluster in Azure using Terraform, but it is taking too long to create: more than an hour, and the Terraform job never finishes. In the portal the AKS cluster stays in the Creating status. I'm deploying the AKS cluster in the EastUS2 region.
The VM sizes I used are not too big, so I don't understand what the problem could be.
This is my main file:
main.tf
# Configure the Microsoft Azure Provider.
terraform {
required_providers {
azurerm = {
source = "hashicorp/azurerm"
version = ">= 2.26"
}
kubernetes = {
source = "hashicorp/kubernetes"
version = ">= 1.24.6"
}
azapi = {
source = "azure/azapi"
version = ">=1.1.0"
}
}
required_version = ">= 0.14.9"
}
provider "azurerm" {
features {}
}
provider "kubernetes" {
host = module.aks.host
username = module.aks.username
password = module.aks.password
client_certificate = module.aks.client_certificate
client_key = base64decode(module.aks.client_key)
cluster_ca_certificate = base64decode(module.aks.cluster_ca_certificate)
}
provider "azapi" {
# subscription_id = data.azurerm_client_config.current.subscription_id
# tenant_id = data.azurerm_client_config.current.tenant_id
}
data "azurerm_client_config" "current" {}
module "ResourceGroup" {
source = "./ResourceGroup"
}
module "Networks" {
source = "./Networks"
resource_group_name = module.ResourceGroup.rg_name_out
location = module.ResourceGroup.rg_location_out
}
module "acr" {
source = "./ACR"
resource_group_name = module.ResourceGroup.rg_name_out
location = module.ResourceGroup.rg_location_out
name = var.acr_name
sku = var.acr_sku
admin_enabled = var.acr_admin_enabled
georeplication_locations = var.acr_georeplication_locations
soft_delete_policy_status = var.acr_soft_delete_policy_status
soft_delete_policy_days = var.acr_soft_delete_policy_days
identity_name = var.acr_identity_name
tags = var.tags
# depends_on = [module.StorageAccount]
}
module "aks" {
source = "./AKS"
location = module.ResourceGroup.rg_location_out
resource_group_name = module.ResourceGroup.rg_name_out
acr_id = module.acr.id
name = var.aks_cluster_name
kubernetes_version = var.aks_kubernetes_version
dns_prefix = lower(var.aks_cluster_name)
private_cluster_enabled = var.aks_private_cluster_enabled
automatic_channel_upgrade = var.aks_automatic_channel_upgrade
sku_tier = var.aks_sku_tier
identity_name = var.aks_identity_name
api_server_authorized_ip_ranges = [] #module.Networks.subnet_address_bastion
azure_policy_enabled = var.aks_azure_policy_enabled
http_application_routing_enabled = var.aks_http_application_routing_enabled
network_profile = var.aks_network_profile
aci_connector_linux = var.aks_aci_connector_linux
azure_ad_rbac_managed = var.aks_azure_ad_rbac_managed
tenant_id = data.azurerm_client_config.current.tenant_id
admin_group_object_ids = var.aks_admin_group_object_ids
azure_rbac_enabled = var.aks_azure_rbac_enabled
admin_username = var.aks_admin_username
ssh_public_key = var.aks_ssh_public_key
tags = var.tags
depends_on = [module.Networks, module.acr]
default_node_pool = {
name = "system"
vm_size = "Standard_D2s_v3"
node_count = 1
enable_auto_scaling = true
max_count = 1
min_count = 1
max_surge = "50%"
max_pods = 36
os_disk_size_gb = 50
os_disk_type = "Managed"
ultra_ssd_enabled = true
zones = ["1", "2","3"]
node_labels = { "workload" = "system" }
node_taints = [ "workload=system:NoSchedule" ]
vnet_subnet_id = module.Networks.subnet_id
orchestrator_version = var.aks_kubernetes_version
}
node_pools = [
{
name = "batch"
mode = "User"
vm_size = "Standard_D2s_v3"
node_count = 1
enable_auto_scaling = true
max_count = 1
min_count = 1
max_surge = "50%"
max_pods = 36
os_disk_size_gb = 50
os_disk_type = "Managed"
ultra_ssd_enabled = true
zones = ["1", "2","3"]
node_labels = { "workload" = "batch" }
node_taints = [ "workload=batch:NoSchedule" ]
vnet_subnet_id = module.Networks.subnet_id
orchestrator_version = var.aks_kubernetes_version
}
]
}
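While investigating, it may help to raise the provider timeout on the AKS resource inside the ./AKS module, so the apply keeps waiting long enough to surface the real Azure error instead of failing on the Terraform side first. This is only a sketch; the resource label aks inside the module is an assumption:

resource "azurerm_kubernetes_cluster" "aks" {
  # ... existing arguments from the AKS module ...

  # Give Azure more time to finish provisioning the control plane and the
  # zonal node pools before Terraform reports a timeout.
  timeouts {
    create = "2h"
    update = "2h"
    delete = "2h"
  }
}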
Related
When I deploy it together with some other components of a data pipeline, such as a function app and Cosmos DB, the metrics graphs on the Azure Event Hubs portal do not appear; instead the graphs show "Resource not found".
But when I deploy the same Terraform code for the Event Hubs namespace without the other components, the metric graphs appear. Here is the Terraform code:
locals {
ip_rule_map = flatten([
for cidr in ["182.191.83.208"] : [
{
action = "Allow"
ip_mask = cidr
}
]
])
}
resource "azurerm_eventhub_namespace" "avro-ingestion" {
name = "test-eh"
location = "Central US"
resource_group_name = "test-rg"
sku = "Standard"
capacity = 1
network_rulesets = false ? [{
default_action = "Deny"
ip_rule = local.ip_rule_map
virtual_network_rule = []
trusted_service_access_enabled = true
}] : [
{
default_action = "Allow"
ip_rule = local.ip_rule_map
virtual_network_rule = []
trusted_service_access_enabled = true
}
]
tags = {
Name = "avro-ingestion"
Purpose = "data-ingestion-infra-deployment"
CreatedBy = "emumba"
}
}
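# (Sketch, not part of the original config.) The hard-coded "false ?" condition
# in network_rulesets above could instead be driven by the module's
# enable_private_access variable, for example:
#
# network_rulesets = [{
#   default_action                 = var.enable_private_access ? "Deny" : "Allow"
#   ip_rule                        = local.ip_rule_map
#   virtual_network_rule           = []
#   trusted_service_access_enabled = true
# }]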
resource "azurerm_eventhub_namespace_authorization_rule" "user_managed" {
name = "UserManagedSharedAccessKey"
namespace_name = azurerm_eventhub_namespace.avro-ingestion.name
resource_group_name = "test-rg"
listen = true
send = true
manage = true
}
resource "null_resource" "schema-registry" {
depends_on = [
azurerm_eventhub_namespace.avro-ingestion
]
provisioner "local-exec" {
interpreter = ["/bin/bash", "-c"]
command = "az eventhubs namespace schema-registry create --name test-schema-group --namespace-name test-eh --resource-group test-rg --schema-compatibility Backward --schema-type Avro"
}
}
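# (Sketch, not part of the original config.) If your azurerm provider version
# supports it, the schema group could be managed natively instead of via the
# local-exec az CLI call above, for example:
#
# resource "azurerm_eventhub_namespace_schema_group" "avro" {
#   name                 = "test-schema-group"
#   namespace_id         = azurerm_eventhub_namespace.avro-ingestion.id
#   schema_compatibility = "Backward"
#   schema_type          = "Avro"
# }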
resource "azurerm_eventhub" "thunder" {
name = "test"
namespace_name = azurerm_eventhub_namespace.avro-ingestion.name
resource_group_name = "test-rg"
partition_count = 2
message_retention = 1
}
resource "azurerm_eventhub_consumer_group" "function-app-cg" {
name = "fApp-cons-group"
namespace_name = azurerm_eventhub_namespace.avro-ingestion.name
eventhub_name = azurerm_eventhub.thunder.name
resource_group_name = "test-rg"
}
This is the main.tf file where I am calling all the modules, along with the Event Hubs namespace module:
resource "random_string" "random_Sacc1" {
length = 4
special = false
upper = false
min_lower = 1
min_numeric = 1
}
resource "random_string" "random_Sacc2" {
length = 2
special = false
upper = false
min_lower = 1
min_numeric = 1
}
module "azure-resource-group" {
source = "../../modules/resource-group"
region = var.region
res_group_name = var.res_group_name
}
module "azure-virtual-network" {
depends_on = [
module.azure-resource-group
]
source = "../../modules/virtual-network"
rg_name = module.azure-resource-group.name
rg_location = module.azure-resource-group.location
vn_name = var.vn_name
vn_cidrs = var.vn_cidrs
subnets = var.subnets
subnet_cidrs = var.subnet_cidrs
pub_nsg_name = var.pub_nsg_name
private_nsg_name = var.private_nsg_name
internet_ip_cidr_list = var.internet_ip_cidr_list
}
module "azure-ad-app-registration" {
depends_on = [
module.azure-resource-group
]
source = "../../modules/app-role-assignment"
app-display-name = var.app-display-name
rg_name = module.azure-resource-group.name
}
module "azure-eventhubs" {
source = "../../modules/event-hubs"
ns_name = var.eventhub_namespace_name
eventhub_name = var.eventhub_name
cons_group_name = var.cons_group_name
rg_name = module.azure-resource-group.name
rg_location = module.azure-resource-group.location
enable_private_access = var.enable_private_access
cidr_list = var.public_cidr_list
vnet_id_dns = module.azure-virtual-network.vnet-id
private_ep_subnet = module.azure-virtual-network.private-subent1-id
dns_zone_name = var.dns_zone_name_private_ep
schema_group_name = var.eventhub_schema_group_name
}
module "azure-storage-account" {
depends_on = [
module.azure-virtual-network
]
source = "../../modules/storage-account"
storage_acc_name = "${var.storage_acc_name}${random_string.random_Sacc1.id}"
rg_name = module.azure-resource-group.name
rg_location = module.azure-resource-group.location
enable_private_access = var.enable_private_access
cidr_list = var.public_cidr_list
vnet_id_dns = module.azure-virtual-network.vnet-id
private_ep_subnet = module.azure-virtual-network.private-subent1-id
dns_zone_name = var.dns_zone_name_private_ep
}
module "azure-cosmos-db" {
source = "../../modules/cosmos-db"
acc_name = var.cosmos_acc_name
db_name = var.cosmos_db_name
rg_name = module.azure-resource-group.name
rg_location = module.azure-resource-group.location
cosmos_db_container_name = var.cosmos_db_container_name
enable_private_access = var.enable_private_access
cidr_list = var.public_cidr_list
vnet_id_dns = module.azure-virtual-network.vnet-id
private_ep_subnet = module.azure-virtual-network.private-subent1-id
dns_zone_name = var.dns_zone_name_private_ep
synapse_link = var.enable_synapse_link
}
# module "fApp-azure-storage-account" {
# source = "../../modules/storage-account"
# storage_acc_name = "${var.storage_acc_fApp_name}${random_string.random_Sacc2.id}"
# rg_name = module.azure-resource-group.name
# rg_location = module.azure-resource-group.location
# enable_private_access = var.enable_private_access
# cidr_list = var.public_cidr_list
# private_ep_subnet = element(module.azure-virtual-network.subnet_id_list, 1)
# dns_zone_name = var.dns_zone_name_private_ep
# }
module "data-ingestion-fApp" {
depends_on = [
module.azure-cosmos-db,
module.azure-eventhubs,
module.azure-storage-account
]
source = "../../modules/function-app"
rg_name = module.azure-resource-group.name
rg_location = module.azure-resource-group.location
application_insight_name = var.application_insight_name
fApp_service_plan_name = var.fApp_service_plan_name
fApp_name = var.fApp_name
fApp-storage_acc_name = "${var.storage_acc_fApp_name}${random_string.random_Sacc2.id}"
enable_private_access = var.enable_private_access
vnet_id_dns = module.azure-virtual-network.vnet-id
private_ep_subnet = module.azure-virtual-network.private-subent1-id
integration_vnet_name = module.azure-virtual-network.vnet-name
integration_subnet_name = module.azure-virtual-network.private-subent2-name
func_configurations = { "AZURE_CLIENT_ID" = module.azure-ad-app-registration.client_id
"AZURE_CLIENT_SECRET" = module.azure-ad-app-registration.client_secret,
"AZURE_TENANT_ID" = module.azure-ad-app-registration.tenant_id,
"EVENTHUB_NAME" = var.eventhub_name,
"EVENTHUB_FULLY_QUALIFIED_NAMESPACE" = "${var.eventhub_namespace_name}.servicebus.windows.net",
"SCHEMA_GROUP_NAME" = var.eventhub_schema_group_name,
"OUTPUT_CONTAINER" = var.storage_acc_container_name,
"OUTPUT_PATH" = var.storage_acc_container_path,
"COSMOS_DB_URI" = module.azure-cosmos-db.cosmos_account_uri,
"COSMOS_DB_PRIMARY_KEY" = module.azure-cosmos-db.cosmos_account_primary_key,
"COSMOS_DB_NAME" = var.cosmos_db_name,
"COSMOS_DB_CONTAINER_NAME" = var.cosmos_db_container_name,
"a10devops_namespace_connection" = module.azure-eventhubs.eventhub_conn_str,
"a10devops_storage_connection" = module.azure-storage-account.storage_account_conn_str }
}
I have a .tfvars file with the contents below as the input variables:
aks_configuration = {
aks1 = {
name = "cnitest"
location = "westeurope"
kubernetes_version = "1.22.4"
dns_prefix = "cnitest"
default_nodepool_name = "general"
default_nodepool_size = "Standard_B2s"
default_nodepool_count = 2
default_node_pool_autoscale = true
default_node_pool_autoscale_min_count = 1
default_node_pool_autoscale_max_count = 2
aks_zones = null
network_plugin = null
network_policy = null
vnet_name = null
vnet_enabled = false
subnet_name = null
objectID = ["*********"]
nodepool = [
{
name = "dts"
vm_size = "Standard_B2s"
enable_auto_scaling = true
mode = "user"
node_count = 1
max_count = 2
min_count = 1
}
]
}
}
Now I need to create an AKS cluster with a condition to choose azurecni or kubenet as part of the network configuration.
If vnet_enabled is false, it should disable the data resource in Terraform and pass null for the configuration below:
#get nodepool vnet subnet ID
data "azurerm_subnet" "example" {
for_each = local.aks_config.vnet_enabled
name = each.value.subnet_name
virtual_network_name = each.value.vnet_name
resource_group_name = var.rg-name
}
resource "azurerm_kubernetes_cluster" "example" {
for_each = local.aks_config
name = each.value.name
location = each.value.location
resource_group_name = var.rg-name
dns_prefix = each.value.dns_prefix
default_node_pool {
name = each.value.default_nodepool_name
node_count = each.value.default_nodepool_count
vm_size = each.value.default_nodepool_size
enable_auto_scaling = each.value.default_node_pool_autoscale
min_count = each.value.default_node_pool_autoscale_min_count
max_count = each.value.default_node_pool_autoscale_max_count
vnet_subnet_id = data.azurerm_subnet.example[each.key].id
zones = each.value.aks_zones
}
identity {
type = "SystemAssigned"
}
network_profile {
network_plugin = each.value.network_plugin
network_policy = each.value.network_policy
}
# azure_active_directory_role_based_access_control {
# managed = true
# admin_group_object_ids = [each.value.objectID]
# }
}
If VNet integration needs to be done, i.e. the vnet_enabled parameter is set to true, then the data block fetches the details of the subnet to be used by the AKS cluster, and the network plugin is set to azure. If it is false, the data block is not used, the subnet is given a null value in the AKS cluster, and it uses kubenet. You don't need to add the network plugin and network policy to the locals; you can leverage the same vnet_enabled flag in conditional values.
To achieve the above, you have to use something like the below:
data "azurerm_subnet" "example" {
for_each = local.aks_configuration.vnet_enabled ? 1 : 0
name = each.value.subnet_name
virtual_network_name = each.value.vnet_name
resource_group_name = var.rg-name
}
resource "azurerm_kubernetes_cluster" "example" {
for_each = local.aks_configuration
name = each.value.name
location = each.value.location
resource_group_name = var.rg-name
dns_prefix = each.value.dns_prefix
default_node_pool {
name = each.value.default_nodepool_name
node_count = each.value.default_nodepool_count
vm_size = each.value.default_nodepool_size
enable_auto_scaling = each.value.default_node_pool_autoscale
min_count = each.value.default_node_pool_autoscale_min_count
max_count = each.value.default_node_pool_autoscale_max_count
vnet_subnet_id = each.value.vnet_enabled ? data.azurerm_subnet.example[0].id : null
zones = each.value.aks_zones
}
identity {
type = "SystemAssigned"
}
network_profile {
network_plugin = each.value.vnet_enabled ? "azure" : "kubenet"
network_policy = each.value.vnet_enabled ? "azure" : "calico"
}
# azure_active_directory_role_based_access_control {
# managed = true
# admin_group_object_ids = [each.value.objectID]
# }
}
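One assumption in the snippet above is that local.aks_configuration is populated from the aks_configuration map in the .tfvars file. A minimal sketch of that wiring:

variable "aks_configuration" {
  description = "Map of AKS cluster definitions (see the .tfvars example above)"
  type        = any
}

locals {
  aks_configuration = var.aks_configuration
}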
I am trying to provision an Azure Application Gateway with Terraform, using trusted_root_certificates stored in a key vault. But I am getting the following error:
"message":"The user, group or application 'name=Microsoft.Network/applicationGateways;appid=some-id;iss=https://sts.windows.net/xxx-xxx/' does not have secrets get permission on key vault 'jana-kv-ssi-test;location=australiaeast'.
And here's my Terraform code:
module "cert-kv" {
source = "./modules/kv"
resource_group_name = var.resource_group_name
project = var.project
location = var.location
environment = var.environment
default_tags = var.default_tags
kv_sku = var.kv_sku
kv_name = var.kv_name
kv_key_permissions = var.kv_key_permissions
kv_secret_permissions = var.kv_secret_permissions
kv_storage_permissions = var.kv_storage_permissions
certificate_permissions = var.certificate_permissions
cert_name = var.cert_name
local_cert_path = var.local_cert_path
local_cert_password = var.local_cert_password
root_cert_name = var.root_cert_name
root_cert_local_cert_path = var.root_cert_local_cert_path
root_cert_local_cert_password = var.root_cert_local_cert_password
}
module "app-gateway" {
source = "./modules/app_gateway"
resource_group_name = var.resource_group_name
environment = var.environment
default_tags = var.default_tags
project = var.project
location = var.location
gw_sku_name = var.gw_sku_name
gw_tier = var.gw_tier
frontend_port_settings = var.frontend_port_settings
autoscale_configuration_max_capacity = var.autoscale_configuration_max_capacity
appgw_zones = var.appgw_zones
appgw_private_ip = var.appgw_private_ip
appgw_subnet_id = module.application-subnets.app_subnet_id
cipher_suites = var.cipher_suites
tls_version = var.tls_version
appgw_backend_pools = var.appgw_backend_pools
appgw_backend_http_settings = var.appgw_backend_http_settings
appgw_http_listeners = var.appgw_http_listeners
ssl_certificates_configs = var.ssl_certificates_configs
appgw_routings = var.appgw_routings
appgw_redirect_configuration = var.appgw_redirect_configuration
gw_key_vault_id = module.cert-kv.keyvault_id # var.gw_key_vault_id
health_probe_config = var.health_probe_config
kv_secret_id_for_root_cert = module.cert-kv.root_secret_id
kv_secret_name_for_root_cert = var.root_cert_name
depends_on = [module.application-subnets, module.cert-kv]
}
And here are the resource files for the above modules.
# key-vault
data "azurerm_client_config" "current" {}
resource "azurerm_key_vault" "cert_kv" {
name = join("-", [var.project, var.environment, var.kv_name])
location = var.location
resource_group_name = var.resource_group_name
enabled_for_disk_encryption = true
tenant_id = data.azurerm_client_config.current.tenant_id
soft_delete_retention_days = 7
purge_protection_enabled = false
sku_name = var.kv_sku
access_policy {
tenant_id = data.azurerm_client_config.current.tenant_id
object_id = data.azurerm_client_config.current.object_id
key_permissions = var.kv_key_permissions
secret_permissions = var.kv_secret_permissions
storage_permissions = var.kv_storage_permissions
certificate_permissions = var.certificate_permissions
}
tags = var.default_tags
}
resource "azurerm_key_vault_certificate" "certs" {
name = var.cert_name
key_vault_id = azurerm_key_vault.cert_kv.id
certificate {
contents = filebase64(var.local_cert_path)
password = var.local_cert_password
}
}
resource "azurerm_key_vault_certificate" "root_cert" {
name = var.root_cert_name
key_vault_id = azurerm_key_vault.cert_kv.id
certificate {
contents = filebase64(var.root_cert_local_cert_path)
password = var.root_cert_local_cert_password
}
}
resource "azurerm_user_assigned_identity" "key_vault_read" {
resource_group_name = var.resource_group_name
location = var.location
name = join("-", [var.project, var.environment, "key_vault_read_permission"])
}
data "azurerm_client_config" "current" {}
resource "azurerm_key_vault_access_policy" "key_vault_role_policy" {
key_vault_id = var.gw_key_vault_id
tenant_id = data.azurerm_client_config.current.tenant_id
object_id = azurerm_user_assigned_identity.key_vault_read.principal_id
key_permissions = [
"Get","List",
]
secret_permissions = [
"Get","List",
]
}
# app-gw
resource "azurerm_application_gateway" "application-gateway" {
name = join("-", [var.project, var.environment, "app-gateway"])
location = var.location
resource_group_name = var.resource_group_name
tags = var.default_tags
sku {
name = var.gw_sku_name
tier = var.gw_tier
}
. . .
trusted_root_certificate {
name = var.kv_secret_name_for_root_cert
key_vault_secret_id = var.kv_secret_id_for_root_cert
}
. . .
identity {
type = "UserAssigned"
identity_ids = [azurerm_user_assigned_identity.key_vault_read.id]
}
lifecycle {
ignore_changes = [
url_path_map,
request_routing_rule
]
}
}
Can someone please help me?
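Not an authoritative fix, but one common cause of this error is ordering: the user-assigned identity's Key Vault access policy has to exist before the application gateway tries to read the certificate secret. A sketch of making that explicit inside the app_gateway module, reusing the resource names from the files above:

resource "azurerm_application_gateway" "application-gateway" {
  # ... existing configuration from the module above ...

  identity {
    type         = "UserAssigned"
    identity_ids = [azurerm_user_assigned_identity.key_vault_read.id]
  }

  # Make sure the Get/List secret permissions are granted before the gateway
  # attempts to fetch the trusted root certificate from Key Vault.
  depends_on = [azurerm_key_vault_access_policy.key_vault_role_policy]
}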
I am trying to create multiple Azure VMs and I am not able to assign the VMs to different availability sets (availability_set). Please see my code below:
Module "vm_dev":
terraform {
required_providers {
azurerm = {
source = "hashicorp/azurerm"
version = "2.82.0"
}
}
}
provider "azurerm" {
features {}
}
resource "azurerm_resource_group" "rg_dev" {
name = "MYORG_RG_DEV"
location = var.location
}
resource "azurerm_network_interface" "node_master" {
for_each = var.instances_master
name = "${var.hostname_prefix}-${each.key}-nic"
resource_group_name = azurerm_resource_group.rg_dev.name
location = azurerm_resource_group.rg_dev.location
internal_dns_name_label = "${var.hostname_prefix}-${each.key}"
ip_configuration {
name = "primary"
primary = true
subnet_id = var.subnet_id
private_ip_address = each.value.ip
private_ip_address_allocation = "Static"
private_ip_address_version = "IPv4"
}
}
resource "azurerm_linux_virtual_machine" "node_master" {
for_each = var.instances_master
name = "${var.hostname_prefix}-${each.key}"
computer_name = "${var.hostname_prefix}-${each.key}"
size = var.vm_size
resource_group_name = azurerm_resource_group.rg_dev.name
location = azurerm_resource_group.rg_dev.location
network_interface_ids = [azurerm_network_interface.node_master[each.key].id]
os_disk {
name = "${var.hostname_prefix}-${each.key}-disk-os"
storage_account_type = "StandardSSD_LRS"
caching = "ReadWrite"
}
source_image_reference {
publisher = "Canonical"
offer = "UbuntuServer"
sku = "18.04-LTS"
version = "latest"
}
admin_username = "myuser"
admin_ssh_key {
username = "myuser"
public_key = file("id.pub")
}
disable_password_authentication = true
}
resource "azurerm_network_interface" "node_data" {
for_each = var.instances_data
name = "${var.hostname_prefix}-${each.key}-nic"
resource_group_name = azurerm_resource_group.rg_dev.name
location = azurerm_resource_group.rg_dev.location
internal_dns_name_label = "${var.hostname_prefix}-${each.key}"
ip_configuration {
name = "primary"
primary = true
subnet_id = var.subnet_id
private_ip_address = each.value.ip
private_ip_address_allocation = "Static"
private_ip_address_version = "IPv4"
}
}
resource "azurerm_linux_virtual_machine" "node_data" {
for_each = var.instances_data
name = "${var.hostname_prefix}-${each.key}"
computer_name = "${var.hostname_prefix}-${each.key}"
size = var.vm_size
resource_group_name = azurerm_resource_group.rg_dev.name
location = azurerm_resource_group.rg_dev.location
network_interface_ids = [azurerm_network_interface.node_data[each.key].id]
os_disk {
name = "${var.hostname_prefix}-${each.key}-disk-os"
storage_account_type = "StandardSSD_LRS"
caching = "ReadWrite"
}
source_image_reference {
publisher = "Canonical"
offer = "UbuntuServer"
sku = "18.04-LTS"
version = "latest"
}
admin_username = "myuser"
admin_ssh_key {
username = "myuser"
public_key = file("id.pub")
}
disable_password_authentication = true
}
vm.tf:
module "vm_dev" {
source = "./vm_dev"
vm_size = "Standard_D4s_v3"
hostname_prefix = "myorg"
group_name_prefix = var.group_prefix
location = var.location
subnet_id = local.subnet_id
ssh_key = local.ssh_public_key
instances_master = {
"aa-elastic-master-0" = { ip = "10.0.100.1" }
"aa-elastic-master-1" = { ip = "10.0.100.2" }
"xx-elastic-master-0" = { ip = "10.0.99.1" }
"xx-elastic-master-1" = { ip = "10.0.99.2" }
}
instances_data = {
"aa-elastic-data-0" = { ip = "10.0.100.3" }
"aa-elastic-data-1" = { ip = "10.0.100.4" }
"aa-elastic-data-2" = { ip = "10.0.100.5" }
"xx-elastic-data-0" = { ip = "10.0.99.3" }
"xx-elastic-data-1" = { ip = "10.0.99.4" }
"xx-elastic-data-2" = { ip = "10.0.99.5" }
}
}
This works fine and I am able to create the VMs. So far each VM is created without being assigned to an availability set. I would like to specify which availability_set each VM belongs to, something like this:
instances_master = {
"aa-elastic-master-0" = { ip = "10.0.100.1", as = "azurerm_availability_set.as_aamaster.id" }
"aa-elastic-master-1" = { ip = "10.0.100.2", as = "azurerm_availability_set.as_aamaster.id" }
"xx-elastic-master-0" = { ip = "10.0.99.1", as = "azurerm_availability_set.as_xxmaster.id" }
"xx-elastic-master-1" = { ip = "10.0.99.2", as = "azurerm_availability_set.as_xxmaster.id" }
}
instances_data = {
"aa-elastic-data-0" = { ip = "10.0.100.3", as = "azurerm_availability_set.as_aadata.id" }
"aa-elastic-data-1" = { ip = "10.0.100.4", as = "azurerm_availability_set.as_aadata.id" }
"aa-elastic-data-2" = { ip = "10.0.100.5", as = "azurerm_availability_set.as_aadata.id" }
"xx-elastic-data-0" = { ip = "10.0.99.3", as = "azurerm_availability_set.as_xxdata.id" }
"xx-elastic-data-1" = { ip = "10.0.99.4", as = "azurerm_availability_set.as_xxdata.id" }
"xx-elastic-data-2" = { ip = "10.0.99.5", as = "azurerm_availability_set.as_xxdata.id" }
}
and adding the following code in the module:
resource "azurerm_availability_set" "as_aamaster" {
name = "${var.hostname_prefix}-as-aamaster"
resource_group_name = azurerm_resource_group.rg_dev.name
location = azurerm_resource_group.rg_dev.location
managed = true
}
resource "azurerm_linux_virtual_machine" "node_master" {
for_each = var.instances_master
name = "${var.hostname_prefix}-${each.key}"
computer_name = "${var.hostname_prefix}-${each.key}"
size = var.vm_size
resource_group_name = azurerm_resource_group.rg_dev.name
location = azurerm_resource_group.rg_dev.location
availability_set_id = each.value.as
network_interface_ids = [azurerm_network_interface.node_master[each.key].id]
...
This gives me the error:
Error: Cannot parse Azure ID: parse "azurerm_availability_set.as_aamaster.id": invalid URI for request
on vm_dev/main.tf line 72, in resource "azurerm_linux_virtual_machine" "node_master":
72: availability_set_id = each.value.as
Any advice is appreciated.
Thanks
I tested your code and it failed with the same error.
So, for the solution, as Ivan Ignatiev has suggested, you will have to use the below:
instances_master = {
"aa-elastic-master-0" = { ip = "10.0.2.1", as = "${azurerm_availability_set.as_aamaster.id}" }
"aa-elastic-master-1" = { ip = "10.0.2.2", as = "${azurerm_availability_set.as_aamaster.id}" }
"xx-elastic-master-0" = { ip = "10.0.2.3", as = "${azurerm_availability_set.as_xxmaster.id}" }
"xx-elastic-master-1" = { ip = "10.0.2.4", as = "${azurerm_availability_set.as_xxmaster.id}" }
}
instances_data = {
"aa-elastic-data-0" = { ip = "10.0.2.5", as = "${azurerm_availability_set.as_aadata.id}" }
"aa-elastic-data-1" = { ip = "10.0.2.6", as = "${azurerm_availability_set.as_aadata.id}" }
"aa-elastic-data-2" = { ip = "10.0.2.7", as = "${azurerm_availability_set.as_aadata.id}" }
"xx-elastic-data-0" = { ip = "10.0.2.8", as = "${azurerm_availability_set.as_xxdata.id}" }
"xx-elastic-data-1" = { ip = "10.0.2.9", as = "${azurerm_availability_set.as_xxdata.id}" }
"xx-elastic-data-2" = { ip = "10.0.2.10", as = "${azurerm_availability_set.as_xxdata.id}" }
}
main.tf
resource "azurerm_network_interface" "node_master" {
for_each = var.instances_master
name = "ansuman-${each.key}-nic"
resource_group_name = azurerm_resource_group.rg_dev.name
location = azurerm_resource_group.rg_dev.location
internal_dns_name_label = "ansuman-${each.key}"
ip_configuration {
name = "primary"
primary = true
subnet_id = azurerm_subnet.example.id
private_ip_address = each.value.ip
private_ip_address_allocation = "Static"
private_ip_address_version = "IPv4"
}
}
resource "azurerm_availability_set" "as_aamaster" {
name = "ansuman-as-aamaster"
resource_group_name = azurerm_resource_group.rg_dev.name
location = azurerm_resource_group.rg_dev.location
managed = true
}
resource "azurerm_availability_set" "as_xxmaster" {
name = "ansuman-as-xxmaster"
resource_group_name = azurerm_resource_group.rg_dev.name
location = azurerm_resource_group.rg_dev.location
managed = true
}
resource "azurerm_linux_virtual_machine" "node_master" {
for_each = var.instances_master
name = "ansuman-${each.key}"
computer_name = "ansuman-${each.key}"
size = "Standard_B1s"
resource_group_name = azurerm_resource_group.rg_dev.name
location = azurerm_resource_group.rg_dev.location
availability_set_id = each.value.as
network_interface_ids = [azurerm_network_interface.node_master[each.key].id]
os_disk {
name = "ansuman-${each.key}-disk-os"
storage_account_type = "StandardSSD_LRS"
caching = "ReadWrite"
}
source_image_reference {
publisher = "Canonical"
offer = "UbuntuServer"
sku = "18.04-LTS"
version = "latest"
}
admin_username = "myuser"
admin_ssh_key {
username = "myuser"
public_key = file("~/.ssh/id_rsa.pub")
}
disable_password_authentication = true
}
resource "azurerm_network_interface" "node_data" {
for_each = var.instances_data
name = "ansuman-${each.key}-nic"
resource_group_name = azurerm_resource_group.rg_dev.name
location = azurerm_resource_group.rg_dev.location
internal_dns_name_label = "ansuman-${each.key}"
ip_configuration {
name = "primary"
primary = true
subnet_id = azurerm_subnet.example.id
private_ip_address = each.value.ip
private_ip_address_allocation = "Static"
private_ip_address_version = "IPv4"
}
}
resource "azurerm_availability_set" "as_aadata" {
name = "ansuman-as-aadata"
resource_group_name = azurerm_resource_group.rg_dev.name
location = azurerm_resource_group.rg_dev.location
managed = true
}
resource "azurerm_availability_set" "as_xxdata" {
name = "ansuman-as-xxdata"
resource_group_name = azurerm_resource_group.rg_dev.name
location = azurerm_resource_group.rg_dev.location
managed = true
}
resource "azurerm_linux_virtual_machine" "node_data" {
for_each = var.instances_data
name = "ansuman-${each.key}"
computer_name = "ansuman-${each.key}"
size = "Standard_B1s"
resource_group_name = azurerm_resource_group.rg_dev.name
location = azurerm_resource_group.rg_dev.location
availability_set_id = each.value.as
network_interface_ids = [azurerm_network_interface.node_data[each.key].id]
os_disk {
name = "ansuman-${each.key}-disk-os"
storage_account_type = "StandardSSD_LRS"
caching = "ReadWrite"
}
source_image_reference {
publisher = "Canonical"
offer = "UbuntuServer"
sku = "18.04-LTS"
version = "latest"
}
admin_username = "myuser"
admin_ssh_key {
username = "myuser"
public_key = file("~/.ssh/id_rsa.pub")
}
disable_password_authentication = true
}
AnsumanBal-MT, this did not work for me (I have added a comment above), but I was able to solve it via:
"aa-elastic-master-0" = { ip = "10.0.2.1", as = "0" }
"xx-elastic-master-0" = { ip = "10.0.2.3", as = "1" }
In the module:
resource "azurerm_availability_set" "as_dev" {
count = 5
name = "${var.hostname_prefix}-dev-${element(var.availability_set_name, count.index)}-as"
resource_group_name = azurerm_resource_group.rg_dev.name
location = var.location
}
and for azurerm_linux_virtual_machine I added:
availability_set_id = azurerm_availability_set.as_dev[each.value.as].id
And the variable:
variable "availability_set_name" {
description = "Availability set name that the VMs will be created in"
type = list(any)
default = ["aamaster", "xxmaster", "aadata", ....]
}
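A slightly more self-describing variant of the same idea is to key the availability sets by name with for_each, so the per-instance as value can be a name instead of a list index. A sketch, reusing the same variable:

resource "azurerm_availability_set" "as_dev" {
  for_each            = toset(var.availability_set_name)
  name                = "${var.hostname_prefix}-dev-${each.key}-as"
  resource_group_name = azurerm_resource_group.rg_dev.name
  location            = var.location
  managed             = true
}

# In the VM resources:
#   availability_set_id = azurerm_availability_set.as_dev[each.value.as].id
# with instance entries like:
#   "aa-elastic-master-0" = { ip = "10.0.2.1", as = "aamaster" }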
My Terraform code is below:
module "centos-vm-author-2" {
source = "terraform.automation.temp.com.au/temp/temp-linux-vm/azurerm"
version = "6.7.0"
location = var.resource_location
resource_group_name = var.resource_group_name_2
vm_count = "1"
tags = local.tags
size = var.vm_size
hostname_prefix = var.hostname_prefix
hostname_suffix_start_range = "491"
image_publisher = "OpenLogic"
image_offer = "Centos"
image_sku = "7_9"
subnet_id = var.auth_pub_subnet_id
admin_username = "azureadmin"
availability_set_id = azurerm_availability_set.aemfeature1authoras.id
patching_tags = local.patching_tags
ansible_vault_key = var.ansible_vault_key
log_to_loganalytics = false
ou_tags = local.ou_tags
os_disk_size = var.os_size_gb
os_disk_type = var.storage_account_type
server_access_memberships = ["CN=DSTDEVOPS,OU=DistributionGroups,OU=Groups,OU=Resources,DC=temp,DC=int"]
sudoers = ["%DSTDEVOPS"]
data_disks = [
[
{
disk_size_gb = var.disk_size_gb
storage_account_type = var.storage_account_type
caching = "ReadWrite"
create_option = "Empty"
source_resource_id = ""
write_accelerator_enabled = false
}
]
]
}
resource "null_resource" "centos-vm-author-ansible" {
provisioner "local-exec" {
command = <<EOF
ansible-playbook -i '${join(",", azurerm_network_interface.centos-vm-author-2.*.private_ip_address)},'-e ansible_user=${var.admin_username} -e "role_name=automate-author" main.yaml
EOF
}
depends_on = [
module.centos-vm-author-2
]
}
Basically I want to tell Ansible the private IPs on which it should execute the role.
I am getting an error like the below:
Error: Reference to undeclared resource

  on main.tf line 236, in resource "null_resource" "centos-vm-author-ansible":
    ansible-playbook -i '${join(",", azurerm_network_interface.centos-vm-author-2.*.private_ip_address)},'-e ansible_user=${var.admin_username} -e "role_name=automate-author" main.yaml

A managed resource "azurerm_network_interface" "centos-vm-author-2" has not
been declared in the root module.
I would sincerely appreciate any help in understanding what the issue is and how to resolve it.
P.S: The TF Module code is like below:
resource "azurerm_network_interface" "main" {
count = var.vm_count
name = "${format("${var.hostname_prefix}%04d", var.hostname_suffix_start_range + count.index, )}-nic"
location = var.location
resource_group_name = var.resource_group_name
enable_accelerated_networking = var.enable_accelerated_networking
ip_configuration {
name = "${format("${var.hostname_prefix}%04d", var.hostname_suffix_start_range + count.index, )}-ipconfig"
subnet_id = var.subnet_id
private_ip_address_allocation = var.private_ip_address_allocation
private_ip_address = var.private_ip_address
public_ip_address_id = var.enable_public_ip_address ? azurerm_public_ip.main[count.index].id : null
}
tags = var.tags
}
resource "azurerm_network_interface_backend_address_pool_association" "lbconf" {
count = var.backend_address_pool_id == null ? 0 : var.vm_count
network_interface_id = azurerm_network_interface.main[count.index].id
ip_configuration_name = azurerm_network_interface.main[count.index].ip_configuration[0].name
backend_address_pool_id = var.backend_address_pool_id
}
resource "azurerm_linux_virtual_machine" "main" {
count = var.vm_count
name = format("${var.hostname_prefix}%04d", var.hostname_suffix_start_range + count.index, )
location = var.location
resource_group_name = var.resource_group_name
admin_username = var.admin_username
admin_ssh_key {
username = var.admin_username
public_key = chomp(tls_private_key.bootstrap_private_key.public_key_openssh)
}
disable_password_authentication = var.disable_password_authentication
network_interface_ids = [azurerm_network_interface.main[count.index].id]
size = var.size
availability_set_id = var.availability_set_id
source_image_reference {
publisher = var.image_publisher
offer = var.image_offer
sku = var.image_sku
version = var.image_version
}
os_disk {
name = "${format("${var.hostname_prefix}%04d", var.hostname_suffix_start_range + count.index, )}-osdisk"
caching = "ReadWrite"
storage_account_type = var.os_disk_type
disk_size_gb = var.os_disk_size
}
dynamic "identity" {
for_each = var.identity
content {
type = identity.value["type"]
identity_ids = identity.value["type"] == "SystemAssigned" ? [] : identity.value["identity_ids"]
}
}
dynamic "plan" {
for_each = var.marketplace_image ? [1] : []
content {
name = var.image_sku
product = var.image_offer
publisher = var.image_publisher
}
}
# boot_diagnostics {
# storage_account_uri = var.boot_diagnostics_storage_uri
# }
tags = var.ou_tags == null ? merge(var.tags, var.patching_tags) : merge(var.tags, var.ou_tags, var.patching_tags)
}
To refer to your module, instead of:
azurerm_network_interface.centos-vm-author-2.*.private_ip_address
it should be:
module.centos-vm-author-2.private_ip_addresses
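Note that this assumes the module exposes an output named private_ip_addresses. If your module version does not, a sketch of the output that would need to exist inside the module (alongside the azurerm_network_interface.main resource shown above):

output "private_ip_addresses" {
  description = "Private IP addresses of the NICs created by this module"
  value       = azurerm_network_interface.main[*].private_ip_address
}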