I added the Application Gateway Ingress Controller for AKS in my Terraform configuration. After adding it, I get the error shown above.
My Azure modules are below.
Cluster Resource Group
resource "azurerm_resource_group" "aks" {
name = var.resource_group_name
location = var.location
}
AKS Cluster Network
module "aks_network" {
source = "…/modules/aks_network"
subnet_name = var.subnet_name
vnet_name = var.vnet_name
name = azurerm_resource_group.aks.name
subnet_cidr = var.subnet_cidr
location = var.location
address_space = var.address_space
app_gateway_subnet_name = var.app_gateway_subnet_name
app_gateway_subnet_address_prefix = var.app_gateway_subnet_address_prefix
}
AKS Log Analytics
module "log_analytics" {
source = "…/modules/log_analytics"
name = azurerm_resource_group.aks.name
log_analytics_workspace_location = var.log_analytics_workspace_location
log_analytics_workspace_name = var.log_analytics_workspace_name
log_analytics_workspace_sku = var.log_analytics_workspace_sku
}
AKS Cluster
module "aks_cluster" {
source = "…/modules/aks-cluster"
cluster_name = var.cluster_name
location = var.location
dns_prefix = var.dns_prefix
name = azurerm_resource_group.aks.name
kubernetes_version = var.kubernetes_version
node_count = var.node_count
min_count = var.min_count
max_count = var.max_count
vm_size = var.vm_size
service_cidr = var.service_cidr
network_plugin = var.network_plugin
vnet_subnet_id = module.aks_network.aks_subnet_id
client_id = var.client_id
client_secret = var.client_secret
environment = var.environment
subnet_id = module.aks_network.subnet_app_gateway_id
}
and below is the variables file for the above modules
variables.tf
variable "client_id" {
description = "Azure Service Principal id (client id)"
}
variable "client_secret" {
description = "Azure Service Principal secret (client secret)"
}
variable "resource_group_name" {
description = "Resource Group name"
}
variable "node_count" {
description = "number of nodes to deploy"
}
variable "dns_prefix" {
description = "DNS prefix"
}
variable "cluster_name" {
description = "AKS cluster name"
}
variable "location" {
description = "azure location to deploy resources"
}
variable "log_analytics_workspace_name" {
description = "azure name to deploy log analytics workspace"
}
variable "log_analytics_workspace_location" {
description = "azure location to deploy log analytics workspace"
}
variable "log_analytics_workspace_sku" {
description = "azure sku to deploy log analytics workspace"
}
variable "subnet_name" {
description = "name of the subnet where the nodes will be deployed"
}
variable "vnet_name" {
description = "name of the vnet where the nodes will be deployed"
}
variable "subnet_cidr" {
description = "the subnet cidr range"
}
variable "kubernetes_version" {
description = "version of the kubernetes cluster"
}
variable "vm_size" {
description = "size/type of VM to use for nodes"
}
variable "service_cidr" {
description = "kubernetes internal service cidr range"
}
variable "network_plugin" {
description = "network plugin for the kubernetes network overlay (azure or calico)"
}
variable "address_space" {
description = "The address space that is used by the virtual network"
}
variable "min_count" {
description = "Minimum Node Count"
}
variable "max_count" {
description = "Maximum Node Count"
}
variable "environment" {
description = "Environment"
}
variable "app_gateway_subnet_name" {
description = "App Gateway Subnet Name"
}
variable "app_gateway_subnet_address_prefix" {
description = "App Gateway Subnet Address Prefix"
}
aks_network
main.tf is as below
resource "azurerm_virtual_network" "aks_vnet" {
name = var.vnet_name
address_space = [var.address_space]
resource_group_name = var.name
location = var.location
}
resource "azurerm_subnet" "aks_subnet" {
name = var.subnet_name
resource_group_name = var.name
virtual_network_name = azurerm_virtual_network.aks_vnet.name
address_prefix = var.subnet_cidr
}
resource "azurerm_subnet" "subnet_app_gateway" {
resource_group_name = var.name
virtual_network_name = azurerm_virtual_network.aks_vnet.name
name = var.app_gateway_subnet_name
address_prefix = var.app_gateway_subnet_address_prefix
}
variables.tf
variable "subnet_name" {
description = "name to give the subnet"
}
variable "name" {
description = "resource group that the vnet resides in"
}
variable "vnet_name" {
description = "name of the vnet that this subnet will belong to"
}
variable "subnet_cidr" {
description = "the subnet cidr range"
}
variable "location" {
description = "the cluster location"
}
variable "address_space" {
description = "Network address space"
}
variable "app_gateway_subnet_name" {
description = "App Gateway Subnet Name."
default = "agw-subnet"
}
variable "app_gateway_subnet_address_prefix" {
description = "App Gateway subnet address prefix."
default = "10.100.0.0/24"
}
aks-cluster
main.tf
resource "azurerm_kubernetes_cluster" "cluster" {
name = var.cluster_name
location = var.location
resource_group_name = var.name
dns_prefix = var.dns_prefix
kubernetes_version = var.kubernetes_version
default_node_pool {
name = var.default_pool_name
node_count = var.node_count
vm_size = var.vm_size
vnet_subnet_id = var.vnet_subnet_id
type = var.default_pool_type
enable_auto_scaling = true
min_count = var.min_count
max_count = var.max_count
}
addon_profile {
azure_policy {
enabled = true
}
ingress_application_gateway {
enabled = true
subnet_id = resource.azurerm_subnet.subnet_app_gateway.id
}
}
role_based_access_control {
enabled = true
}
network_profile {
network_plugin = var.network_plugin
network_policy = "azure"
service_cidr = var.service_cidr
dns_service_ip = "10.0.0.10"
docker_bridge_cidr = "172.17.0.1/16"
}
service_principal {
client_id = var.client_id
client_secret = var.client_secret
}
tags = {
Environment = var.environment
}
}
variables.tf
variable "dns_prefix" {
description = "DNS prefix"
}
variable "location" {
description = "azure location to deploy resources"
}
variable "cluster_name" {
description = "AKS cluster name"
}
variable "name" {
description = "name of the resource group to deploy AKS cluster in"
}
variable "kubernetes_version" {
description = "version of the kubernetes cluster"
}
variable "agent_pool_name" {
description = "name for the agent pool profile"
default = "agentpool"
}
variable "agent_pool_type" {
description = "type of the agent pool (AvailabilitySet and VirtualMachineScaleSets)"
default = "VirtualMachineScaleSets"
}
variable "node_count" {
description = "number of nodes to deploy"
}
variable "vm_size" {
description = "size/type of VM to use for nodes"
}
variable "vnet_subnet_id" {
description = "subnet id where the nodes will be deployed"
}
variable "network_plugin" {
description = "network plugin for the kubernetes network overlay (azure or calico)"
}
variable "service_cidr" {
description = "kubernetes internal service cidr range"
}
variable "client_id" {
description = "Service principal Client Id"
}
variable "client_secret" {
description = "Service principal Client Secret"
}
variable "min_count" {
description = "Minimum Node Count"
}
variable "max_count" {
description = "Maximum Node Count"
}
variable "default_pool_name" {
description = "name for the agent pool profile"
default = "agentpool"
}
variable "default_pool_type" {
description = "type of the agent pool (AvailabilitySet and VirtualMachineScaleSets)"
default = "VirtualMachineScaleSets"
}
variable "environment" {
description = "Environment"
}
There is no such variable "subnet_id" in the aks-cluster module, so I removed subnet_id from the aks_cluster module call. After removing it, I get the error below:
Unsupported block type: in the aks-cluster folder's main.tf, "Blocks of type ingress_application_gateway are not expected here."
In case anybody else lands on this page when searching for this error message: we fixed the error "Blocks of type ingress_application_gateway are not expected here" by upgrading the Terraform azurerm provider from version 2.43 to 2.73.
terraform {
required_providers {
azurerm = {
source = "hashicorp/azurerm"
version = "~>2.73.0"
}
}
}
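With the upgraded provider the block is accepted; what remains is wiring the Application Gateway subnet through the modules rather than referencing another module's resource directly. A minimal sketch (the variable and output names are illustrative, chosen to match the root module call shown above, not taken from the original post):
# aks_network: expose the App Gateway subnet id as an output (illustrative)
output "subnet_app_gateway_id" {
  value = azurerm_subnet.subnet_app_gateway.id
}

# aks-cluster: accept the subnet id as an input (illustrative)
variable "subnet_id" {
  description = "subnet id for the AGIC-managed Application Gateway"
}

# aks-cluster main.tf: reference the variable inside the addon block
addon_profile {
  ingress_application_gateway {
    enabled   = true
    subnet_id = var.subnet_id
  }
}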
Related
I've followed the Microsoft article and created a Kubernetes cluster in Azure. Below is the complete Terraform code that I have used to build the Kubernetes cluster along with the AppGateway.
variable "resource_group_name_prefix" {
default = "rg"
description = "Prefix of the resource group name that's combined with a random ID so name is unique in your Azure subscription."
}
variable "resource_group_location" {
default = "eastus"
description = "Location of the resource group."
}
variable "virtual_network_name" {
description = "Virtual network name"
default = "aksVirtualNetwork"
}
variable "virtual_network_address_prefix" {
description = "VNET address prefix"
default = "192.168.0.0/16"
}
variable "aks_subnet_name" {
description = "Subnet Name."
default = "kubesubnet"
}
variable "aks_subnet_address_prefix" {
description = "Subnet address prefix."
default = "192.168.0.0/24"
}
variable "app_gateway_subnet_address_prefix" {
description = "Subnet server IP address."
default = "192.168.1.0/24"
}
variable "app_gateway_name" {
description = "Name of the Application Gateway"
default = "ApplicationGateway1"
}
variable "app_gateway_sku" {
description = "Name of the Application Gateway SKU"
default = "Standard_v2"
}
variable "app_gateway_tier" {
description = "Tier of the Application Gateway tier"
default = "Standard_v2"
}
variable "aks_name" {
description = "AKS cluster name"
default = "aks-cluster1"
}
variable "aks_dns_prefix" {
description = "Optional DNS prefix to use with hosted Kubernetes API server FQDN."
default = "aks"
}
variable "aks_agent_os_disk_size" {
description = "Disk size (in GB) to provision for each of the agent pool nodes. This value ranges from 0 to 1023. Specifying 0 applies the default disk size for that agentVMSize."
default = 40
}
variable "aks_agent_count" {
description = "The number of agent nodes for the cluster."
default = 1
}
variable "aks_agent_vm_size" {
description = "VM size"
default = "Standard_B8ms"
}
variable "aks_service_cidr" {
description = "CIDR notation IP range from which to assign service cluster IPs"
default = "10.0.0.0/16"
}
variable "aks_dns_service_ip" {
description = "DNS server IP address"
default = "10.0.0.10"
}
variable "aks_docker_bridge_cidr" {
description = "CIDR notation IP for Docker bridge."
default = "172.17.0.1/16"
}
variable "aks_enable_rbac" {
description = "Enable RBAC on the AKS cluster. Defaults to false."
default = "false"
}
variable "vm_user_name" {
description = "User name for the VM"
default = "vmuser1"
}
variable "public_ssh_key_path" {
description = "Public key path for SSH."
default = "./keys/id_rsa.pub"
}
variable "tags" {
type = map(string)
default = {
source = "terraform"
}
}
# Locals block for hardcoded names
locals {
backend_address_pool_name = "${azurerm_virtual_network.test.name}-beap"
frontend_port_name = "${azurerm_virtual_network.test.name}-feport"
frontend_ip_configuration_name = "${azurerm_virtual_network.test.name}-feip"
http_setting_name = "${azurerm_virtual_network.test.name}-be-htst"
listener_name = "${azurerm_virtual_network.test.name}-httplstn"
request_routing_rule_name = "${azurerm_virtual_network.test.name}-rqrt"
app_gateway_subnet_name = "appgwsubnet"
subscription_id = "<subscription_id>"
tenant_id = "<tenant_id>"
client_id = "<client_id>"
client_secret = "<client_secret>"
client_objectid = "<client_objectid>"
}
terraform {
required_version = ">=0.12"
required_providers {
azurerm = {
source = "hashicorp/azurerm"
version = "~>2.0"
}
}
}
provider "azurerm" {
subscription_id = local.subscription_id
tenant_id = local.tenant_id
client_id = local.client_id
client_secret = local.client_secret
features {}
}
resource "random_pet" "rg-name" {
prefix = var.resource_group_name_prefix
}
resource "azurerm_resource_group" "rg" {
name = random_pet.rg-name.id
location = var.resource_group_location
}
# User Assigned Identities
resource "azurerm_user_assigned_identity" "testIdentity" {
resource_group_name = azurerm_resource_group.rg.name
location = azurerm_resource_group.rg.location
name = "identity1"
tags = var.tags
}
resource "azurerm_virtual_network" "test" {
name = var.virtual_network_name
location = azurerm_resource_group.rg.location
resource_group_name = azurerm_resource_group.rg.name
address_space = [var.virtual_network_address_prefix]
subnet {
name = var.aks_subnet_name
address_prefix = var.aks_subnet_address_prefix
}
subnet {
name = "appgwsubnet"
address_prefix = var.app_gateway_subnet_address_prefix
}
tags = var.tags
}
data "azurerm_subnet" "kubesubnet" {
name = var.aks_subnet_name
virtual_network_name = azurerm_virtual_network.test.name
resource_group_name = azurerm_resource_group.rg.name
depends_on = [azurerm_virtual_network.test]
}
data "azurerm_subnet" "appgwsubnet" {
name = "appgwsubnet"
virtual_network_name = azurerm_virtual_network.test.name
resource_group_name = azurerm_resource_group.rg.name
depends_on = [azurerm_virtual_network.test]
}
# Public Ip
resource "azurerm_public_ip" "test" {
name = "publicIp1"
location = azurerm_resource_group.rg.location
resource_group_name = azurerm_resource_group.rg.name
allocation_method = "Static"
sku = "Standard"
tags = var.tags
}
resource "azurerm_application_gateway" "network" {
name = var.app_gateway_name
resource_group_name = azurerm_resource_group.rg.name
location = azurerm_resource_group.rg.location
sku {
name = var.app_gateway_sku
tier = "Standard_v2"
capacity = 2
}
gateway_ip_configuration {
name = "appGatewayIpConfig"
subnet_id = data.azurerm_subnet.appgwsubnet.id
}
frontend_port {
name = local.frontend_port_name
port = 80
}
frontend_port {
name = "httpsPort"
port = 443
}
frontend_ip_configuration {
name = local.frontend_ip_configuration_name
public_ip_address_id = azurerm_public_ip.test.id
}
backend_address_pool {
name = local.backend_address_pool_name
}
backend_http_settings {
name = local.http_setting_name
cookie_based_affinity = "Disabled"
port = 80
protocol = "Http"
request_timeout = 1
}
http_listener {
name = local.listener_name
frontend_ip_configuration_name = local.frontend_ip_configuration_name
frontend_port_name = local.frontend_port_name
protocol = "Http"
}
request_routing_rule {
name = local.request_routing_rule_name
rule_type = "Basic"
http_listener_name = local.listener_name
backend_address_pool_name = local.backend_address_pool_name
backend_http_settings_name = local.http_setting_name
}
tags = var.tags
depends_on = [azurerm_virtual_network.test, azurerm_public_ip.test]
}
resource "azurerm_kubernetes_cluster" "k8s" {
name = var.aks_name
location = azurerm_resource_group.rg.location
dns_prefix = var.aks_dns_prefix
resource_group_name = azurerm_resource_group.rg.name
http_application_routing_enabled = false
linux_profile {
admin_username = var.vm_user_name
ssh_key {
key_data = file(var.public_ssh_key_path)
}
}
default_node_pool {
name = "agentpool"
node_count = var.aks_agent_count
vm_size = var.aks_agent_vm_size
os_disk_size_gb = var.aks_agent_os_disk_size
vnet_subnet_id = data.azurerm_subnet.kubesubnet.id
}
service_principal {
client_id = local.client_id
client_secret = local.client_secret
}
network_profile {
network_plugin = "azure"
dns_service_ip = var.aks_dns_service_ip
docker_bridge_cidr = var.aks_docker_bridge_cidr
service_cidr = var.aks_service_cidr
}
role_based_access_control {
enabled = var.aks_enable_rbac
}
depends_on = [azurerm_virtual_network.test, azurerm_application_gateway.network]
tags = var.tags
}
resource "azurerm_role_assignment" "ra1" {
scope = data.azurerm_subnet.kubesubnet.id
role_definition_name = "Network Contributor"
principal_id = local.client_objectid
depends_on = [azurerm_virtual_network.test]
}
resource "azurerm_role_assignment" "ra2" {
scope = azurerm_user_assigned_identity.testIdentity.id
role_definition_name = "Managed Identity Operator"
principal_id = local.client_objectid
depends_on = [azurerm_user_assigned_identity.testIdentity]
}
resource "azurerm_role_assignment" "ra3" {
scope = azurerm_application_gateway.network.id
role_definition_name = "Contributor"
principal_id = azurerm_user_assigned_identity.testIdentity.principal_id
depends_on = [azurerm_user_assigned_identity.testIdentity, azurerm_application_gateway.network]
}
resource "azurerm_role_assignment" "ra4" {
scope = azurerm_resource_group.rg.id
role_definition_name = "Reader"
principal_id = azurerm_user_assigned_identity.testIdentity.principal_id
depends_on = [azurerm_user_assigned_identity.testIdentity, azurerm_application_gateway.network]
}
I have created the id_rsa.pub key as mentioned below
AKS Cluster:
AppGateway:
and I have installed the AGIC by running the following commands
az account set --subscription "Dev3-Tooling"
az aks get-credentials --resource-group "rg-active-stag" --name "aks-cluster1"
kubectl apply -f https://raw.githubusercontent.com/Azure/aad-pod-identity/v1.8.6/deploy/infra/deployment.yaml --insecure-skip-tls-verify
helm repo add application-gateway-kubernetes-ingress https://appgwingress.blob.core.windows.net/ingress-azure-helm-package/
helm repo update
helm install ingress-azure -f helm-config.yaml application-gateway-kubernetes-ingress/ingress-azure --version 1.5.0
Azure Ingress Pod is in the unhealthy state
and I am getting the following error while trying to access the application deployed
I tried to reproduce the same setup in my lab environment and got the results below.
Step-1:
After provisioning kubernetes cluster and Application Gateway, connect to the cluster using the below command
$ az aks get-credentials -g <resourceGroupName> -n <cluster-name>
Step-2:
Install Azure AD Pod identity for token-based access control
Use the command below if your cluster is RBAC-enabled:
kubectl create -f https://raw.githubusercontent.com/azure/aad-pod-identity/master/deploy/infra/deployment-rbac.yaml
Use the command below if your cluster is RBAC-disabled:
kubectl create -f https://raw.githubusercontent.com/azure/aad-pod-identity/master/deploy/infra/deployment.yaml
Step-3:
Add the Application Gateway ingress controller chart to the Helm repository:
$ helm repo add application-gateway-kubernetes-ingress https://appgwingress.blob.core.windows.net/ingress-azure-helm-package/
$ helm repo update
Step-4:
Now write the Helm configuration file and save it as helm-config.yaml, as below.
verbosityLevel: 3
appgw:
    subscriptionId: <subscriptionId>
    resourceGroup: <resourceGroupName>
    name: <applicationGatewayName>
    shared: false
armAuth:
    type: aadPodIdentity
    identityResourceID: <identityResourceId>
    identityClientID: <identityClientId>
rbac:
    enabled: false # true/false
aksClusterConfiguration:
    apiServerAddress: <aks-api-server-address>
Step-5:
Now install AGIC using the above file.
$ helm install -f helm-config.yaml --generate-name application-gateway-kubernetes-ingress/ingress-azure
After performing the above steps, you can verify ingress service using the below command.
$ kubectl get ingress
Access the IP address shown in the above screenshot and you will be able to reach the application.
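For reference, a minimal Ingress manifest that AGIC will pick up might look like the sketch below; the service name and port are placeholders, not taken from the question:
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: sample-ingress
  annotations:
    kubernetes.io/ingress.class: azure/application-gateway
spec:
  rules:
  - http:
      paths:
      - path: /
        pathType: Prefix
        backend:
          service:
            name: sample-service   # placeholder backend service
            port:
              number: 80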
I have created separate modules for vnet, NIC, and VM. I am trying to create two VMs in the VM module and two NICs in the NIC module. I created an output in the NIC module to get the NIC id, and I refer to this output in the VM module, but only one VM gets created (with both NICs) and the second VM fails to create due to the unavailability of a NIC. Please find my code below; I need to be able to map each individual NIC in the NIC module to an individual VM in the VM module (see the sketch after the vnet module code).
main.tf
module "nic" {
source = "./Nic"
resource_group_name = module.vnet1mod.rgnameout
location = module.vnet1mod.rglocationout
subnet_id = module.vnet1mod.subnetout
}
module "vnet1mod" {
source = "./vnetmodule"
}
module "virtualmachine" {
source = "./VirtualMachine"
resource_group_name = module.vnet1mod.rgnameout
location = module.vnet1mod.rglocationout
network_interface_ids = module.nic.netinterfaceoutput # this is where it's failing!
}
..............
nic module
resource "azurerm_network_interface" "nic1" {
for_each = var.vmdetails
name = each.value.vmnic
location = var.location
resource_group_name = var.resource_group_name
ip_configuration {
name = "internal"
subnet_id = var.subnet_id
private_ip_address_allocation = "Dynamic"
}
}
output "netinterfaceoutput" {
value = tomap({ for k, s in azurerm_network_interface.nic1 : k => s.id })
}
variable "location" {`enter code here`
type = string
description = "(optional) describe your variable"
}
variable "resource_group_name" {
type = string
description = "(optional) describe your variable"
}
variable "subnet_id" {
type = string
description = "(optional) describe your variable"
}
...........
vm module
resource "azurerm_windows_virtual_machine" "vm1" {
for_each = var.vmdetails
name = each.value.vmname
resource_group_name = var.resource_group_name
location = var.location
size = var.vmsize
admin_username = var.adminusername
admin_password = var.adminpassword
network_interface_ids = var.network_interface_ids
os_disk {
caching = "ReadWrite"
storage_account_type = "Standard_LRS"
}
source_image_reference {
publisher = var.publisher
offer = var.offer
sku = var.sku
version = var.Osversion
}
}
variable "vmdetails" {
type = map(any)
default = {
"vm1" = {
vmname = "vmA-1"
vmnic = "vmnicA-1"
}
"vm2" = {
vmname = "vmA-2"
vmnic = "vmnicA-2"
}
}
}
........
vnet module
resource "azurerm_virtual_network" "vnet1" {
name = var.vnet_name
location = var.location_name
resource_group_name = var.resourcegroup1_name
address_space = var.vnet_address
}
resource "azurerm_subnet" "subnet1" {
name = var.subnet_name
resource_group_name = var.resourcegroup1_name
virtual_network_name = azurerm_virtual_network.vnet1.name
address_prefixes = var.subnet_address
}
output "rgnameout" {
value = azurerm_virtual_network.vnet1.resource_group_name
}
output "rglocationout" {
value = azurerm_virtual_network.vnet1.location
}
output "subnetout" {
value = azurerm_subnet.subnet1.id
}
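One way to line the NICs up with the VMs (a sketch, assuming the same vmdetails map is available in both modules so their for_each keys match) is to keep the NIC output as a map and index it by each.key inside the VM resource:
# Root main.tf: pass the NIC id map straight through to the VM module.
module "virtualmachine" {
  source                = "./VirtualMachine"
  resource_group_name   = module.vnet1mod.rgnameout
  location              = module.vnet1mod.rglocationout
  network_interface_ids = module.nic.netinterfaceoutput
}

# VM module: declare the input as a map of NIC ids keyed like vmdetails.
variable "network_interface_ids" {
  type = map(string)
}

# Inside azurerm_windows_virtual_machine.vm1 (for_each = var.vmdetails):
#   network_interface_ids = [var.network_interface_ids[each.key]]
This way each VM ("vm1", "vm2") picks up only the NIC created for the same key instead of the whole map.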
I would like a guide on how to automatically deploy to a newly provisioned AKS cluster after provisioning it with Terraform. For more context, I am building a one-click, full infrastructure provisioning and deployment, all in one script. Below is my structure for more understanding.
main.tf
resource "azurerm_kubernetes_cluster" "aks" {
name = var.cluster_name
kubernetes_version = var.kubernetes_version
location = var.location
resource_group_name = var.resource_group_name
dns_prefix = var.cluster_name
default_node_pool {
name = "system"
node_count = var.system_node_count
vm_size = "Standard_DS2_v2"
type = "VirtualMachineScaleSets"
availability_zones = [1, 2, 3]
enable_auto_scaling = false
}
identity {
type = "SystemAssigned"
}
network_profile {
load_balancer_sku = "Standard"
network_plugin = "kubenet"
}
role_based_access_control {
enabled = true
}
}
output.tf
resource "local_file" "kubeconfig" {
depends_on = [azurerm_kubernetes_cluster.aks]
filename = "kubeconfig"
content = azurerm_kubernetes_cluster.aks.kube_config_raw
}
deployment.tf
resource "kubernetes_deployment" "sdc" {
metadata {
name = "sdc"
labels = {
app = "serviceName"
#version = "v1.0"
}
namespace = "default"
}
spec {
replicas = 1
selector {
match_labels = {
app = "serviceName"
}
}
template {
metadata {
labels = {
app = "serviceName"
# version = "v1.0"
}
}
spec {
container {
image = "myImage"
name = "serviceName"
port {
container_port = 80
}
}
}
}
}
depends_on = [
azurerm_kubernetes_cluster.aks
]
}
Everything works perfectly: my kubeconfig file is created and downloaded. My major headache is how to make the terraform apply process use the kubeconfig file that was created and also run the deployment, making my Terraform script fully automated. I basically want to provision and deploy into the newly provisioned cluster in one run.
Looking forward to good help.
Thanks, guys.
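One common approach (a sketch, not necessarily how this project is wired) is to configure the kubernetes provider directly from the AKS resource attributes, so the kubernetes_deployment resources in deployment.tf authenticate against the new cluster in the same apply without reading the local kubeconfig file:
# Sketch: authenticate the kubernetes provider straight from the AKS outputs.
provider "kubernetes" {
  host                   = azurerm_kubernetes_cluster.aks.kube_config.0.host
  client_certificate     = base64decode(azurerm_kubernetes_cluster.aks.kube_config.0.client_certificate)
  client_key             = base64decode(azurerm_kubernetes_cluster.aks.kube_config.0.client_key)
  cluster_ca_certificate = base64decode(azurerm_kubernetes_cluster.aks.kube_config.0.cluster_ca_certificate)
}
Configuring a provider from another resource's attributes does work in a single apply in many cases, but it is sensitive to ordering (for example when planning before the cluster exists), so splitting cluster provisioning and workload deployment into separate configurations is often preferred.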
I have a list of VNets in Azure and, using count, I am trying to peer from index [0] to all the remaining VNets in the list. I am not sure how to write the logic so that count starts from index [1] instead of [0] for the remote VNets.
When I use count.index, the last iteration tries to peer the VNet with itself and throws an error.
Here is my code.
Variable.tf
===========
variable "rg" {
type= list(string)
description = " Name of Resource Group"
default = ["hub", "ansible", "spoke1", "spoke2", "spoke3", "spoke4", "spoke5"]
}
#------------------------------:Vnet's:--------------------------------------------
variable "vnet_name" {
description = "Vnet Details "
type = list(string)
default = ["hub_vnet", "ansible_vnet", "spoke1_vnet", "spoke2_vnet", "spoke3_vnet", "spoke4_vnet", "spoke5_vnet"]
}
Main.tf
========
resource "azurerm_virtual_network_peering" "az_to_rest" {
name = element(var.vnet_name, count.index)
resource_group_name = azurerm_resource_group.az_rg[0].name
virtual_network_name = azurerm_virtual_network.az_vnet[0].name
remote_virtual_network_id = azurerm_virtual_network.az_vnet[count.index].id
count = length(var.vnet_name)
}
I tested it for 3 vnets in 3 resource groups using the below code:
variable "rg" {
type= list(string)
description = " Name of Resource Group"
default = ["testgroup","hubtest","ansibletest"]
}
#------------------------------:Vnet's:--------------------------------------------
variable "vnet_name" {
description = "Vnet Details "
type = list(string)
default = ["ansuman_vnet","hub_vnet","ansible_vnet"]
}
provider "azurerm" {
features {}
}
data "azurerm_resource_group" "test" {
count = length(var.rg)
name = element(var.rg,count.index)
}
data "azurerm_virtual_network" "vnet" {
count = length(var.rg)
name = element(var.vnet_name, count.index)
resource_group_name = element(data.azurerm_resource_group.test.*.name, count.index)
}
resource "azurerm_virtual_network_peering" "az_to_rest" {
name = element(var.vnet_name, count.index)
resource_group_name = "${data.azurerm_resource_group.test.0.name}"
virtual_network_name = "${data.azurerm_virtual_network.vnet.0.name}"
remote_virtual_network_id = data.azurerm_virtual_network.vnet[count.index].id
count = length(var.vnet_name)
}
output:
As you can see from the above image, it errors out because it is trying to peer with itself as well.
So, as a solution, I hard-coded the virtual network name and resource group that I want to peer with the other vnets and removed them from the list, like below:
variable "rg" {
type= list(string)
description = " Name of Resource Group"
default = ["hubtest","ansibletest"]
}
#------------------------------:Vnet's:--------------------------------------------
variable "vnet_name" {
description = "Vnet Details "
type = list(string)
default = ["hub_vnet","ansible_vnet"]
}
provider "azurerm" {
features {}
}
data "azurerm_resource_group" "test" {
count = length(var.rg)
name = element(var.rg,count.index)
}
data "azurerm_virtual_network" "vnet" {
count = length(var.rg)
name = element(var.vnet_name, count.index)
resource_group_name = element(data.azurerm_resource_group.test.*.name, count.index)
}
resource "azurerm_virtual_network_peering" "az_to_rest" {
name = element(var.vnet_name, count.index)
resource_group_name = "testgroup" # resource group of vnet1
virtual_network_name = "ansuman_vnet"#vnet1
remote_virtual_network_id = data.azurerm_virtual_network.vnet[count.index].id
count = length(var.vnet_name)
}
Output:
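Alternatively, to keep the hub VNet in the list and simply start the remote side at index [1] (the original ask), the count can be shifted by one. A sketch reusing the data sources above:
resource "azurerm_virtual_network_peering" "az_to_rest" {
  # length - 1 peerings: the vnet at index 0 peers with every other vnet.
  count                     = length(var.vnet_name) - 1
  name                      = "${element(var.vnet_name, 0)}-to-${element(var.vnet_name, count.index + 1)}"
  resource_group_name       = data.azurerm_resource_group.test[0].name
  virtual_network_name      = data.azurerm_virtual_network.vnet[0].name
  remote_virtual_network_id = data.azurerm_virtual_network.vnet[count.index + 1].id
}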
main.tf:
terraform {
required_providers {
azurerm = {
source = "hashicorp/azurerm"
version = ">= 2.26"
}
}
}
provider "azurerm" {
features {}
}
resource "azurerm_resource_group" "rg" {
name = "Product-RG"
location = var.location
}
resource "azurerm_virtual_network" "vnet" {
resource_group_name = azurerm_resource_group.rg.name
name = "Product-VNet"
address_space = [lookup(var.vnetAddress, var.location)]
location = var.location
subnet {
name = "Web-Sub1"
address_prefix = ["${lookup(var.subnetAddress[var.location], "web1")}"]
}
subnet {
name = "Web-Sub2"
address_prefix = [lookup(var.subnetAddress[var.location], "web2")]
}
}
In Web-Sub1 I originally wrote address_prefix the same way as in Web-Sub2, but after the error occurred I switched to the form now shown on Web-Sub1.
The error below occurred.
Error: Incorrect attribute value type
on main.tf line 27, in resource "azurerm_virtual_network" "vnet":
27: address_prefix = ["${lookup(var.subnetAddress[var.location], "web1")}"]
Inappropriate value for attribute "address_prefix": string required.
Error: Incorrect attribute value type
on main.tf line 31, in resource "azurerm_virtual_network" "vnet":
31: address_prefix = [lookup(var.subnetAddress[var.location], "web2")]
Inappropriate value for attribute "address_prefix": string required.
variable.tf:
variable "location" {}
variable "vnetAddress" {
type = map
default = {
westus = "192.168.1.0/27"
eastus = "192.168.11.0/27"
}
}
variable "subnetAddress" {
type = map
default = {
westus = {
web1 = "192.168.1.0/27"
web2 = "192.168.1.32/27"
was1 = "192.168.1.64/27"
was2 = "192.168.1.96/27"
db1 = "192.168.1.128/27"
db2 = "192.168.1.160/27"
}
eastus = {
web1 = "192.168.11.0/27"
web2 = "192.168.11.32/27"
was1 = "192.168.11.64/27"
was2 = "192.168.11.96/27"
db1 = "192.168.11.128/27"
db2 = "192.168.11.160/27"
}
}
}
I wonder why the error says a string is required and why I can't pull the data through.
You are almost there; it's just that the address_prefix argument needs to be a string, and you are passing a list of strings: address_prefix = [lookup(var.subnetAddress[var.location], "web2")]
subnet {
name = "Web-Sub1"
address_prefix = lookup(var.subnetAddress[var.location], "web1")
}
subnet {
name = "Web-Sub2"
address_prefix = lookup(var.subnetAddress[var.location], "web2")
}
This should work.
Refer to the azurerm_virtual_network resource documentation: address_prefix is passed as a string rather than a list of strings.