How to fetch the id of a resource using its name in Terraform?

I have a Terraform resource in which I am trying to make the subnet_id value dynamic. I have the variables defined below, in which subnet_name = "worker-subnet-1". I want to pass the name of the subnet and fetch the subnet id, as I have multiple subnets. How can I do that?
resource "oci_containerengine_node_pool" "node_pool" {
for_each = var.nodepools
cluster_id = oci_containerengine_cluster.cluster[0].id
compartment_id = var.compartment_id
depends_on = [oci_containerengine_cluster.cluster]
kubernetes_version = var.cluster_kubernetes_version
name = each.value["name"]
node_config_details {
placement_configs {
availability_domain = var.availability_domain
subnet_id = oci_core_subnet.each.value["subnet_name"].id
}
size = each.value["size"]
}
node_shape = each.value["node_shape"]
node_shape_config {
#Optional
memory_in_gbs = each.value["memory"]
ocpus = each.value["ocpus"]
}
node_source_details {
image_id = each.value["image_id"]
source_type = "IMAGE"
}
ssh_public_key = file(var.ssh_public_key_path)
}
These are my variables:
nodepools = {
  np1 = {
    name        = "np1"
    size        = 3
    ocpus       = 8
    memory      = 120
    image_id    = "test"
    node_shape  = "VM.Standard2.8"
    subnet_name = "worker-subnet-1"
  }
  np2 = {
    name        = "np2"
    size        = 2
    ocpus       = 8
    memory      = 120
    image_id    = "test"
    node_shape  = "VM.Standard2.8"
    subnet_name = "worker-subnet-1"
  }
}
Any suggestions? For reference, this is the subnet resource:
resource "oci_core_subnet" "snet-workers" {
cidr_block = lookup(var.subnets["snet-workers"], "subnet_cidr")
compartment_id = var.compartment_id
vcn_id = oci_core_virtual_network.base_vcn.id
display_name = lookup(var.subnets["snet-workers"], "display_name")
dns_label = lookup(var.subnets["snet-workers"], "dns_label")
prohibit_public_ip_on_vnic = true
security_list_ids = [oci_core_security_list.private_worker_nodes.id]
route_table_id = oci_core_route_table.rt-nat.id
}

You have to reference it like below, changing <local resource name> to the local name you gave your subnet resource (and creating the subnets with for_each so they can be indexed by key):
subnet_id = oci_core_subnet.<local resource name>[each.value.subnet_name].id
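For example, a minimal sketch, assuming var.subnets is a map keyed by subnet name (matching the lookup calls above) and switching the subnet resource to for_each (the local name workers is illustrative):

resource "oci_core_subnet" "workers" {
  for_each                   = var.subnets   # keys are subnet names, e.g. "worker-subnet-1"
  cidr_block                 = each.value.subnet_cidr
  compartment_id             = var.compartment_id
  vcn_id                     = oci_core_virtual_network.base_vcn.id
  display_name               = each.value.display_name
  dns_label                  = each.value.dns_label
  prohibit_public_ip_on_vnic = true
}

Inside the node pool resource, the subnet id is then looked up by the name stored in each node pool entry:

subnet_id = oci_core_subnet.workers[each.value["subnet_name"]].id

If the subnets are managed elsewhere, a data source such as oci_core_subnets can filter on display_name and return the matching OCID instead.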

Related

AKS creation with Terraform in Azure taking too much time

I want to deploy an AKS cluster in Azure using Terraform, but it is taking too much time to create, more than 1 hour, and the Terraform job never finishes. In the portal the AKS cluster stays in the Creating status. I'm deploying the AKS cluster in the EastUS2 region.
The VM sizes I used are not too big, so I don't understand what the problem could be.
This is my main file:
main.tf
# Configure the Microsoft Azure Provider.
terraform {
  required_providers {
    azurerm = {
      source  = "hashicorp/azurerm"
      version = ">= 2.26"
    }
    kubernetes = {
      source  = "hashicorp/kubernetes"
      version = ">= 1.24.6"
    }
    azapi = {
      source  = "azure/azapi"
      version = ">=1.1.0"
    }
  }
  required_version = ">= 0.14.9"
}

provider "azurerm" {
  features {}
}

provider "kubernetes" {
  host                   = module.aks.host
  username               = module.aks.username
  password               = module.aks.password
  client_certificate     = module.aks.client_certificate
  client_key             = base64decode(module.aks.client_key)
  cluster_ca_certificate = base64decode(module.aks.cluster_ca_certificate)
}

provider "azapi" {
  # subscription_id = data.azurerm_client_config.current.subscription_id
  # tenant_id       = data.azurerm_client_config.current.tenant_id
}

data "azurerm_client_config" "current" {}

module "ResourceGroup" {
  source = "./ResourceGroup"
}

module "Networks" {
  source              = "./Networks"
  resource_group_name = module.ResourceGroup.rg_name_out
  location            = module.ResourceGroup.rg_location_out
}

module "acr" {
  source                    = "./ACR"
  resource_group_name       = module.ResourceGroup.rg_name_out
  location                  = module.ResourceGroup.rg_location_out
  name                      = var.acr_name
  sku                       = var.acr_sku
  admin_enabled             = var.acr_admin_enabled
  georeplication_locations  = var.acr_georeplication_locations
  soft_delete_policy_status = var.acr_soft_delete_policy_status
  soft_delete_policy_days   = var.acr_soft_delete_policy_days
  identity_name             = var.acr_identity_name
  tags                      = var.tags
  # depends_on = [module.StorageAccount]
}

module "aks" {
  source                           = "./AKS"
  location                         = module.ResourceGroup.rg_location_out
  resource_group_name              = module.ResourceGroup.rg_name_out
  acr_id                           = module.acr.id
  name                             = var.aks_cluster_name
  kubernetes_version               = var.aks_kubernetes_version
  dns_prefix                       = lower(var.aks_cluster_name)
  private_cluster_enabled          = var.aks_private_cluster_enabled
  automatic_channel_upgrade        = var.aks_automatic_channel_upgrade
  sku_tier                         = var.aks_sku_tier
  identity_name                    = var.aks_identity_name
  api_server_authorized_ip_ranges  = [] # module.Networks.subnet_address_bastion
  azure_policy_enabled             = var.aks_azure_policy_enabled
  http_application_routing_enabled = var.aks_http_application_routing_enabled
  network_profile                  = var.aks_network_profile
  aci_connector_linux              = var.aks_aci_connector_linux
  azure_ad_rbac_managed            = var.aks_azure_ad_rbac_managed
  tenant_id                        = data.azurerm_client_config.current.tenant_id
  admin_group_object_ids           = var.aks_admin_group_object_ids
  azure_rbac_enabled               = var.aks_azure_rbac_enabled
  admin_username                   = var.aks_admin_username
  ssh_public_key                   = var.aks_ssh_public_key
  tags                             = var.tags
  depends_on                       = [module.Networks, module.acr]

  default_node_pool = {
    name                 = "system"
    vm_size              = "Standard_D2s_v3"
    node_count           = 1
    enable_auto_scaling  = true
    max_count            = 1
    min_count            = 1
    max_surge            = "50%"
    max_pods             = 36
    os_disk_size_gb      = 50
    os_disk_type         = "Managed"
    ultra_ssd_enabled    = true
    zones                = ["1", "2", "3"]
    node_labels          = { "workload" = "system" }
    node_taints          = ["workload=system:NoSchedule"]
    vnet_subnet_id       = module.Networks.subnet_id
    orchestrator_version = var.aks_kubernetes_version
  }

  node_pools = [
    {
      name                 = "batch"
      mode                 = "User"
      vm_size              = "Standard_D2s_v3"
      node_count           = 1
      enable_auto_scaling  = true
      max_count            = 1
      min_count            = 1
      max_surge            = "50%"
      max_pods             = 36
      os_disk_size_gb      = 50
      os_disk_type         = "Managed"
      ultra_ssd_enabled    = true
      zones                = ["1", "2", "3"]
      node_labels          = { "workload" = "batch" }
      node_taints          = ["workload=batch:NoSchedule"]
      vnet_subnet_id       = module.Networks.subnet_id
      orchestrator_version = var.aks_kubernetes_version
    }
  ]
}

Azure event hubs metrics not appearing when deploying with terraform

When deploying together with some other components of a data pipeline, like a function app and Cosmos DB, the metrics graphs on the Azure Event Hubs portal do not appear; instead the graphs show "Resource not found".
But when I deploy the same Terraform code for the Event Hubs namespace without the other components, the metric graphs appear. Here is the Terraform code:
locals {
  ip_rule_map = flatten([
    for cidr in ["182.191.83.208"] : [
      {
        action  = "Allow"
        ip_mask = cidr
      }
    ]
  ])
}

resource "azurerm_eventhub_namespace" "avro-ingestion" {
  name                = "test-eh"
  location            = "Central US"
  resource_group_name = "test-rg"
  sku                 = "Standard"
  capacity            = 1

  network_rulesets = false ? [{
    default_action                 = "Deny"
    ip_rule                        = local.ip_rule_map
    virtual_network_rule           = []
    trusted_service_access_enabled = true
  }] : [
    {
      default_action                 = "Allow"
      ip_rule                        = local.ip_rule_map
      virtual_network_rule           = []
      trusted_service_access_enabled = true
    }
  ]

  tags = {
    Name      = "avro-ingestion"
    Purpose   = "data-ingestion-infra-deployment"
    CreatedBy = "emumba"
  }
}

resource "azurerm_eventhub_namespace_authorization_rule" "user_managed" {
  name                = "UserManagedSharedAccessKey"
  namespace_name      = azurerm_eventhub_namespace.avro-ingestion.name
  resource_group_name = "test-rg"
  listen              = true
  send                = true
  manage              = true
}

resource "null_resource" "schema-registry" {
  depends_on = [
    azurerm_eventhub_namespace.avro-ingestion
  ]

  provisioner "local-exec" {
    interpreter = ["/bin/bash", "-c"]
    command     = "az eventhubs namespace schema-registry create --name test-schema-group --namespace-name test-eh --resource-group test-rg --schema-compatibility Backward --schema-type Avro"
  }
}

resource "azurerm_eventhub" "thunder" {
  name                = "test"
  namespace_name      = azurerm_eventhub_namespace.avro-ingestion.name
  resource_group_name = "test-rg"
  partition_count     = 2
  message_retention   = 1
}

resource "azurerm_eventhub_consumer_group" "function-app-cg" {
  name                = "fApp-cons-group"
  namespace_name      = azurerm_eventhub_namespace.avro-ingestion.name
  eventhub_name       = azurerm_eventhub.thunder.name
  resource_group_name = "test-rg"
}
The main.tf file where I am calling all the modules, along with the Event Hubs namespace module:
resource "random_string" "random_Sacc1" {
length = 4
special = false
upper = false
min_lower = 1
min_numeric = 1
}
resource "random_string" "random_Sacc2" {
length = 2
special = false
upper = false
min_lower = 1
min_numeric = 1
}
module "azure-resource-group" {
source = "../../modules/resource-group"
region = var.region
res_group_name = var.res_group_name
}
module "azure-virtual-network" {
depends_on = [
module.azure-resource-group
]
source = "../../modules/virtual-network"
rg_name = module.azure-resource-group.name
rg_location = module.azure-resource-group.location
vn_name = var.vn_name
vn_cidrs = var.vn_cidrs
subnets = var.subnets
subnet_cidrs = var.subnet_cidrs
pub_nsg_name = var.pub_nsg_name
private_nsg_name = var.private_nsg_name
internet_ip_cidr_list = var.internet_ip_cidr_list
}
module "azure-ad-app-registration" {
depends_on = [
module.azure-resource-group
]
source = "../../modules/app-role-assignment"
app-display-name = var.app-display-name
rg_name = module.azure-resource-group.name
}
module "azure-eventhubs" {
source = "../../modules/event-hubs"
ns_name = var.eventhub_namespace_name
eventhub_name = var.eventhub_name
cons_group_name = var.cons_group_name
rg_name = module.azure-resource-group.name
rg_location = module.azure-resource-group.location
enable_private_access = var.enable_private_access
cidr_list = var.public_cidr_list
vnet_id_dns = module.azure-virtual-network.vnet-id
private_ep_subnet = module.azure-virtual-network.private-subent1-id
dns_zone_name = var.dns_zone_name_private_ep
schema_group_name = var.eventhub_schema_group_name
}
module "azure-storage-account" {
depends_on = [
module.azure-virtual-network
]
source = "../../modules/storage-account"
storage_acc_name = "${var.storage_acc_name}${random_string.random_Sacc1.id}"
rg_name = module.azure-resource-group.name
rg_location = module.azure-resource-group.location
enable_private_access = var.enable_private_access
cidr_list = var.public_cidr_list
vnet_id_dns = module.azure-virtual-network.vnet-id
private_ep_subnet = module.azure-virtual-network.private-subent1-id
dns_zone_name = var.dns_zone_name_private_ep
}
module "azure-cosmos-db" {
source = "../../modules/cosmos-db"
acc_name = var.cosmos_acc_name
db_name = var.cosmos_db_name
rg_name = module.azure-resource-group.name
rg_location = module.azure-resource-group.location
cosmos_db_container_name = var.cosmos_db_container_name
enable_private_access = var.enable_private_access
cidr_list = var.public_cidr_list
vnet_id_dns = module.azure-virtual-network.vnet-id
private_ep_subnet = module.azure-virtual-network.private-subent1-id
dns_zone_name = var.dns_zone_name_private_ep
synapse_link = var.enable_synapse_link
}
# module "fApp-azure-storage-account" {
# source = "../../modules/storage-account"
# storage_acc_name = "${var.storage_acc_fApp_name}${random_string.random_Sacc2.id}"
# rg_name = module.azure-resource-group.name
# rg_location = module.azure-resource-group.location
# enable_private_access = var.enable_private_access
# cidr_list = var.public_cidr_list
# private_ep_subnet = element(module.azure-virtual-network.subnet_id_list, 1)
# dns_zone_name = var.dns_zone_name_private_ep
# }
module "data-ingestion-fApp" {
depends_on = [
module.azure-cosmos-db,
module.azure-eventhubs,
module.azure-storage-account
]
source = "../../modules/function-app"
rg_name = module.azure-resource-group.name
rg_location = module.azure-resource-group.location
application_insight_name = var.application_insight_name
fApp_service_plan_name = var.fApp_service_plan_name
fApp_name = var.fApp_name
fApp-storage_acc_name = "${var.storage_acc_fApp_name}${random_string.random_Sacc2.id}"
enable_private_access = var.enable_private_access
vnet_id_dns = module.azure-virtual-network.vnet-id
private_ep_subnet = module.azure-virtual-network.private-subent1-id
integration_vnet_name = module.azure-virtual-network.vnet-name
integration_subnet_name = module.azure-virtual-network.private-subent2-name
func_configurations = { "AZURE_CLIENT_ID" = module.azure-ad-app-registration.client_id
"AZURE_CLIENT_SECRET" = module.azure-ad-app-registration.client_secret,
"AZURE_TENANT_ID" = module.azure-ad-app-registration.tenant_id,
"EVENTHUB_NAME" = var.eventhub_name,
"EVENTHUB_FULLY_QUALIFIED_NAMESPACE" = "${var.eventhub_namespace_name}.servicebus.windows.net",
"SCHEMA_GROUP_NAME" = var.eventhub_schema_group_name,
"OUTPUT_CONTAINER" = var.storage_acc_container_name,
"OUTPUT_PATH" = var.storage_acc_container_path,
"COSMOS_DB_URI" = module.azure-cosmos-db.cosmos_account_uri,
"COSMOS_DB_PRIMARY_KEY" = module.azure-cosmos-db.cosmos_account_primary_key,
"COSMOS_DB_NAME" = var.cosmos_db_name,
"COSMOS_DB_CONTAINER_NAME" = var.cosmos_db_container_name,
"a10devops_namespace_connection" = module.azure-eventhubs.eventhub_conn_str,
"a10devops_storage_connection" = module.azure-storage-account.storage_account_conn_str }
}

terraform add multiple VMs and change the default names of the resources

I need a bit of help: I have a Terraform script and I want to add multiple VMs and change the name of the network card to something like node_name-NIC, and do the same for the other resources, but it is failing and I can't find the proper way to do it.
Below is the Terraform script:
terraform {
  required_providers {
    azurerm = {
      // source = "hashicorp/azurerm"
      version = "=1.44"
    }
  }
}

locals {
  rsname      = "testing-new-terraform-modules"
  node_name   = ["server1", "server2"]
  clustersize = 2
  node_size   = "Standard_B4ms"
  av_set_name = "Windows-AV-Set"
  vnet_name   = "VNET_1"
  vnet_rg     = "RG_VNET_D"
  gw_subnet   = "SUB_GW_INT"
  vm_subnet   = "SUB_WIN"
  image_rg    = "RG__TEMPLATE"

  common_tags = {
    lbuildingblock = "GENERAL"
    customer       = "IND"
  }
}

module "resource_group" {
  source = "../modules/resources/azure/data-resource-group"
  rsname = local.rsname
}

data "azurerm_virtual_network" "virtual_network" {
  name                = local.vnet_name
  resource_group_name = local.vnet_rg
}

# GatewayZone subnet, for the Load Balancer frontend IP address
module "gw_subnet" {
  source      = "../modules/resources/azure/data-subnet"
  subnet-name = local.gw_subnet
  vnet-name   = data.azurerm_virtual_network.virtual_network.name
  rs-name     = data.azurerm_virtual_network.virtual_network.resource_group_name
}

module "windows_subnet" {
  source = "../modules/resources/azure/data-subnet"
  // We will use the SUB_LHIND_P_APP subnet, no need to create a new subnet just for two servers
  subnet-name = local.vm_subnet
  rs-name     = local.vnet_rg
  vnet-name   = local.vnet_name
}

//data "azurerm_network_security_group" "app_nsg" {
//  name                = "SUB_LHIND_D_APP_NSG"
//  resource_group_name = data.azurerm_virtual_network.virtual_network.resource_group_name
//}

module "nic" {
  source         = "../modules/resources/azure/network-interface"
  location       = module.resource_group.rs_group_location
  name           = "${local.node_name[0]}-NIC"
  nic_count      = local.clustersize
  resource_group = module.resource_group.rs_group_name
  subnet_id      = module.windows_subnet.subnet_id
  tags           = local.common_tags
}

module "av_set" {
  source         = "../modules/resources/azure/availability-set"
  av_name        = local.av_set_name
  resource_group = module.resource_group.rs_group_name
  location       = module.resource_group.rs_group_location
}

module "template_image" {
  source         = "../modules/resources/azure/data-templates"
  template_name  = "WindowsServer2019"
  resource_group = local.image_rg
}

module "windows" {
  source                = "../modules/resources/azure/windows-server"
  location              = module.resource_group.rs_group_location
  network_interface_ids = module.nic.nic_id
  node_count            = local.clustersize
  node_name             = local.node_name
  node_size             = local.node_size
  av_set_id             = module.av_set.availability_set_id
  resource_group        = module.resource_group.rs_group_name
  template_id           = module.template_image.template_id
  username              = var.username
  password              = var.password
  domain_user           = var.domain_user
  domain_pass           = var.domain_pass
}
It is failing with the below error:
Error: Invalid index
on ../modules/resources/azure/network-interface/main.tf line 10, in resource "azurerm_network_interface" "nic":
10: name = var.name[count.index]
|----------------
| count.index is 0
| var.name is "SW-AZLHIND-580-NIC"
This value does not have any indices.
The network-interface resource is like below:
resource "azurerm_network_interface" "nic" {
count = var.nic_count
location = var.location
name = var.name[count.index]
resource_group_name = var.resource_group
tags = var.tags
// network_security_group_id = var.network_security_group_id
ip_configuration {
name = var.name[count.index]
private_ip_address_allocation = "dynamic"
subnet_id = var.subnet_id
}
}
You can use the following; var.name is a single string here, so it cannot be indexed, but you can append the count index to it (note the $ on both interpolations):
name = "${var.name}-${count.index}"

Inappropriate value for "role": string required (Terraform 0.12)

I have written a Terraform configuration file for a bastion entry point on an application.

resource "aws_instance" "instance" {
  ami                  = var.ami
  ebs_optimized        = var.ebs_optimized
  iam_instance_profile = aws_iam_instance_profile.iam_instance_profile
  instance_type        = var.instance_type
  key_name             = "quadops"
  subnet_id            = var.subnet_id
  user_data            = var.user_data

  tags = {
    Name     = "${var.name}"
    Business = "Infrastracture"
    app_name = "infra"
    app_env  = "${var.env}"
  }

  volume_tags = {
    Name     = "${var.name}"
    Business = "Infrastracture"
    app_name = "infra"
    app_env  = "${var.env}"
  }

  vpc_security_group_ids = [aws_security_group.security_group.id]
}
resource "aws_security_group" "security_group" {
name = "${var.name}-security-group"
vpc_id = var.vpc_id
ingress {
from_port = 22
to_port = 22
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
egress {
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
}
tags = {
Name = "${var.name}"
Business = "Infrastracture"
app_name = "infra"
app_env = "${var.env}"
}
}
resource "aws_iam_instance_profile" "iam_instance_profile" {
name = "${var.name}-iam-instance-profile"
role = aws_iam_role.iam_role
tags = {
Name = "${var.name}"
Business = "Infrastracture"
app_name = "infra"
app_env = "${var.env}"
}
}
resource "aws_iam_role" "iam_role" {
assume_role_policy = jsonencode({
Version = "2012-10-17"
Statement = [
{
Action = "sts:AssumeRole"
Effect = "Allow"
Sid = ""
Principal = {
Service = "ec2.amazonaws.com"
}
},
]
})
name = "${var.name}-iam-role"
tags = {
Name = "${var.name}-iam-role"
Business = "Infrastracture"
app_name = "infra"
app_env = "${var.env}"
}
}
resource "aws_eip" "eip" {
vpc = true
instance = aws_instance.instance.id
tags = {
Name = "${var.name}-eip"
Business = "Infrastracture"
app_name = "infra"
app_env = "${var.env}"
}
}
resource "cloudflare_record" "record" {
zone_id = var.zone_id
name = "bastion.${var.env}"
type = "A"
value = "aws_eip.eip.public_ip"
}
Upon running plan, I'm getting this error:
on .terraform/modules/bastion/main.tf line 49, in resource "aws_iam_instance_profile" "iam_instance_profile":
49: role = aws_iam_role.iam_role
|----------------
| aws_iam_role.iam_role is object with 15 attributes
Inappropriate value for attribute "role": string required.
I can't seem to get over this hurdle. I think I'm calling the resource correctly, but Terraform 0.12 says that it requires a string. Am I passing the values incorrectly? Thanks.
You are passing the entire aws_iam_role object to the role argument, which is causing the error. Instead, try passing the name of the role like so:
resource "aws_iam_instance_profile" "iam_instance_profile" {
  role = aws_iam_role.iam_role.name
}
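The same pattern applies in two other places in the question's code, where a quoted string or a whole object is passed instead of the intended attribute; a sketch:

# aws_instance's iam_instance_profile argument also expects the profile's name:
iam_instance_profile = aws_iam_instance_profile.iam_instance_profile.name

# and the Cloudflare record should reference the EIP attribute, not a quoted string:
resource "cloudflare_record" "record" {
  zone_id = var.zone_id
  name    = "bastion.${var.env}"
  type    = "A"
  value   = aws_eip.eip.public_ip   # unquoted, otherwise the record contains the literal text
}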

ECS and Application Load Balancer Ephemeral Ports using Terraform

I tried to build an ECS cluster with an ALB in front using Terraform. As I use dynamic port mapping, the targets are not registered as healthy. I played with the health check and the success codes; if I set it to 301, everything is fine.
ECS
data "template_file" "mb_task_template" {
template = file("${path.module}/templates/marketplace-backend.json.tpl")
vars = {
name = "${var.mb_image_name}"
port = "${var.mb_port}"
image = "${aws_ecr_repository.mb.repository_url}"
log_group = "${aws_cloudwatch_log_group.mb.name}"
region = "${var.region}"
}
}
resource "aws_ecs_cluster" "mb" {
name = var.mb_image_name
}
resource "aws_ecs_task_definition" "mb" {
family = var.mb_image_name
container_definitions = data.template_file.mb_task_template.rendered
volume {
name = "mb-home"
host_path = "/ecs/mb-home"
}
}
resource "aws_ecs_service" "mb" {
name = var.mb_repository_url
cluster = aws_ecs_cluster.mb.id
task_definition = aws_ecs_task_definition.mb.arn
desired_count = 2
iam_role = var.aws_iam_role_ecs
depends_on = [aws_autoscaling_group.mb]
load_balancer {
target_group_arn = var.target_group_arn
container_name = var.mb_image_name
container_port = var.mb_port
}
}
resource "aws_autoscaling_group" "mb" {
name = var.mb_image_name
availability_zones = ["${var.availability_zone}"]
min_size = var.min_instance_size
max_size = var.max_instance_size
desired_capacity = var.desired_instance_capacity
health_check_type = "EC2"
health_check_grace_period = 300
launch_configuration = aws_launch_configuration.mb.name
vpc_zone_identifier = flatten([var.vpc_zone_identifier])
lifecycle {
create_before_destroy = true
}
}
data "template_file" "user_data" {
template = file("${path.module}/templates/user_data.tpl")
vars = {
ecs_cluster_name = "${var.mb_image_name}"
}
}
resource "aws_launch_configuration" "mb" {
name_prefix = var.mb_image_name
image_id = var.ami
instance_type = var.instance_type
security_groups = ["${var.aws_security_group}"]
iam_instance_profile = var.aws_iam_instance_profile
key_name = var.key_name
associate_public_ip_address = true
user_data = data.template_file.user_data.rendered
lifecycle {
create_before_destroy = true
}
}
resource "aws_cloudwatch_log_group" "mb" {
name = var.mb_image_name
retention_in_days = 14
}
ALB
locals {
  target_groups = ["1", "2"]
}

resource "aws_alb" "mb" {
  name               = "${var.mb_image_name}-alb"
  internal           = false
  load_balancer_type = "application"
  security_groups    = ["${aws_security_group.mb_alb.id}"]
  subnets            = var.subnets

  tags = {
    Name = var.mb_image_name
  }
}

resource "aws_alb_target_group" "mb" {
  count       = length(local.target_groups)
  name        = "${var.mb_image_name}-tg-${element(local.target_groups, count.index)}"
  port        = var.mb_port
  protocol    = "HTTP"
  vpc_id      = var.vpc_id
  target_type = "instance"

  health_check {
    path                = "/health"
    protocol            = "HTTP"
    timeout             = "10"
    interval            = "15"
    healthy_threshold   = "3"
    unhealthy_threshold = "3"
    matcher             = "200-299"
  }

  lifecycle {
    create_before_destroy = true
  }

  tags = {
    Name = var.mb_image_name
  }
}

resource "aws_alb_listener" "mb_https" {
  load_balancer_arn = aws_alb.mb.arn
  port              = 443
  protocol          = "HTTPS"
  ssl_policy        = "ELBSecurityPolicy-2016-08"
  certificate_arn   = module.dns.certificate_arn

  default_action {
    type             = "forward"
    target_group_arn = aws_alb_target_group.mb.0.arn
  }
}

resource "aws_alb_listener_rule" "mb_https" {
  listener_arn = aws_alb_listener.mb_https.arn
  priority     = 100

  action {
    type             = "forward"
    target_group_arn = aws_alb_target_group.mb.0.arn
  }

  condition {
    field  = "path-pattern"
    values = ["/health/"]
  }
}
Okay, looks like the code above is working. I had a different issue with networking.
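For anyone hitting the same symptom: with dynamic host port mapping, the container instances receive traffic on the ephemeral port range rather than on var.mb_port, so the instances' security group must allow that range from the ALB's security group. A minimal sketch (the rule resource name is illustrative; the exact ephemeral range is listed in /proc/sys/net/ipv4/ip_local_port_range on the instance):

resource "aws_security_group_rule" "alb_to_ecs_ephemeral" {
  type                     = "ingress"
  from_port                = 32768   # common default start of the ephemeral range
  to_port                  = 65535
  protocol                 = "tcp"
  security_group_id        = var.aws_security_group         # the ECS instances' SG
  source_security_group_id = aws_security_group.mb_alb.id   # the ALB's SG
}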
