I have this code:
resource "oci_core_security_list" "db_security_list" {
count = var.deploy_database ? 1 : 0
compartment_id = var.network_compartment_id
vcn_id = var.vcn_id
freeform_tags = { "Component" = "Database" }
display_name = "test-db-sl"
ingress_security_rules {
protocol = "6" # TCP
source = var.compute_subnet_cidr
tcp_options {
max = "1522"
min = "1522"
}
}
}
Currently compute_subnet_cidr holds a single subnet CIDR block. How can I iterate over it if compute_subnet_cidr becomes a list?
compute_subnet_cidr = ["10.10.10.0/24", "10.10.10.1/24", "10.10.10.2/24"]
How can I change the above code?
Yes, you can use a dynamic block:
resource "oci_core_security_list" "db_security_list" {
count = var.deploy_database ? 1 : 0
compartment_id = var.network_compartment_id
vcn_id = var.vcn_id
freeform_tags = { "Component" = "Database" }
display_name = "test-db-sl"
dynamic "ingress_security_rules" {
for_each = var.compute_subnet_cidr
content {
protocol = "6" # TCP
source = ingress_security_rules.value
tcp_options {
max = "1522"
min = "1522"
}
}
}
}
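For this to work, compute_subnet_cidr has to be declared as a list; a minimal sketch of the declaration, using the example values from the question:

variable "compute_subnet_cidr" {
  description = "Subnet CIDRs allowed to reach the database listener"
  type        = list(string)
  default     = ["10.10.10.0/24", "10.10.10.1/24", "10.10.10.2/24"]
}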
I'm getting stuck with a problem and need Terraform expert help. I want to create VPSes in GCP with count, using a module. How do I correctly create and attach a google_compute_address and a google_compute_disk to each VPS with different names? Any help, please.
Module code:
resource "google_compute_instance" "vps" {
count = var.server_count
name = var.server_count > 1 ? "${var.server_name}-${count.index}" : var.server_name
description = var.server_description
machine_type = var.server_type
zone = var.server_datacenter
deletion_protection = var.server_delete_protection
labels = var.server_labels
metadata = var.server_metadata
tags = var.server_tags
boot_disk {
auto_delete = false
initialize_params {
size = var.boot_volume_size
type = var.boot_volume_type
image = var.boot_volume_image
labels = var.boot_volume_labels
}
}
dynamic "attached_disk" {
for_each = var.volumes
content {
source = attached_disk.value["volume_name"]
}
}
dynamic "network_interface" {
for_each = var.server_network
content {
subnetwork = network_interface.value["subnetwork_name"]
network_ip = network_interface.value["subnetwork_ip"]
dynamic "access_config" {
for_each = network_interface.value.nat_ip ? [1] : []
content {
nat_ip = google_compute_address.static_ip.address
}
}
}
}
}
resource "google_compute_disk" "volume" {
for_each = var.volumes
name = each.value["volume_name"]
type = each.value["volume_type"]
size = each.value["volume_size"]
zone = var.server_datacenter
labels = each.value["volume_labels"]
}
resource "google_compute_address" "static_ip" {
count = var.server_count
name = var.server_count > 1 ? "${var.server_name}-${count.index}" : var.server_name
region = var.server_region
}
Usage example:
module "vps-test" {
source = "../module"
credentials_file = "../../../../main/vault/prod/.tf/terraform-bb-prod-ground.json"
server_count = 2
server_name = "example-vps"
server_description = "simple vps for module testing"
server_type = "e2-small"
server_region = "europe-west4"
server_datacenter = "europe-west4-c"
server_labels = {
project = "terraform"
environment = "test"
}
server_metadata = {
groups = "parent_group.child_group"
}
boot_volume_image = "debian-cloud/debian-11"
boot_volume_size = 30
boot_volume_labels = {
environment = "production"
project = "v3"
type = "system"
}
server_tags = ["postgres", "production", "disable-gce-firewall"]
server_delete_protection = true
server_network = {
common_network = {
subnetwork_name = "${data.terraform_remote_state.network.outputs.subnetwork_vpc_production_common_name}"
subnetwork_ip = ""
nat_ip = true
} # },
# custom_network = {
# subnetwork_name = (data.terraform_remote_state.network.outputs.subnetwork_vpc_production_k8s_name)
# subnetwork_ip = ""
# nat_ip = false
# }
}
volumes = {
volume_data1 = {
volume_name = "v3-postgres-saga-import-test-storage"
volume_size = "40"
volume_type = "pd-ssd"
volume_labels = {
environment = "production"
project = "v3"
type = "storage"
}
},
volume_data2 = {
volume_name = "volume-vpstest2"
volume_size = "20"
volume_type = "pd-ssd"
volume_labels = {
environment = "production"
project = "v2"
type = "storage"
}
}
}
}
Now the error is: Because google_compute_address.static_ip has "count" set, its attributes must be accessed on specific instances. And I know an error about duplicate disk names will come next.
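A minimal sketch of the usual fix for the address error, assuming each instance should get the address created at its own index: access the counted resource with count.index inside the instance's network_interface block.

dynamic "network_interface" {
  for_each = var.server_network
  content {
    subnetwork = network_interface.value["subnetwork_name"]
    network_ip = network_interface.value["subnetwork_ip"]
    dynamic "access_config" {
      for_each = network_interface.value.nat_ip ? [1] : []
      content {
        # Pair instance N with address N instead of the whole resource
        nat_ip = google_compute_address.static_ip[count.index].address
      }
    }
  }
}

As for the disks: a google_compute_disk can be attached read-write to only one instance, so with server_count = 2 the shared var.volumes entries would need per-instance names (for example, keyed by both the volume name and the instance index) before each VPS can get its own copy.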
Is there a way to use the list below in a for loop and add it to target_groups? I am trying to use the prefix variable together with the target_groups variable in a for loop. I have also tested for_each. target_groups expects a list, but for_each does not give that expected result.
variable "prefix" {
description = "NLB Prefix"
type = any
default = "test-target"
}
variable "target_groups" {
description = "NLB"
type = any
default = {
tg1 = {
name_prefix = "test"
backend_protocol = "TCP"
backend_port = 443
target_type = "ip"
deregistration_delay = 10
preserve_client_ip = true
stickiness = {
enabled = true
type = "source_ip"
}
targets = {
appl1 = {
target_id = "191.11.11.11"
port = 443
}
}
},
}
}
}
I tried the for_each approach below:
module "g-appl_nlb" {
source = "../../modules/compute/lb"
name = format("%s-g-appl-nlb", var.name_prefix)
load_balancer_type = "network"
vpc_id = data.aws_vpc.target_vpc.id
...
target_groups = [
for_each = var.target_groups
name_previs = var.prefix
backend_protocol = each.value["backend_protocol"]
backend_port = each.value["backend_port"]
target_type = each.value["target_type"]
deregistration_delay = each.value["deregistration_delay"]
preserve_client_ip = each.value["preserve_client_ip"]
stickiness = each.value["stickiness"]
]
....
Basically, I managed to solve this with the approach below.
locals {
  target_groups = flatten([
    for tg_data in var.target_groups : {
      name_prefix          = var.name_prefix
      backend_protocol     = tg_data.backend_protocol
      backend_port         = tg_data.backend_port
      target_type          = tg_data.target_type
      deregistration_delay = tg_data.deregistration_delay
      preserve_client_ip   = tg_data.preserve_client_ip
      ....
    }
  ])
}

module "g-appl_nlb" {
  source             = "../../modules/compute/lb"
  name               = format("%s-g-appl-nlb", var.name_prefix)
  load_balancer_type = "network"
  vpc_id             = data.aws_vpc.target_vpc.id
  ...
  target_groups = local.target_groups
}
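For reference, an equivalent inline form (a sketch using the same variables; target_groups_alt is a hypothetical name) that merges the prefix over each target-group object instead of copying every attribute by hand:

locals {
  # One object per target group, with name_prefix overridden by var.prefix
  target_groups_alt = [
    for tg in values(var.target_groups) : merge(tg, { name_prefix = var.prefix })
  ]
}

merge() keeps every attribute of the original object and only replaces name_prefix, so new attributes do not have to be added to the local by hand.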
I'm using the depends_on block with a condition check while creating object storage. Surprisingly, I saw the following error. Any pointers on how to resolve it?
code:
locals {
  is_gov = local.realm == "oc2" || local.realm == "oc3"
}

resource "oci_identity_compartment" "gov_comp" {
  compartment_id = var.comp1
  description    = "GOV COMP"
  name           = "gov_comp"
  defined_tags   = { "Operations.CostCenter" = "001" }
  freeform_tags  = { "Department" = "Executives" }
}

resource "oci_identity_compartment" "non_gov_comp" {
  compartment_id = var.comp3
  description    = "commercial comp"
  name           = "non_gov_comp"
  defined_tags   = { "Operations.CostCenter" = "000" }
  freeform_tags  = { "Department" = "Non-Executives" }
}

resource "oci_objectstorage_bucket" "test_bucket" {
  compartment_id = var.compartment_id
  name           = var.bucket_name
  namespace      = var.bucket_namespace
  # this conditional is what triggers the error below
  depends_on = local.is_gov ? [oci_identity_compartment.gov_comp] : [oci_identity_compartment.non_gov_comp]
}
Error:
depends_on = local.is_gov ? [oci_identity_compartment.gov_comp] : [
A static list expression is required.
Your gov_comp and non_gov_comp are always created together. They are not exclusive. Thus your test_bucket should be created once both of these resources exist:
resource "oci_objectstorage_bucket" "test_bucket" {
compartment_id = var.compartment_id
name = var.bucket_name
namespace = var.bucket_namespace
depends_on = [oci_identity_compartment.gov_com, oci_identity_compartment.non_gov_comp]
}
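If the two compartments were meant to be exclusive per realm, a sketch of that variant (assuming the same locals and variables): drive each compartment's count from local.is_gov. depends_on can then list both resources statically, because depending on a resource with zero instances is harmless.

resource "oci_identity_compartment" "gov_comp" {
  count          = local.is_gov ? 1 : 0 # only in government realms
  compartment_id = var.comp1
  description    = "GOV COMP"
  name           = "gov_comp"
}

resource "oci_identity_compartment" "non_gov_comp" {
  count          = local.is_gov ? 0 : 1 # only in commercial realms
  compartment_id = var.comp3
  description    = "commercial comp"
  name           = "non_gov_comp"
}

resource "oci_objectstorage_bucket" "test_bucket" {
  compartment_id = var.compartment_id
  name           = var.bucket_name
  namespace      = var.bucket_namespace
  # static list; the realm check lives in each compartment's count
  depends_on = [oci_identity_compartment.gov_comp, oci_identity_compartment.non_gov_comp]
}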
I have Terraform code that creates a Kubernetes cluster resource in Oracle Cloud. I want to skip the endpoint_config block when the cluster is public and include it when the cluster is private. How can I achieve that?
resource "oci_containerengine_cluster" "cluster" {
count = var.deploy_oke_cluster ? 1 : 0
compartment_id = var.compartment_id
kubernetes_version = var.cluster_kubernetes_version
name = "oke-${var.environment}"
vcn_id = oci_core_virtual_network.base_vcn.id
endpoint_config {
is_public_ip_enabled = false
subnet_id = oci_core_subnet.snet-apiserver.id
}
options {
add_ons {
is_kubernetes_dashboard_enabled = true
is_tiller_enabled = false
}
kubernetes_network_config {
pods_cidr = var.pods_cidr
services_cidr = var.services_cidr
}
service_lb_subnet_ids = [oci_core_subnet.snet-pub-lb.id]
}
}
You can do this with a dynamic block (var.is_public below is an assumed boolean flag):
resource "oci_containerengine_cluster" "cluster" {
count = var.deploy_oke_cluster ? 1 : 0
compartment_id = var.compartment_id
kubernetes_version = var.cluster_kubernetes_version
name = "oke-${var.environment}"
vcn_id = oci_core_virtual_network.base_vcn.id
dynamic "endpoint_config" {
for_each = var.is_public == true ? [1] : []
content {
is_public_ip_enabled = false
subnet_id = oci_core_subnet.snet-apiserver.id
}
}
options {
add_ons {
is_kubernetes_dashboard_enabled = true
is_tiller_enabled = false
}
kubernetes_network_config {
pods_cidr = var.pods_cidr
services_cidr = var.services_cidr
}
service_lb_subnet_ids = [oci_core_subnet.snet-pub-lb.id]
}
}
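A minimal declaration for the assumed flag:

variable "is_public" {
  description = "Whether the OKE API endpoint is public; when false, endpoint_config pins it to a private subnet"
  type        = bool
  default     = false
}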
I am looking for a way to differentiate the tag names in my module, and I know there is a roadblock there: count.index can't be used in a module block that doesn't have count set. I would like to increment my tag name based on the position of the public subnet. How would I do this?
variable "public_subnet_count" {
default = 3
}
variable "public_subnet_name" {
default = {
0 = "cp-net0"
1 = "k8s-net0"
2 = "um-net0"
}
}
module "vpc" {
source = "github.com/terraform-aws-modules/terraform-aws-vpc"
name = "apigee"
cidr = "10.0.0.0/16"
azs = data.aws_availability_zones.available.names
private_subnets = ["10.0.1.0/24", "10.0.2.0/24", "10.0.3.0/24"]
public_subnets = ["10.0.101.0/24", "10.0.102.0/24", "10.0.103.0/24"]
assign_generated_ipv6_cidr_block = true
enable_nat_gateway = true
single_nat_gateway = true
public_subnet_tags = {
Name = lookup(var.public_subnet_name, count.index) // will not work
}
tags = {
Owner = "212743998"
Environment = "sandbox"
}
vpc_tags = {
Name = "apigee"
}
}
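One sketch that sidesteps the module, assuming the module's public_subnets output (the list of created subnet IDs) and the AWS provider's aws_ec2_tag resource: tag each subnet after creation, indexing the name map by position.

resource "aws_ec2_tag" "public_subnet_name" {
  count       = var.public_subnet_count
  resource_id = module.vpc.public_subnets[count.index]
  key         = "Name"
  value       = lookup(var.public_subnet_name, count.index)
}

One caveat: the module also sets a Name tag on each subnet by default, so this resource and the module would fight over the same key; public_subnet_tags should not define Name as well.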