Can I use a module input, like a variable, to tag a volume? I have this block:
module "jenkins" {
source = "terraform-aws-modules/ec2-instance/aws"
version = "3.4.0"
name = "Jenkins"
ami = data.aws_ami.amazon_linux.id
instance_type = "t3.nano"
availability_zone = element(module.vpc.azs, 0)
subnet_id = element(module.vpc.public_subnets, 0)
vpc_security_group_ids = [module.jenkins-sg.security_group_id]
associate_public_ip_address = true
key_name = "my_key"
user_data = file("userdata/init.sh")
enable_volume_tags = false
root_block_device = [
{
encrypted = false
volume_type = "gp3"
volume_size = 15
throughput = 150
tags = {
Name = "${name}-root"
terraform = "true"
user_type = "ebs"
}
},
]
ebs_block_device = [
{
encrypted = false
device_name = "/dev/sdf"
volume_type = "gp3"
volume_size = 50
throughput = 150
}
]
volume_tags = {
Name = "${name}-data"
terraform = "true"
user_type = "ebs"
}
tags = {
terraform = "true"
user_type = "ec2"
}
}
As seen in "${name}-root" and "${name}-data", is it possible to tag using the value of the module input "name"?
Thanks
You can't reference a module's input from outside the module like this. However, you can use an input variable (which can be overridden from outside) or a local value (a fixed value) to achieve the same result.
input variable
variable "ec2_instance_name" {
type = string
description = "The name of the ec2 instance"
default = "jenkins"
}
module "jenkins" {
source = "terraform-aws-modules/ec2-instance/aws"
version = "3.4.0"
name = var.ec2_instance_name
ami = data.aws_ami.amazon_linux.id
instance_type = "t3.nano"
availability_zone = element(module.vpc.azs, 0)
subnet_id = element(module.vpc.public_subnets, 0)
vpc_security_group_ids = [module.jenkins-sg.security_group_id]
associate_public_ip_address = true
key_name = "my_key"
user_data = file("userdata/init.sh")
enable_volume_tags = false
root_block_device = [
{
encrypted = false
volume_type = "gp3"
volume_size = 15
throughput = 150
tags = {
Name = "${var.ec2_instance_name}-root"
terraform = "true"
user_type = "ebs"
}
},
]
ebs_block_device = [
{
encrypted = false
device_name = "/dev/sdf"
volume_type = "gp3"
volume_size = 50
throughput = 150
}
]
volume_tags = {
Name = "${var.ec2_instance_name}-data"
terraform = "true"
user_type = "ebs"
}
tags = {
terraform = "true"
user_type = "ec2"
}
}
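Because ec2_instance_name is an input variable, it can be overridden without editing the module block, for example from a terraform.tfvars file or on the command line (the value "jenkins-prod" below is only an illustration):
# terraform.tfvars
ec2_instance_name = "jenkins-prod"
or:
terraform apply -var="ec2_instance_name=jenkins-prod"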
local value
locals {
ec2_instance_name = "jenkins"
}
module "jenkins" {
source = "terraform-aws-modules/ec2-instance/aws"
version = "3.4.0"
name = local.ec2_instance_name
ami = data.aws_ami.amazon_linux.id
instance_type = "t3.nano"
availability_zone = element(module.vpc.azs, 0)
subnet_id = element(module.vpc.public_subnets, 0)
vpc_security_group_ids = [module.jenkins-sg.security_group_id]
associate_public_ip_address = true
key_name = "my_key"
user_data = file("userdata/init.sh")
enable_volume_tags = false
root_block_device = [
{
encrypted = false
volume_type = "gp3"
volume_size = 15
throughput = 150
tags = {
Name = "${local.ec2_instance_name}-root"
terraform = "true"
user_type = "ebs"
}
},
]
ebs_block_device = [
{
encrypted = false
device_name = "/dev/sdf"
volume_type = "gp3"
volume_size = 50
throughput = 150
}
]
volume_tags = {
Name = "${local.ec2_instance_name}-data"
terraform = "true"
user_type = "ebs"
}
tags = {
terraform = "true"
user_type = "ec2"
}
}
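As an optional refinement of either approach, the repeated tag keys can be kept in a local map and combined with Terraform's merge() function, so the shared tags live in one place. A minimal sketch (the common_tags name is just an illustration), showing only the name and tag arguments:
locals {
  ec2_instance_name = "jenkins"
  common_tags = {
    terraform = "true"
  }
}

module "jenkins" {
  # ... same arguments as above ...
  name = local.ec2_instance_name

  root_block_device = [
    {
      encrypted   = false
      volume_type = "gp3"
      volume_size = 15
      throughput  = 150
      tags = merge(local.common_tags, {
        Name      = "${local.ec2_instance_name}-root"
        user_type = "ebs"
      })
    },
  ]

  volume_tags = merge(local.common_tags, {
    Name      = "${local.ec2_instance_name}-data"
    user_type = "ebs"
  })

  tags = merge(local.common_tags, { user_type = "ec2" })
}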
I have Terraform code that creates an EC2-type Batch job. My AWS Batch job downloads about 50 GB of data in total. How do I add that storage space to my instance in Terraform, or is there another way to add that storage?
This is my Terraform code:
resource "aws_batch_compute_environment" "pipeline" {
compute_environment_name = "${var.product}-${var.application}-pipeline-batch-compute-environment-${var.env}"
compute_resources {
instance_role = aws_iam_instance_profile.pipeline_batch.arn
instance_type = var.pipeline_instance_type
max_vcpus = var.pipeline_max_vcpus
min_vcpus = 0
security_group_ids = [
aws_security_group.pipeline_batch.id
]
subnets = var.subnets
type = "EC2"
}
service_role = aws_iam_role.pipeline_batch_service_role.arn
type = "MANAGED"
tags = {
environment = var.env
}
}
resource "aws_batch_job_queue" "pipeline" {
depends_on = [aws_batch_compute_environment.pipeline]
name = "${var.product}-${var.application}-pipeline-batch-job-queue-${var.env}"
state = "ENABLED"
priority = 1
compute_environments = [
aws_batch_compute_environment.pipeline.arn
]
tags = {
environment = var.env
}
}
resource "aws_batch_job_definition" "pipeline" {
depends_on = [aws_ecr_repository.pipeline]
name = "${var.product}-${var.application}-pipeline-batch-job-definition-${var.env}"
type = "container"
container_properties = <<CONTAINER_PROPERTIES
{
"image": "${aws_ecr_repository.pipeline.repository_url}:latest",
"command": [ "--s3_bucket", "${var.input_bucket}", "--s3_upload_bucket", "${var.upload_bucket}"],
"executionRoleArn": "${aws_iam_role.pipeline_batch_instance_role.arn}",
"memory": ${var.pipeline_memory},
"vcpus": ${var.pipeline_vcpus}
}
CONTAINER_PROPERTIES
tags = {
environment = var.env
}
}
If you want, you may be able to mount a shared EFS drive; you could try something like this. Keep in mind I have not tested this, and you will need to replace certain parameters with your own subnet IDs, VPC ID, etc.:
resource "aws_batch_compute_environment" "pipeline" {
compute_environment_name = "${var.product}-${var.application}-pipeline-batch-compute-environment-${var.env}"
compute_resources {
instance_role = aws_iam_instance_profile.pipeline_batch.arn
instance_type = var.pipeline_instance_type
max_vcpus = var.pipeline_max_vcpus
min_vcpus = 0
security_group_ids = [
aws_security_group.pipeline_batch.id
]
subnets = var.subnets
type = "EC2"
}
service_role = aws_iam_role.pipeline_batch_service_role.arn
type = "MANAGED"
tags = {
environment = var.env
}
}
resource "aws_batch_job_queue" "pipeline" {
depends_on = [aws_batch_compute_environment.pipeline]
name = "${var.product}-${var.application}-pipeline-batch-job-queue-${var.env}"
state = "ENABLED"
priority = 1
compute_environments = [
aws_batch_compute_environment.pipeline.arn
]
tags = {
environment = var.env
}
}
resource "aws_batch_job_definition" "pipeline" {
depends_on = [aws_ecr_repository.pipeline]
name = "${var.product}-${var.application}-pipeline-batch-job-definition-${var.env}"
type = "container"
container_properties = <<CONTAINER_PROPERTIES
{
"image": "${aws_ecr_repository.pipeline.repository_url}:latest",
"command": [ "--s3_bucket", "${var.input_bucket}", "--s3_upload_bucket", "${var.upload_bucket}"],
"executionRoleArn": "${aws_iam_role.pipeline_batch_instance_role.arn}",
"memory": ${var.pipeline_memory},
"vcpus": ${var.pipeline_vcpus},
"mountPoints": [
{
readOnly = null,
containerPath = "/var/batch"
sourceVolume = "YOUR-FILE-SYSTEM-NAME"
}
]
}
CONTAINER_PROPERTIES
tags = {
environment = var.env
}
}
resource "aws_efs_file_system" "general" {
creation_token = "YOUR-FILE-SYSTEM-NAME"
#kms_key_id = module.kms.arn
#encrypted = true
encrypted = false
performance_mode = "generalPurpose"
throughput_mode = "provisioned"
provisioned_throughput_in_mibps = 8
tags = {Name = "YOUR-FILE-SYSTEM-NAME"}
}
resource "aws_efs_access_point" "general" {
tags = {Name = "YOUR-FILE-SYSTEM-NAME"}
file_system_id = aws_efs_file_system.general.id
root_directory {
path = "/YOUR-FILE-SYSTEM-NAME"
creation_info {
owner_gid = "1000"
owner_uid = "1000"
permissions = "755"
}
}
posix_user {
uid = "1000"
gid = "1000"
}
}
## FOR REDUNDANCY
## It is a good idea to add a mount target per AZ you use
resource "aws_efs_mount_target" "a" {
source = "app.terraform.io/popreach/efs-mount-target/aws"
version = "1.0.0"
file_system_id = aws_efs_file_system.general.id
subnet_id = PUBLIC-SUBNET-A
security_groups = [aws_security_group.general.id]
}
resource "aws_efs_mount_target" "b" {
file_system_id = aws_efs_file_system.general.id
subnet_id = PUBLIC-SUBNET-B
security_groups = [aws_security_group.general.id]
}
resource "aws_security_group" "general" {
name = YOUR-SECURITY-GROUP-NAME
vpc_id = YOUR-VPC-ID
tags = {Name = YOUR-SECURITY-GROUP-NAME}
}
resource "aws_security_group_rule" "ingress" {
type = "ingress"
cidr_blocks = ["0.0.0.0/0"]
ipv6_cidr_blocks = ["::/0"]
from_port = "2049"
to_port = "2049"
protocol = "tcp"
security_group_id = aws_security_group.general.id
}
resource "aws_security_group_rule" "egress" {
type = "egress"
description = "egress"
cidr_blocks = ["0.0.0.0/0"]
ipv6_cidr_blocks = ["::/0"]
from_port = "0"
to_port = "0"
protocol = "all"
security_group_id = aws_security_group.general.id
}
You'll be able to mount your EFS drive on any default Amazon Linux EC2 instance like this: mkdir /data/efs && mount -t nfs4 -o nfsvers=4.1,rsize=1048576,wsize=1048576,hard,timeo=600,retrans=2,noresvport REPLACE_WITH_EFS_DNS:/ /data/efs
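One gap in the sketch above: the "sourceVolume" referenced by "mountPoints" also has to be declared under "volumes" in the container properties. Assuming the host-mount approach from the mount command above (EFS mounted on the instance at /data/efs), the pairing inside CONTAINER_PROPERTIES would look roughly like this:
"mountPoints": [
  {
    "readOnly": null,
    "containerPath": "/var/batch",
    "sourceVolume": "YOUR-FILE-SYSTEM-NAME"
  }
],
"volumes": [
  {
    "name": "YOUR-FILE-SYSTEM-NAME",
    "host": { "sourcePath": "/data/efs" }
  }
]
Alternatively, if roughly 50 GB of scratch space is all the job needs, you can skip EFS entirely and attach a launch template with a larger root EBS volume to the Batch compute environment (compute_resources accepts a launch_template block).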
When I deploy the Event Hubs namespace together with some other components of a data pipeline, like a Function App and Cosmos DB, the metrics graphs in the Azure Event Hubs portal do not appear; instead the graphs show "Resource not found".
But when I deploy the same Terraform code for the Event Hubs namespace without the other components, the metrics graphs appear. Here is the Terraform code:
locals {
ip_rule_map = flatten([
for cidr in ["182.191.83.208"] : [
{
action = "Allow"
ip_mask = cidr
}
]
])
}
resource "azurerm_eventhub_namespace" "avro-ingestion" {
name = "test-eh"
location = "Central US"
resource_group_name = "test-rg"
sku = "Standard"
capacity = 1
network_rulesets = false ? [{
default_action = "Deny"
ip_rule = local.ip_rule_map
virtual_network_rule = []
trusted_service_access_enabled = true
}] : [
{
default_action = "Allow"
ip_rule = local.ip_rule_map
virtual_network_rule = []
trusted_service_access_enabled = true
}
]
tags = {
Name = "avro-ingestion"
Purpose = "data-ingestion-infra-deployment"
CreatedBy = "emumba"
}
}
resource "azurerm_eventhub_namespace_authorization_rule" "user_managed" {
name = "UserManagedSharedAccessKey"
namespace_name = azurerm_eventhub_namespace.avro-ingestion.name
resource_group_name = "test-rg"
listen = true
send = true
manage = true
}
resource "null_resource" "schema-registry" {
depends_on = [
azurerm_eventhub_namespace.avro-ingestion
]
provisioner "local-exec" {
interpreter = ["/bin/bash", "-c"]
command = "az eventhubs namespace schema-registry create --name test-schema-group --namespace-name test-eh --resource-group test-rg --schema-compatibility Backward --schema-type Avro"
}
}
resource "azurerm_eventhub" "thunder" {
name = "test"
namespace_name = azurerm_eventhub_namespace.avro-ingestion.name
resource_group_name = "test-rg"
partition_count = 2
message_retention = 1
}
resource "azurerm_eventhub_consumer_group" "function-app-cg" {
name = "fApp-cons-group"
namespace_name = azurerm_eventhub_namespace.avro-ingestion.name
eventhub_name = azurerm_eventhub.thunder.name
resource_group_name = "test-rg"
}
The main.tf file, where I call all the modules along with the Event Hubs namespace module:
resource "random_string" "random_Sacc1" {
length = 4
special = false
upper = false
min_lower = 1
min_numeric = 1
}
resource "random_string" "random_Sacc2" {
length = 2
special = false
upper = false
min_lower = 1
min_numeric = 1
}
module "azure-resource-group" {
source = "../../modules/resource-group"
region = var.region
res_group_name = var.res_group_name
}
module "azure-virtual-network" {
depends_on = [
module.azure-resource-group
]
source = "../../modules/virtual-network"
rg_name = module.azure-resource-group.name
rg_location = module.azure-resource-group.location
vn_name = var.vn_name
vn_cidrs = var.vn_cidrs
subnets = var.subnets
subnet_cidrs = var.subnet_cidrs
pub_nsg_name = var.pub_nsg_name
private_nsg_name = var.private_nsg_name
internet_ip_cidr_list = var.internet_ip_cidr_list
}
module "azure-ad-app-registration" {
depends_on = [
module.azure-resource-group
]
source = "../../modules/app-role-assignment"
app-display-name = var.app-display-name
rg_name = module.azure-resource-group.name
}
module "azure-eventhubs" {
source = "../../modules/event-hubs"
ns_name = var.eventhub_namespace_name
eventhub_name = var.eventhub_name
cons_group_name = var.cons_group_name
rg_name = module.azure-resource-group.name
rg_location = module.azure-resource-group.location
enable_private_access = var.enable_private_access
cidr_list = var.public_cidr_list
vnet_id_dns = module.azure-virtual-network.vnet-id
private_ep_subnet = module.azure-virtual-network.private-subent1-id
dns_zone_name = var.dns_zone_name_private_ep
schema_group_name = var.eventhub_schema_group_name
}
module "azure-storage-account" {
depends_on = [
module.azure-virtual-network
]
source = "../../modules/storage-account"
storage_acc_name = "${var.storage_acc_name}${random_string.random_Sacc1.id}"
rg_name = module.azure-resource-group.name
rg_location = module.azure-resource-group.location
enable_private_access = var.enable_private_access
cidr_list = var.public_cidr_list
vnet_id_dns = module.azure-virtual-network.vnet-id
private_ep_subnet = module.azure-virtual-network.private-subent1-id
dns_zone_name = var.dns_zone_name_private_ep
}
module "azure-cosmos-db" {
source = "../../modules/cosmos-db"
acc_name = var.cosmos_acc_name
db_name = var.cosmos_db_name
rg_name = module.azure-resource-group.name
rg_location = module.azure-resource-group.location
cosmos_db_container_name = var.cosmos_db_container_name
enable_private_access = var.enable_private_access
cidr_list = var.public_cidr_list
vnet_id_dns = module.azure-virtual-network.vnet-id
private_ep_subnet = module.azure-virtual-network.private-subent1-id
dns_zone_name = var.dns_zone_name_private_ep
synapse_link = var.enable_synapse_link
}
# module "fApp-azure-storage-account" {
# source = "../../modules/storage-account"
# storage_acc_name = "${var.storage_acc_fApp_name}${random_string.random_Sacc2.id}"
# rg_name = module.azure-resource-group.name
# rg_location = module.azure-resource-group.location
# enable_private_access = var.enable_private_access
# cidr_list = var.public_cidr_list
# private_ep_subnet = element(module.azure-virtual-network.subnet_id_list, 1)
# dns_zone_name = var.dns_zone_name_private_ep
# }
module "data-ingestion-fApp" {
depends_on = [
module.azure-cosmos-db,
module.azure-eventhubs,
module.azure-storage-account
]
source = "../../modules/function-app"
rg_name = module.azure-resource-group.name
rg_location = module.azure-resource-group.location
application_insight_name = var.application_insight_name
fApp_service_plan_name = var.fApp_service_plan_name
fApp_name = var.fApp_name
fApp-storage_acc_name = "${var.storage_acc_fApp_name}${random_string.random_Sacc2.id}"
enable_private_access = var.enable_private_access
vnet_id_dns = module.azure-virtual-network.vnet-id
private_ep_subnet = module.azure-virtual-network.private-subent1-id
integration_vnet_name = module.azure-virtual-network.vnet-name
integration_subnet_name = module.azure-virtual-network.private-subent2-name
func_configurations = { "AZURE_CLIENT_ID" = module.azure-ad-app-registration.client_id
"AZURE_CLIENT_SECRET" = module.azure-ad-app-registration.client_secret,
"AZURE_TENANT_ID" = module.azure-ad-app-registration.tenant_id,
"EVENTHUB_NAME" = var.eventhub_name,
"EVENTHUB_FULLY_QUALIFIED_NAMESPACE" = "${var.eventhub_namespace_name}.servicebus.windows.net",
"SCHEMA_GROUP_NAME" = var.eventhub_schema_group_name,
"OUTPUT_CONTAINER" = var.storage_acc_container_name,
"OUTPUT_PATH" = var.storage_acc_container_path,
"COSMOS_DB_URI" = module.azure-cosmos-db.cosmos_account_uri,
"COSMOS_DB_PRIMARY_KEY" = module.azure-cosmos-db.cosmos_account_primary_key,
"COSMOS_DB_NAME" = var.cosmos_db_name,
"COSMOS_DB_CONTAINER_NAME" = var.cosmos_db_container_name,
"a10devops_namespace_connection" = module.azure-eventhubs.eventhub_conn_str,
"a10devops_storage_connection" = module.azure-storage-account.storage_account_conn_str }
}
Maybe this is possible, maybe it's not. I'm attempting to mount an EFS target using some of the values stored in a var.ec2_servers map, which includes subnets, EBS volumes, etc.
The issue I've run into is that I created the EFS file system using a for_each statement; since the aws_efs_file_system was created with for_each, I must reference the attributes of specific instances when referring to the resource elsewhere.
The file_system_id is only known after creation, so how would I reference it within a map or other variable inside other for_each statements, such as the aws_efs_mount_target resource defined below? Will what I'm doing even work?
I'm using the antiquated resource.tf > variables.tf > terraform.tfvars (config) style of code:
...the ec2.tf file:
###############################################################################
# EC2 Instance
resource "aws_instance" "ec2" {
for_each = var.ec2_servers
ami = data.aws_ami.ec2[each.key].id
disable_api_termination = var.disable_api_termination
iam_instance_profile = aws_iam_instance_profile.ec2[each.key].id
instance_type = each.value.instance_type
monitoring = true
vpc_security_group_ids = [aws_security_group.ec2[each.key].id]
subnet_id = each.value.subnet_name != null ? aws_subnet.private["${each.value.vpc_name}.${each.value.subnet_name}.${each.value.availability_zone}"].id : null
key_name = aws_key_pair.ec2.key_name
user_data = each.value.user_data == "" ? null : templatefile("./${each.value.user_data}", { region = data.aws_region.current.name })
private_ip = each.value.private_ip
metadata_options {
http_endpoint = "enabled"
http_tokens = "required"
}
root_block_device {
delete_on_termination = true
encrypted = true
volume_size = each.value.root_volume_size
volume_type = "gp2"
tags = {
Name = replace("${var.project_name}-${each.value.vpc_name}-${each.key}-EBS", " ", "")
}
}
dynamic "ebs_block_device" {
for_each = each.value.ebs_volumes
content {
volume_type = ebs_block_device.value.volume_type
volume_size = ebs_block_device.value.volume_size
device_name = ebs_block_device.value.device_name
tags = {
Name = replace("${var.project_name}-${each.value.vpc_name}-${each.key}-EBS", " ", "") }
}
}
tags = {
Name = replace("${var.project_name}-${each.value.vpc_name}-${each.key}-EC2", " ", "")
Backup = "true"
}
}
...the efs.tf file:
###############################################################################
# Create EFS File System
resource "aws_efs_file_system" "efs" {
for_each = {
for object, property in var.efs_config : object => property if var.efs_config.efs_enabled
}
creation_token = var.efs_config.efs_creation_token
encrypted = var.efs_config.efs_encrypt
kms_key_id = aws_kms_key.efs_kms.arn
tags = {
Name = replace("${var.project_name}-${var.efs_config.efs_vpc}-EFS", " ", "")
}
}
resource "aws_efs_backup_policy" "efs_backup_policy" {
file_system_id = "NEEDS TO BE DETERMINED"
backup_policy {
status = "ENABLED"
}
}
resource "aws_efs_mount_target" "efs_mount_target" {
for_each = var.ec2_servers
file_system_id = "NEEDS TO BE DETERMINED"
subnet_id = each.value.subnet_name == "app" ? aws_subnet.private["${each.value.vpc_name}.${each.value.subnet_name}.${each.value.availability_zone}"].id : null
ip_address = lookup(var.efs_config, "efs_private_ip")
security_groups = [aws_security_group.ec2[each.key].id]
}
...the variables.tf file:
variable "ec2_servers" {
description = "A configurable map of EC2 settings."
type = map(any)
}
...the terraform.tfvars file:
###############################################################################
# EFS Configurations
efs_config = {
efs_enabled = true
efs_creation_token = "Prod_EFS"
efs_encrypt = true
efs_vpc = "Prod"
efs_private_ip = "10.200.0.5"
}
# Server Configurations
ec2_servers = {
EC201 = {
ami_owner = "XXXXXXXXXXXX"
ami_name = "xxxxx-xxxxxx"
instance_type = "t2.micro"
root_volume_size = "10"
ebs_volumes = [
{
volume_size = "20"
volume_type = "gp3"
device_name = "/dev/xvdba"
},
{
volume_size = "20"
volume_type = "gp3"
device_name = "/dev/xvdbb"
}
]
vpc_name = "Prod"
subnet_name = "web"
set_ec2_hostname = false
ec2_hostname = "xxxxxxxxx"
availability_zone = "a"
public_dns = false
private_dns = true
policy_names = []
s3_storage = false
transfer_files = false
user_data = "setup_ssm_linux.tftpl"
private_ip = "10.200.0.132"
ingress = {
ssh = {
description = "Internal address"
from_port = 22
to_port = 22
protocol = "tcp"
cidr_blocks = [
"10.200.0.0/22"
]
}
}
}
}
I've tried a number of things, such as creating a data resource for aws_efs_mount_target, and nothing I do seems to work. If anyone could provide a little insight, both my project leads and I would greatly appreciate it!
If I missed anything here, please let me know and I will update the question with the relevant information.
Your aws_efs_backup_policy also needs a for_each, since you need to create one policy for each EFS file system:
resource "aws_efs_backup_policy" "efs_backup_policy" {
for_each = aws_efs_file_system.efs
file_system_id = each.value.id
backup_policy {
status = "ENABLED"
}
}
For your EFS mount target, I would use the same for_each that you use for the EFS file systems:
resource "aws_efs_mount_target" "efs_mount_target" {
for_each = {
for object, property in var.efs_config : object => property if var.efs_config.efs_enabled
}
file_system_id = aws_efs_file_system.efs[each.key].id
...
}
I think you need to clean up the other lookups in aws_efs_mount_target by moving those values into the efs_config variable, as sketched below.
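For example, iterating over the created file systems directly (which yields the same instance keys as the for_each above) and assuming efs_config gains efs_subnet_id and efs_security_group_id keys (both hypothetical names), the cleaned-up mount target could look roughly like this:
resource "aws_efs_mount_target" "efs_mount_target" {
  for_each = aws_efs_file_system.efs

  file_system_id  = each.value.id
  subnet_id       = var.efs_config.efs_subnet_id            # hypothetical key added to efs_config
  ip_address      = var.efs_config.efs_private_ip
  security_groups = [var.efs_config.efs_security_group_id]  # hypothetical key added to efs_config
}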
I have a Terraform resource in which I am trying to make the subnet_id variable dynamic. I have the variables defined below, in which subnet_name = "worker-subnet-1". I want to pass the name of the subnet and fetch the subnet ID, since I have multiple subnets. How can I do that?
resource "oci_containerengine_node_pool" "node_pool" {
for_each = var.nodepools
cluster_id = oci_containerengine_cluster.cluster[0].id
compartment_id = var.compartment_id
depends_on = [oci_containerengine_cluster.cluster]
kubernetes_version = var.cluster_kubernetes_version
name = each.value["name"]
node_config_details {
placement_configs {
availability_domain = var.availability_domain
subnet_id = oci_core_subnet.each.value["subnet_name"].id
}
size = each.value["size"]
}
node_shape = each.value["node_shape"]
node_shape_config {
#Optional
memory_in_gbs = each.value["memory"]
ocpus = each.value["ocpus"]
}
node_source_details {
image_id = each.value["image_id"]
source_type = "IMAGE"
}
ssh_public_key = file(var.ssh_public_key_path)
}
These are my variables:
nodepools = {
np1 = {
name = "np1"
size = 3
ocpus = 8
memory = 120
image_id = "test"
node_shape = "VM.Standard2.8"
subnet_name = "worker-subnet-1"
}
np2 = {
name = "np2"
size = 2
ocpus = 8
memory = 120
image_id = "test"
node_shape = "VM.Standard2.8"
subnet_name = "worker-subnet-1"
}
}
any suggestions?
resource "oci_core_subnet" "snet-workers" {
cidr_block = lookup(var.subnets["snet-workers"], "subnet_cidr")
compartment_id = var.compartment_id
vcn_id = oci_core_virtual_network.base_vcn.id
display_name = lookup(var.subnets["snet-workers"], "display_name")
dns_label = lookup(var.subnets["snet-workers"], "dns_label")
prohibit_public_ip_on_vnic = true
security_list_ids = [oci_core_security_list.private_worker_nodes.id]
route_table_id = oci_core_route_table.rt-nat.id
}
You have to use something like the below, where <local resource name> is replaced with the name you have given your oci_core_subnet resource:
subnet_id = oci_core_subnet.<local resource name>[each.value.subnet_name].id
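If the subnets are not created with for_each (the question only shows a single snet-workers subnet), a small lookup map in locals achieves the same thing by mapping the subnet_name values used in var.nodepools to subnet IDs. A minimal sketch based on the resources in the question:
locals {
  subnet_ids = {
    "worker-subnet-1" = oci_core_subnet.snet-workers.id
    # add one entry per worker subnet
  }
}
and inside the node pool's placement_configs block:
subnet_id = local.subnet_ids[each.value["subnet_name"]]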
I tried to build an ECS cluster with an ALB in front using Terraform. Since I use dynamic port mapping, the targets are not registered as healthy. I played with the health check and success codes; if I set the matcher to 301, everything is fine.
ECS
data "template_file" "mb_task_template" {
template = file("${path.module}/templates/marketplace-backend.json.tpl")
vars = {
name = "${var.mb_image_name}"
port = "${var.mb_port}"
image = "${aws_ecr_repository.mb.repository_url}"
log_group = "${aws_cloudwatch_log_group.mb.name}"
region = "${var.region}"
}
}
resource "aws_ecs_cluster" "mb" {
name = var.mb_image_name
}
resource "aws_ecs_task_definition" "mb" {
family = var.mb_image_name
container_definitions = data.template_file.mb_task_template.rendered
volume {
name = "mb-home"
host_path = "/ecs/mb-home"
}
}
resource "aws_ecs_service" "mb" {
name = var.mb_repository_url
cluster = aws_ecs_cluster.mb.id
task_definition = aws_ecs_task_definition.mb.arn
desired_count = 2
iam_role = var.aws_iam_role_ecs
depends_on = [aws_autoscaling_group.mb]
load_balancer {
target_group_arn = var.target_group_arn
container_name = var.mb_image_name
container_port = var.mb_port
}
}
resource "aws_autoscaling_group" "mb" {
name = var.mb_image_name
availability_zones = ["${var.availability_zone}"]
min_size = var.min_instance_size
max_size = var.max_instance_size
desired_capacity = var.desired_instance_capacity
health_check_type = "EC2"
health_check_grace_period = 300
launch_configuration = aws_launch_configuration.mb.name
vpc_zone_identifier = flatten([var.vpc_zone_identifier])
lifecycle {
create_before_destroy = true
}
}
data "template_file" "user_data" {
template = file("${path.module}/templates/user_data.tpl")
vars = {
ecs_cluster_name = "${var.mb_image_name}"
}
}
resource "aws_launch_configuration" "mb" {
name_prefix = var.mb_image_name
image_id = var.ami
instance_type = var.instance_type
security_groups = ["${var.aws_security_group}"]
iam_instance_profile = var.aws_iam_instance_profile
key_name = var.key_name
associate_public_ip_address = true
user_data = data.template_file.user_data.rendered
lifecycle {
create_before_destroy = true
}
}
resource "aws_cloudwatch_log_group" "mb" {
name = var.mb_image_name
retention_in_days = 14
}
ALB
locals {
target_groups = ["1", "2"]
}
resource "aws_alb" "mb" {
name = "${var.mb_image_name}-alb"
internal = false
load_balancer_type = "application"
security_groups = ["${aws_security_group.mb_alb.id}"]
subnets = var.subnets
tags = {
Name = var.mb_image_name
}
}
resource "aws_alb_target_group" "mb" {
count = length(local.target_groups)
name = "${var.mb_image_name}-tg-${element(local.target_groups, count.index)}"
port = var.mb_port
protocol = "HTTP"
vpc_id = var.vpc_id
target_type = "instance"
health_check {
path = "/health"
protocol = "HTTP"
timeout = "10"
interval = "15"
healthy_threshold = "3"
unhealthy_threshold = "3"
matcher = "200-299"
}
lifecycle {
create_before_destroy = true
}
tags = {
Name = var.mb_image_name
}
}
resource "aws_alb_listener" "mb_https" {
load_balancer_arn = aws_alb.mb.arn
port = 443
protocol = "HTTPS"
ssl_policy = "ELBSecurityPolicy-2016-08"
certificate_arn = module.dns.certificate_arn
default_action {
type = "forward"
target_group_arn = aws_alb_target_group.mb.0.arn
}
}
resource "aws_alb_listener_rule" "mb_https" {
listener_arn = aws_alb_listener.mb_https.arn
priority = 100
action {
type = "forward"
target_group_arn = aws_alb_target_group.mb.0.arn
}
condition {
field = "path-pattern"
values = ["/health/"]
}
}
Okay, looks like the code above is working. I had a different issue with networking.
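For anyone who hits the same symptom: with dynamic port mapping, the usual networking culprit is that the ECS instances' security group does not allow the ALB to reach the ephemeral host ports, so the targets never pass their health checks. A minimal sketch, assuming aws_security_group.mb_alb is the ALB security group and var.aws_security_group holds the instance security group ID, as in the code above:
resource "aws_security_group_rule" "ecs_from_alb" {
  type                     = "ingress"
  description              = "Allow the ALB to reach dynamically mapped container ports"
  from_port                = 32768
  to_port                  = 65535
  protocol                 = "tcp"
  source_security_group_id = aws_security_group.mb_alb.id
  security_group_id        = var.aws_security_group
}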