I'm new to Terraform and banging my head against the configuration below. Terraform won't deploy the correct resources to my environment and I'm not sure what I've missed. Any help is appreciated; the code is below.
Thanks! (Any general Terraform advice is also appreciated. I can tell it's going to be something very obvious, but a one-hour exercise has been bothering me all day.)
provider "aws" {
region="us-east-1"
}
data "aws_vpc" "vpc" {
tags = {
Name = "vpc"
}
}
data "aws_subnet_ids" "ecommerce" {
vpc_id = "${data.aws_vpc.vpc.id}"
tags = {
Name = "database"
}
}
resource "aws_db_subnet_group" "aurora" {
name = "aurora"
subnet_ids = ["${data.aws_subnet_ids.ecommerce.ids}"]
tags = {
Name = "database"
}
}
resource "aws_security_group" "database" {
name = "database"
description = "ecommerce database"
vpc_id = "${data.aws_vpc.vpc.id}"
tags = {
Name = "database"
}
}
resource "aws_security_group" "web" {
name = "web"
description = "ecommerce web"
vpc_id = "${data.aws_vpc.vpc.id}"
tags = {
Name = "web"
}
}
resource "aws_security_group_rule" "web_allow_https_in" {
description = "HTTPS access"
type = "ingress"
from_port = 443
to_port=443
protocol = "tcp"
cidr_blocks = ["8.8.8.8/32"]
security_group_id = "${aws_security_group.web.id}"
}
resource "aws_security_group_rule" "web_allow_mysql_out" {
description = "MySQL access"
type = "egress"
from_port= 3306
to_port = 3306
protocol = "tcp"
security_group_id = "${aws_security_group.web.id}"
}
resource "aws_security_group_rule" "db_allow_web_in" {
description = "Web access"
type = "ingress"
from_port = 3306
to_port = 3306
protocol="tcp"
security_group_id = "${aws_security_group.database.id}"
source_security_group_id = "${aws_security_group.web.id}"
}
data "aws_ami" "web" {
owners = ["self"]
most_recent = true
filter {
name = "name"
values = ["web*"]
}
}
resource "aws_instance" "web" {
ami = "${data.aws_ami.web.id}"
ebs_optimized = true
instance_type = "t3.micro"
associate_public_ip_address = false
vpc_security_group_ids = ["${aws_security_group.web.id}"]
subnet_id = "${data.aws_subnet_ids.ecommerce.ids[0]}"
ebs_block_device = {
device_name = "/dev/sdb"
volume_type = "gp2"
volume_size = "16"
delete_on_termination = true
}
tags = {
Name = "Web"
}
lifecycle {
ignore_changes = ["ami"]
}
}
resource "aws_rds_cluster" "aurora" {
apply_immediately = true
backup_retention_period = 7
cluster_identifier = "mydatabase"
engine = "aurora"
database_name = "main"
storage_encrypted=true
kms_key_id = "arn:aws**"
db_subnet_group_name = "${aws_db_subnet_group.aurora.id}"
final_snapshot_identifier = "final"
master_password = "*"
master_username = "*"
vpc_security_group_ids = ["${aws_security_group.database.id}"]
}
resource "aws_rds_cluster_instance" "aurora" {
count = 2
apply_immediately = true
identifier = "mydatabase-${count.index}"
cluster_identifier = "${aws_rds_cluster.aurora.id}"
db_subnet_group_name = "${aws_db_subnet_group.aurora.id}"
instance_class = "db.r12.large"
publicly_accessible = false
}
resource "aws_eip" "eip" {
vpc = true
}
resource "aws_eip_association" "assoc" {
instance_id = "${aws_instance.web.id}"
allocation_id = "${aws_eip.eip.id}"
}
data "aws_route53_zone" "zone" {
name = "ecommerce.com"
}
resource "aws_route53_record" "record" {
zone_id = "${data.aws_route53_zone.zone.zone_id}"
type="A"
name = "www.ecommerce.com"
ttl = 300
records = ["${aws_eip.eip.public_ip}"]
}
I have created an application gateway, WAF policy, and public IP via Terraform.
From the Azure GUI I created a Key Vault, uploaded the pfx certificate to it, created a managed identity, and granted it full access to the Key Vault.
I am trying to create an additional HTTPS listener that pulls the certificate stored in the Key Vault via a data block, but I keep landing on the error below.
Note: the Key Vault, managed identity, application gateway, and WAF policy are all in the same region.
Error:
│ Error: updating Application Gateway: (Name "abc-xyz-Nonprod-test-us6-Extappgw0001" / Resource Group "xyz-network-vnet-devtest"): network.ApplicationGatewaysClient#CreateOrUpdate: Failure sending request: StatusCode=400 -- Original Error: Code="InvalidResourceReference" Message="Resource /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/xyz-network-vnet-devtest/providers/Microsoft.Network/applicationGateways/abc-xyz-Nonprod-test-us6-Extappgw0001/sslCertificates/firepfx referenced by resource /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/xyz-network-vnet-devtest/providers/Microsoft.Network/applicationGateways/abc-xyz-Nonprod-test-us6-Extappgw0001/httpListeners/External_app_gtw_nonprod_backend_listener_https was not found. Please make sure that the referenced resource exists, and that both resources are in the same region." Details=[]
│
│ with azurerm_application_gateway.abc-xyz-Nonprod-test-us6-Extappgw0001,
│ on abc-xyz-Nonprod-test-us6-Extappgw0001.tf line 102, in resource "azurerm_application_gateway" "abc-xyz-Nonprod-test-us6-Extappgw0001":
│ 102: resource "azurerm_application_gateway" "abc-xyz-Nonprod-test-us6-Extappgw0001"
Code:
terraform {
  backend "azurerm" {
    storage_account_name = "abccloudlbstorage"
    resource_group_name  = "xyz-NETENG-AppResources-Prod"
    container_name       = "testlb"
    tenant_id            = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"
    subscription_id      = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"
    key                  = "abc-xyz-Nonprod-test-us6-Extappgw0001.tfstate"
  }
}

provider "azurerm" {
  features {}
}

data "azurerm_client_config" "current" {}

data "azurerm_subnet" "abc-xyz-devtest-us6-vnet00002-sub00001-AppGW" {
  name                 = "abc-xyz-devtest-us6-vnet00002-sub00001-AppGW"
  resource_group_name  = "xyz-network-vnet-devtest"
  virtual_network_name = "abc-xyz-devtest-us6-vnet00002"
}

data "azurerm_user_assigned_identity" "test-appgw-identity-us6" {
  name                = "test-appgw-identity-us6"
  resource_group_name = "xyz-network-vnet-devtest"
}

data "azurerm_key_vault" "xyz-network-kv" {
  name                = "xyz-network-kv"
  resource_group_name = "xyz-network-vnet-devtest"
}

data "azurerm_key_vault_certificate" "firepfx" {
  name         = "firepfx"
  key_vault_id = data.azurerm_key_vault.xyz-network-kv.id
}
resource "azurerm_public_ip" "abc-test-us6-nonprod-FE0001" {
name = "abc-test-us6-nonprod-FE0001"
resource_group_name = "xyz-network-vnet-devtest"
location = "eastus2"
allocation_method = "Static"
sku = "Standard"
zones = ["1", "2", "3"]
tags = {
BusinessUnit = "enterprise-management"
LineOfBusiness = "xyz"
}
}
resource "azurerm_web_application_firewall_policy" "abc-test-us6-nonprod-WFW0001" {
name = "abc-test-us6-nonprod-WFW0001"
resource_group_name = "xyz-network-vnet-devtest"
location = "eastus2"
tags = {
BusinessUnit = "enterprise-management"
LineOfBusiness = "xyz"
}
custom_rules {
name = "Rule1"
priority = 1
rule_type = "MatchRule"
match_conditions {
match_variables {
variable_name = "RemoteAddr"
}
operator = "IPMatch"
negation_condition = false
match_values = ["8.8.8.8"]
}
action = "Block"
}
policy_settings {
enabled = true
mode = "Prevention"
request_body_check = true
file_upload_limit_in_mb = 100
max_request_body_size_in_kb = 128
}
managed_rules {
exclusion {
match_variable = "RequestHeaderNames"
selector = "x-company-secret-header"
selector_match_operator = "Equals"
}
managed_rule_set {
type = "OWASP"
version = "3.2"
}
}
}
resource "azurerm_application_gateway" "abc-xyz-Nonprod-test-us6-Extappgw0001" {
name = "abc-xyz-Nonprod-test-us6-Extappgw0001"
resource_group_name = "xyz-network-vnet-devtest"
location = "eastus2"
zones = ["1", "2", "3"]
firewall_policy_id = azurerm_web_application_firewall_policy.abc-test-us6-nonprod-WFW0001.id
tags = {
BusinessUnit = "enterprise-management"
LineOfBusiness = "xyz"
}
sku {
name = "WAF_v2"
tier = "WAF_v2"
}
autoscale_configuration {
min_capacity = 2
max_capacity = 10
}
gateway_ip_configuration {
name = "abc-test-us6-nonprod-GIP0001"
subnet_id = data.azurerm_subnet.abc-xyz-devtest-us6-vnet00002-sub00001-AppGW.id
}
frontend_port {
name = "abc-us6-gpt-nonprod-PRT-FE0001"
port = 80
}
frontend_ip_configuration {
name = "abc-test-us6-nonprod-CFG-FE0001"
public_ip_address_id = azurerm_public_ip.abc-test-us6-nonprod-FE0001.id
}
frontend_ip_configuration {
name = "abc-test-us6-nonprod-CFG-FE0002"
subnet_id = data.azurerm_subnet.abc-xyz-devtest-us6-vnet00002-sub00001-AppGW.id
private_ip_address = "10.46.72.200"
private_ip_address_allocation = "Static"
}
backend_address_pool {
name = "External_app_gtw_nonprod_backend"
}
backend_http_settings {
name = "External_app_gtw_nonprod_http_setting"
cookie_based_affinity = "Disabled"
path = "/"
port = 80
protocol = "Http"
request_timeout = 60
}
http_listener {
name = "External_app_gtw_nonprod_backend_listener"
frontend_ip_configuration_name = "abc-test-us6-nonprod-CFG-FE0001"
frontend_port_name = "abc-us6-gpt-nonprod-PRT-FE0001"
protocol = "Http"
}
request_routing_rule {
name = "External_app_gtw_nonprod_RR"
rule_type = "Basic"
http_listener_name = "External_app_gtw_nonprod_backend_listener"
backend_address_pool_name = "External_app_gtw_nonprod_backend"
backend_http_settings_name = "External_app_gtw_nonprod_http_setting"
priority = 1
}
frontend_port {
name = "abc-us6-gpt-nonprod-PRT-FE00011"
port = 443
}
backend_http_settings {
name = "External_app_gtw_nonprod_https_setting"
cookie_based_affinity = "Disabled"
path = "/"
port = 443
protocol = "Https"
request_timeout = 60
host_name = "irms.abc.com"
}
http_listener {
name = "External_app_gtw_nonprod_backend_listener_https"
frontend_ip_configuration_name = "abc-test-us6-nonprod-CFG-FE0001"
frontend_port_name = "abc-us6-gpt-nonprod-PRT-FE00011"
protocol = "Https"
ssl_certificate_name = data.azurerm_key_vault_certificate.firepfx.name
}
identity {
type = "UserAssigned"
identity_ids = [data.azurerm_user_assigned_identity.test-appgw-identity-us6.id]
}
request_routing_rule {
name = "External_app_gtw_nonprod_https"
rule_type = "Basic"
http_listener_name = "External_app_gtw_nonprod_backend_listener_https"
backend_address_pool_name = "External_app_gtw_nonprod_backend"
backend_http_settings_name = "External_app_gtw_nonprod_https_setting"
priority = 3
}
}
For Application Gateway, you have to create an ssl_certificate block on the gateway itself that references the Key Vault secret ID under the key_vault_secret_id property. Your HTTPS listener's ssl_certificate_name must then match the name of that ssl_certificate block; right now the listener points at a certificate name that does not exist on the gateway, which is exactly what the InvalidResourceReference error is telling you. For example:
ssl_certificate {
  name                = "cert2023"
  key_vault_secret_id = "https://mykv.vault.azure.net/secrets/cert2023"
}
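Applied to the configuration above, a sketch using the data source you already declared (assuming the certificate's backing secret is what you want; the azurerm_key_vault_certificate data source exposes it as secret_id):

ssl_certificate {
  name                = "firepfx"
  key_vault_secret_id = data.azurerm_key_vault_certificate.firepfx.secret_id
}

The listener's ssl_certificate_name already resolves to "firepfx", so it would match this block's name.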
I have Terraform code that creates an EC2-type Batch job, and my AWS Batch job downloads about 50GB of data in total. How do I add that storage space to my instance in Terraform? And is there another way to add that storage?
This is my Terraform code:
resource "aws_batch_compute_environment" "pipeline" {
compute_environment_name = "${var.product}-${var.application}-pipeline-batch-compute-environment-${var.env}"
compute_resources {
instance_role = aws_iam_instance_profile.pipeline_batch.arn
instance_type = var.pipeline_instance_type
max_vcpus = var.pipeline_max_vcpus
min_vcpus = 0
security_group_ids = [
aws_security_group.pipeline_batch.id
]
subnets = var.subnets
type = "EC2"
}
service_role = aws_iam_role.pipeline_batch_service_role.arn
type = "MANAGED"
tags = {
environment = var.env
}
}
resource "aws_batch_job_queue" "pipeline" {
depends_on = [aws_batch_compute_environment.pipeline]
name = "${var.product}-${var.application}-pipeline-batch-job-queue-${var.env}"
state = "ENABLED"
priority = 1
compute_environments = [
aws_batch_compute_environment.pipeline.arn
]
tags = {
environment = var.env
}
}
resource "aws_batch_job_definition" "pipeline" {
depends_on = [aws_ecr_repository.pipeline]
name = "${var.product}-${var.application}-pipeline-batch-job-definition-${var.env}"
type = "container"
container_properties = <<CONTAINER_PROPERTIES
{
"image": "${aws_ecr_repository.pipeline.repository_url}:latest",
"command": [ "--s3_bucket", "${var.input_bucket}", "--s3_upload_bucket", "${var.upload_bucket}"],
"executionRoleArn": "${aws_iam_role.pipeline_batch_instance_role.arn}",
"memory": ${var.pipeline_memory},
"vcpus": ${var.pipeline_vcpus}
}
CONTAINER_PROPERTIES
tags = {
environment = var.env
}
}
If you want, you may be able to mount a shared EFS drive; you could try something like the following. Keep in mind I have not tested this, and you will need to replace certain parameters with your subnet IDs, VPC ID, etc.:
resource "aws_batch_compute_environment" "pipeline" {
compute_environment_name = "${var.product}-${var.application}-pipeline-batch-compute-environment-${var.env}"
compute_resources {
instance_role = aws_iam_instance_profile.pipeline_batch.arn
instance_type = var.pipeline_instance_type
max_vcpus = var.pipeline_max_vcpus
min_vcpus = 0
security_group_ids = [
aws_security_group.pipeline_batch.id
]
subnets = var.subnets
type = "EC2"
}
service_role = aws_iam_role.pipeline_batch_service_role.arn
type = "MANAGED"
tags = {
environment = var.env
}
}
resource "aws_batch_job_queue" "pipeline" {
depends_on = [aws_batch_compute_environment.pipeline]
name = "${var.product}-${var.application}-pipeline-batch-job-queue-${var.env}"
state = "ENABLED"
priority = 1
compute_environments = [
aws_batch_compute_environment.pipeline.arn
]
tags = {
environment = var.env
}
}
resource "aws_batch_job_definition" "pipeline" {
depends_on = [aws_ecr_repository.pipeline]
name = "${var.product}-${var.application}-pipeline-batch-job-definition-${var.env}"
type = "container"
container_properties = <<CONTAINER_PROPERTIES
{
"image": "${aws_ecr_repository.pipeline.repository_url}:latest",
"command": [ "--s3_bucket", "${var.input_bucket}", "--s3_upload_bucket", "${var.upload_bucket}"],
"executionRoleArn": "${aws_iam_role.pipeline_batch_instance_role.arn}",
"memory": ${var.pipeline_memory},
"vcpus": ${var.pipeline_vcpus},
"mountPoints": [
{
readOnly = null,
containerPath = "/var/batch"
sourceVolume = "YOUR-FILE-SYSTEM-NAME"
}
]
}
CONTAINER_PROPERTIES
tags = {
environment = var.env
}
}
resource "aws_efs_file_system" "general" {
creation_token = "YOUR-FILE-SYSTEM-NAME"
#kms_key_id = module.kms.arn
#encrypted = true
encrypted = false
performance_mode = "generalPurpose"
throughput_mode = "provisioned"
provisioned_throughput_in_mibps = 8
tags = {Name = "YOUR-FILE-SYSTEM-NAME"}
}
resource "aws_efs_access_point" "general" {
tags = {Name = "YOUR-FILE-SYSTEM-NAME"}
file_system_id = aws_efs_file_system.general.id
root_directory {
path = "/YOUR-FILE-SYSTEM-NAME"
creation_info {
owner_gid = "1000"
owner_uid = "1000"
permissions = "755"
}
}
posix_user {
uid = "1000"
gid = "1000"
}
}
## FOR REDUNDANCY
## It is a good idea to add a mount target per AZ you use
resource "aws_efs_mount_target" "a" {
  file_system_id  = aws_efs_file_system.general.id
  subnet_id       = PUBLIC-SUBNET-A # replace with your subnet ID
  security_groups = [aws_security_group.general.id]
}

resource "aws_efs_mount_target" "b" {
  file_system_id  = aws_efs_file_system.general.id
  subnet_id       = PUBLIC-SUBNET-B # replace with your subnet ID
  security_groups = [aws_security_group.general.id]
}
resource "aws_security_group" "general" {
name = YOUR-SECURITY-GROUP-NAME
vpc_id = YOUR-VPC-ID
tags = {Name = YOUR-SECURITY-GROUP-NAME}
}
resource "aws_security_group_rule" "ingress" {
type = "ingress"
cidr_blocks = ["0.0.0.0/0"]
ipv6_cidr_blocks = ["::/0"]
from_port = "2049"
to_port = "2049"
protocol = "tcp"
security_group_id = aws_security_group.general.id
}
resource "aws_security_group_rule" "egress" {
type = "egress"
description = "egress"
cidr_blocks = ["0.0.0.0/0"]
ipv6_cidr_blocks = ["::/0"]
from_port = "0"
to_port = "0"
protocol = "all"
security_group_id = aws_security_group.general.id
}
You'll then be able to mount your EFS drive on any default Amazon Linux EC2 instance like this: mkdir /data/efs && mount -t nfs4 -o nfsvers=4.1,rsize=1048576,wsize=1048576,hard,timeo=600,retrans=2,noresvport REPLACE_WITH_EFS_DNS:/ /data/efs
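Alternatively, if the job only needs local scratch space rather than a shared filesystem, you may be able to attach a launch template with a larger EBS root volume to the compute environment. A minimal, untested sketch; the device name /dev/xvda assumes the ECS-optimized Amazon Linux 2 AMI, and 100 GiB is just the 50GB download plus headroom:

resource "aws_launch_template" "pipeline_batch" {
  name = "${var.product}-${var.application}-pipeline-batch-launch-template-${var.env}"

  block_device_mappings {
    device_name = "/dev/xvda" # root device on the ECS-optimized Amazon Linux 2 AMI

    ebs {
      volume_size = 100 # GiB
      volume_type = "gp3"
    }
  }
}

Then reference it from the compute environment's compute_resources block:

  launch_template {
    launch_template_id = aws_launch_template.pipeline_batch.id
    version            = "$Latest"
  }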
Error: Inappropriate value for attribute "route": element 0: attributes "carrier_gateway_id", "cidr_block", "destination_prefix_list_id", "gateway_id", "instance_id", "local_gateway_id", "nat_gateway_id", "network_interface_id", "transit_gateway_id", "vpc_endpoint_id", and "vpc_peering_connection_id" are required.
resource "aws_vpc" "main" {
cidr_block = "10.0.0.0/16"
tags = {
Name = "main"
}
}
resource "aws_subnet" "public-subnet-1" {
vpc_id = aws_vpc.main.id
cidr_block = "10.0.1.0/24"
tags = {
Name = "public-main-1"
}
}
resource "aws_subnet" "public-subnet-2" {
vpc_id = aws_vpc.main.id
cidr_block = "10.0.3.0/24"
tags = {
Name = "public-main-2"
}
}
resource "aws_internet_gateway" "gw" {
vpc_id = aws_vpc.main.id
tags = {
Name = "gw"
}
}
resource "aws_route_table" "rt" {
vpc_id = aws_vpc.main.id
route = [
{
cidr_block = "10.0.1.0/24"
gateway_id = aws_internet_gateway.gw.id
}
]
tags = {
Name = "rt"
}
}
Add all the optional parameters to resolve the error. When route is assigned as a list of objects like this, every attribute of the object type is required, so the otherwise-optional arguments must all be set (empty strings for the ones you don't use):
resource "aws_route_table" "rt" {
vpc_id = aws_vpc.main.id
route = [
{
cidr_block = "0.0.0.0/0"
gateway_id = aws_internet_gateway.gw.id
carrier_gateway_id = ""
destination_prefix_list_id = ""
egress_only_gateway_id = ""
instance_id = ""
ipv6_cidr_block = ""
local_gateway_id = ""
nat_gateway_id = ""
network_interface_id = ""
transit_gateway_id = ""
vpc_endpoint_id = ""
vpc_peering_connection_id = ""
}
]
tags = {
Name = "rt"
}
}
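As an aside, the idiomatic way to avoid this entirely is to skip the attribute-as-list form: declare the route table without inline routes and add each route as a separate aws_route resource (or use repeated route blocks), so only the arguments you actually need are set. A sketch:

resource "aws_route_table" "rt" {
  vpc_id = aws_vpc.main.id

  tags = {
    Name = "rt"
  }
}

resource "aws_route" "internet_access" {
  route_table_id         = aws_route_table.rt.id
  destination_cidr_block = "0.0.0.0/0"
  gateway_id             = aws_internet_gateway.gw.id
}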
I have written a Terraform configuration file for a bastion entry point on an application.

resource "aws_instance" "instance" {
  ami                  = var.ami
  ebs_optimized        = var.ebs_optimized
  iam_instance_profile = aws_iam_instance_profile.iam_instance_profile
  instance_type        = var.instance_type
  key_name             = "quadops"
  subnet_id            = var.subnet_id
  user_data            = var.user_data

  tags = {
    Name     = "${var.name}"
    Business = "Infrastracture"
    app_name = "infra"
    app_env  = "${var.env}"
  }

  volume_tags = {
    Name     = "${var.name}"
    Business = "Infrastracture"
    app_name = "infra"
    app_env  = "${var.env}"
  }

  vpc_security_group_ids = [aws_security_group.security_group.id]
}
resource "aws_security_group" "security_group" {
name = "${var.name}-security-group"
vpc_id = var.vpc_id
ingress {
from_port = 22
to_port = 22
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
egress {
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
}
tags = {
Name = "${var.name}"
Business = "Infrastracture"
app_name = "infra"
app_env = "${var.env}"
}
}
resource "aws_iam_instance_profile" "iam_instance_profile" {
name = "${var.name}-iam-instance-profile"
role = aws_iam_role.iam_role
tags = {
Name = "${var.name}"
Business = "Infrastracture"
app_name = "infra"
app_env = "${var.env}"
}
}
resource "aws_iam_role" "iam_role" {
assume_role_policy = jsonencode({
Version = "2012-10-17"
Statement = [
{
Action = "sts:AssumeRole"
Effect = "Allow"
Sid = ""
Principal = {
Service = "ec2.amazonaws.com"
}
},
]
})
name = "${var.name}-iam-role"
tags = {
Name = "${var.name}-iam-role"
Business = "Infrastracture"
app_name = "infra"
app_env = "${var.env}"
}
}
resource "aws_eip" "eip" {
vpc = true
instance = aws_instance.instance.id
tags = {
Name = "${var.name}-eip"
Business = "Infrastracture"
app_name = "infra"
app_env = "${var.env}"
}
}
resource "cloudflare_record" "record" {
zone_id = var.zone_id
name = "bastion.${var.env}"
type = "A"
value = "aws_eip.eip.public_ip"
}
Upon running plan, I'm getting this error:
on .terraform/modules/bastion/main.tf line 49, in resource "aws_iam_instance_profile" "iam_instance_profile":
49: role = aws_iam_role.iam_role
|----------------
| aws_iam_role.iam_role is object with 15 attributes
Inappropriate value for attribute "role": string required.
I can't seem to get over this hurdle. I think I'm calling the resource correctly, but Terraform 0.12 says it requires a string. Am I passing the values incorrectly? Thanks.
You are passing the entire aws_iam_role object to the role argument, which is causing the error. Instead, pass the name of the role, like so:
resource "aws_iam_instance_profile" "iam_instance_profile" {
role = aws_iam_role.iam_role.name
}
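Note that the aws_instance block above has the same pattern: its iam_instance_profile argument also expects a name string, so that line will likely need the same treatment:

  iam_instance_profile = aws_iam_instance_profile.iam_instance_profile.name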
I tried to build an ECS cluster with an ALB in front using Terraform. Because I use dynamic port mapping, the targets are not registered as healthy. I played with the health check and success codes; if I set the matcher to 301, everything is fine.
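(For context: with dynamic port mapping, the container definition template, marketplace-backend.json.tpl, not shown here, presumably maps the container port to host port 0 so that ECS assigns each task a random ephemeral host port, something like:

  "portMappings": [
    {
      "containerPort": ${port},
      "hostPort": 0
    }
  ]

The ALB then health-checks each target on whatever ephemeral port was assigned.)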
ECS
data "template_file" "mb_task_template" {
template = file("${path.module}/templates/marketplace-backend.json.tpl")
vars = {
name = "${var.mb_image_name}"
port = "${var.mb_port}"
image = "${aws_ecr_repository.mb.repository_url}"
log_group = "${aws_cloudwatch_log_group.mb.name}"
region = "${var.region}"
}
}
resource "aws_ecs_cluster" "mb" {
name = var.mb_image_name
}
resource "aws_ecs_task_definition" "mb" {
family = var.mb_image_name
container_definitions = data.template_file.mb_task_template.rendered
volume {
name = "mb-home"
host_path = "/ecs/mb-home"
}
}
resource "aws_ecs_service" "mb" {
name = var.mb_repository_url
cluster = aws_ecs_cluster.mb.id
task_definition = aws_ecs_task_definition.mb.arn
desired_count = 2
iam_role = var.aws_iam_role_ecs
depends_on = [aws_autoscaling_group.mb]
load_balancer {
target_group_arn = var.target_group_arn
container_name = var.mb_image_name
container_port = var.mb_port
}
}
resource "aws_autoscaling_group" "mb" {
name = var.mb_image_name
availability_zones = ["${var.availability_zone}"]
min_size = var.min_instance_size
max_size = var.max_instance_size
desired_capacity = var.desired_instance_capacity
health_check_type = "EC2"
health_check_grace_period = 300
launch_configuration = aws_launch_configuration.mb.name
vpc_zone_identifier = flatten([var.vpc_zone_identifier])
lifecycle {
create_before_destroy = true
}
}
data "template_file" "user_data" {
template = file("${path.module}/templates/user_data.tpl")
vars = {
ecs_cluster_name = "${var.mb_image_name}"
}
}
resource "aws_launch_configuration" "mb" {
name_prefix = var.mb_image_name
image_id = var.ami
instance_type = var.instance_type
security_groups = ["${var.aws_security_group}"]
iam_instance_profile = var.aws_iam_instance_profile
key_name = var.key_name
associate_public_ip_address = true
user_data = data.template_file.user_data.rendered
lifecycle {
create_before_destroy = true
}
}
resource "aws_cloudwatch_log_group" "mb" {
name = var.mb_image_name
retention_in_days = 14
}
ALB
locals {
  target_groups = ["1", "2"]
}

resource "aws_alb" "mb" {
  name               = "${var.mb_image_name}-alb"
  internal           = false
  load_balancer_type = "application"
  security_groups    = ["${aws_security_group.mb_alb.id}"]
  subnets            = var.subnets

  tags = {
    Name = var.mb_image_name
  }
}

resource "aws_alb_target_group" "mb" {
  count       = length(local.target_groups)
  name        = "${var.mb_image_name}-tg-${element(local.target_groups, count.index)}"
  port        = var.mb_port
  protocol    = "HTTP"
  vpc_id      = var.vpc_id
  target_type = "instance"

  health_check {
    path                = "/health"
    protocol            = "HTTP"
    timeout             = "10"
    interval            = "15"
    healthy_threshold   = "3"
    unhealthy_threshold = "3"
    matcher             = "200-299"
  }

  lifecycle {
    create_before_destroy = true
  }

  tags = {
    Name = var.mb_image_name
  }
}

resource "aws_alb_listener" "mb_https" {
  load_balancer_arn = aws_alb.mb.arn
  port              = 443
  protocol          = "HTTPS"
  ssl_policy        = "ELBSecurityPolicy-2016-08"
  certificate_arn   = module.dns.certificate_arn

  default_action {
    type             = "forward"
    target_group_arn = aws_alb_target_group.mb.0.arn
  }
}

resource "aws_alb_listener_rule" "mb_https" {
  listener_arn = aws_alb_listener.mb_https.arn
  priority     = 100

  action {
    type             = "forward"
    target_group_arn = aws_alb_target_group.mb.0.arn
  }

  condition {
    field  = "path-pattern"
    values = ["/health/"]
  }
}
Okay, it turns out the code above works; I had a different issue with networking.
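For anyone hitting the same wall: with dynamic port mapping, the usual networking culprit is the container instances' security group, which must allow the ALB to reach the whole ephemeral port range rather than just the container port. A sketch, assuming var.aws_security_group is the instance security group ID and reusing aws_security_group.mb_alb from above:

resource "aws_security_group_rule" "mb_ingress_from_alb" {
  type                     = "ingress"
  from_port                = 32768 # start of the ephemeral host-port range
  to_port                  = 65535
  protocol                 = "tcp"
  security_group_id        = var.aws_security_group
  source_security_group_id = aws_security_group.mb_alb.id
}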