Inappropriate value for attribute "route": element 0: attributes required

│ Inappropriate value for attribute "route": element 0: attributes "carrier_gateway_id", "cidr_block",
│ "destination_prefix_list_id", "gateway_id", "instance_id", "local_gateway_id", "nat_gateway_id",
│ "network_interface_id", "transit_gateway_id", "vpc_endpoint_id", and "vpc_peering_connection_id" are required.
resource "aws_vpc" "main" {
cidr_block = "10.0.0.0/16"
tags = {
Name = "main"
}
}
resource "aws_subnet" "public-subnet-1" {
vpc_id = aws_vpc.main.id
cidr_block = "10.0.1.0/24"
tags = {
Name = "public-main-1"
}
}
resource "aws_subnet" "public-subnet-2" {
vpc_id = aws_vpc.main.id
cidr_block = "10.0.3.0/24"
tags = {
Name = "public-main-2"
}
}
resource "aws_internet_gateway" "gw" {
vpc_id = aws_vpc.main.id
tags = {
Name = "gw"
}
}
resource "aws_route_table" "rt" {
vpc_id = aws_vpc.main.id
route = [
{
cidr_block = "10.0.1.0/24"
gateway_id = aws_internet_gateway.gw.id
}
]
tags = {
Name = "rt"
}
}

Add all of the optional attributes to clear the error.
When route is assigned as a list of objects (with =) rather than written as repeated route blocks, Terraform treats it as a full object type and requires every attribute to be present, so arguments that would otherwise be optional must all be set explicitly; empty strings stand in for the ones you don't use:
resource "aws_route_table" "rt" {
vpc_id = aws_vpc.main.id
route = [
{
cidr_block = "0.0.0.0/0"
gateway_id = aws_internet_gateway.gw.id
carrier_gateway_id = ""
destination_prefix_list_id = ""
egress_only_gateway_id = ""
instance_id = ""
ipv6_cidr_block = ""
local_gateway_id = ""
nat_gateway_id = ""
network_interface_id = ""
transit_gateway_id = ""
vpc_endpoint_id = ""
vpc_peering_connection_id = ""
}
]
tags = {
Name = "rt"
}
}
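If you would rather not enumerate every attribute, two alternatives avoid the problem entirely; untested sketches reusing the same resource names:

# Option 1: block syntax only requires the arguments you actually set.
resource "aws_route_table" "rt" {
  vpc_id = aws_vpc.main.id

  route {
    cidr_block = "0.0.0.0/0"
    gateway_id = aws_internet_gateway.gw.id
  }

  tags = {
    Name = "rt"
  }
}

# Option 2: declare no inline routes and attach them separately.
# (Pick one approach; inline routes and aws_route resources conflict.)
resource "aws_route" "internet_access" {
  route_table_id         = aws_route_table.rt.id
  destination_cidr_block = "0.0.0.0/0"
  gateway_id             = aws_internet_gateway.gw.id
}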

Related

Add storage to AWS Batch job in Terraform

I have Terraform code that creates an EC2-type Batch job, and my AWS Batch job downloads about 50 GB of data in total. How can I add that storage space to my instance in Terraform? And is there another way to add that storage?
This is my Terraform code:
resource "aws_batch_compute_environment" "pipeline" {
compute_environment_name = "${var.product}-${var.application}-pipeline-batch-compute-environment-${var.env}"
compute_resources {
instance_role = aws_iam_instance_profile.pipeline_batch.arn
instance_type = var.pipeline_instance_type
max_vcpus = var.pipeline_max_vcpus
min_vcpus = 0
security_group_ids = [
aws_security_group.pipeline_batch.id
]
subnets = var.subnets
type = "EC2"
}
service_role = aws_iam_role.pipeline_batch_service_role.arn
type = "MANAGED"
tags = {
environment = var.env
}
}
resource "aws_batch_job_queue" "pipeline" {
depends_on = [aws_batch_compute_environment.pipeline]
name = "${var.product}-${var.application}-pipeline-batch-job-queue-${var.env}"
state = "ENABLED"
priority = 1
compute_environments = [
aws_batch_compute_environment.pipeline.arn
]
tags = {
environment = var.env
}
}
resource "aws_batch_job_definition" "pipeline" {
depends_on = [aws_ecr_repository.pipeline]
name = "${var.product}-${var.application}-pipeline-batch-job-definition-${var.env}"
type = "container"
container_properties = <<CONTAINER_PROPERTIES
{
"image": "${aws_ecr_repository.pipeline.repository_url}:latest",
"command": [ "--s3_bucket", "${var.input_bucket}", "--s3_upload_bucket", "${var.upload_bucket}"],
"executionRoleArn": "${aws_iam_role.pipeline_batch_instance_role.arn}",
"memory": ${var.pipeline_memory},
"vcpus": ${var.pipeline_vcpus}
}
CONTAINER_PROPERTIES
tags = {
environment = var.env
}
}
If you want, you may be able to mount a shared EFS drive; you could try something like this. Keep in mind I have not tested this, and you will need to replace certain parameters with your subnet IDs, VPC ID, etc.:
resource "aws_batch_compute_environment" "pipeline" {
compute_environment_name = "${var.product}-${var.application}-pipeline-batch-compute-environment-${var.env}"
compute_resources {
instance_role = aws_iam_instance_profile.pipeline_batch.arn
instance_type = var.pipeline_instance_type
max_vcpus = var.pipeline_max_vcpus
min_vcpus = 0
security_group_ids = [
aws_security_group.pipeline_batch.id
]
subnets = var.subnets
type = "EC2"
}
service_role = aws_iam_role.pipeline_batch_service_role.arn
type = "MANAGED"
tags = {
environment = var.env
}
}
resource "aws_batch_job_queue" "pipeline" {
depends_on = [aws_batch_compute_environment.pipeline]
name = "${var.product}-${var.application}-pipeline-batch-job-queue-${var.env}"
state = "ENABLED"
priority = 1
compute_environments = [
aws_batch_compute_environment.pipeline.arn
]
tags = {
environment = var.env
}
}
resource "aws_batch_job_definition" "pipeline" {
depends_on = [aws_ecr_repository.pipeline]
name = "${var.product}-${var.application}-pipeline-batch-job-definition-${var.env}"
type = "container"
container_properties = <<CONTAINER_PROPERTIES
{
"image": "${aws_ecr_repository.pipeline.repository_url}:latest",
"command": [ "--s3_bucket", "${var.input_bucket}", "--s3_upload_bucket", "${var.upload_bucket}"],
"executionRoleArn": "${aws_iam_role.pipeline_batch_instance_role.arn}",
"memory": ${var.pipeline_memory},
"vcpus": ${var.pipeline_vcpus},
"mountPoints": [
{
readOnly = null,
containerPath = "/var/batch"
sourceVolume = "YOUR-FILE-SYSTEM-NAME"
}
]
}
CONTAINER_PROPERTIES
tags = {
environment = var.env
}
}
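Note that mountPoints names a sourceVolume, so the container properties also need a matching volumes entry that tells Batch where the volume comes from. A sketch of the extra JSON to add next to mountPoints; wiring it to the EFS file system declared below via efsVolumeConfiguration is my assumption:

"volumes": [
  {
    "name": "YOUR-FILE-SYSTEM-NAME",
    "efsVolumeConfiguration": {
      "fileSystemId": "${aws_efs_file_system.general.id}"
    }
  }
]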
resource "aws_efs_file_system" "general" {
creation_token = "YOUR-FILE-SYSTEM-NAME"
#kms_key_id = module.kms.arn
#encrypted = true
encrypted = false
performance_mode = "generalPurpose"
throughput_mode = "provisioned"
provisioned_throughput_in_mibps = 8
tags = {Name = "YOUR-FILE-SYSTEM-NAME"}
}
resource "aws_efs_access_point" "general" {
tags = {Name = "YOUR-FILE-SYSTEM-NAME"}
file_system_id = aws_efs_file_system.general.id
root_directory {
path = "/YOUR-FILE-SYSTEM-NAME"
creation_info {
owner_gid = "1000"
owner_uid = "1000"
permissions = "755"
}
}
posix_user {
uid = "1000"
gid = "1000"
}
}
## FOR REDUNDANCY
## It is a good idea to add a mount target per AZ you use
resource "aws_efs_mount_target" "a" {
  file_system_id  = aws_efs_file_system.general.id
  subnet_id       = "PUBLIC-SUBNET-A"
  security_groups = [aws_security_group.general.id]
}

resource "aws_efs_mount_target" "b" {
  file_system_id  = aws_efs_file_system.general.id
  subnet_id       = "PUBLIC-SUBNET-B"
  security_groups = [aws_security_group.general.id]
}

resource "aws_security_group" "general" {
  name   = "YOUR-SECURITY-GROUP-NAME"
  vpc_id = "YOUR-VPC-ID"
  tags   = { Name = "YOUR-SECURITY-GROUP-NAME" }
}

resource "aws_security_group_rule" "ingress" {
  type              = "ingress"
  cidr_blocks       = ["0.0.0.0/0"]
  ipv6_cidr_blocks  = ["::/0"]
  from_port         = "2049"
  to_port           = "2049"
  protocol          = "tcp"
  security_group_id = aws_security_group.general.id
}

resource "aws_security_group_rule" "egress" {
  type              = "egress"
  description       = "egress"
  cidr_blocks       = ["0.0.0.0/0"]
  ipv6_cidr_blocks  = ["::/0"]
  from_port         = "0"
  to_port           = "0"
  protocol          = "all"
  security_group_id = aws_security_group.general.id
}
You'll be able to mount your EFS drive on any default Amazon Linux EC2 instance like this: mkdir -p /data/efs && mount -t nfs4 -o nfsvers=4.1,rsize=1048576,wsize=1048576,hard,timeo=600,retrans=2,noresvport REPLACE_WITH_EFS_DNS:/ /data/efs
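As for another way to add the storage: since the compute environment is EC2-type, you may be able to grow the instances' block storage instead of mounting EFS, by attaching a launch template to the compute environment. A minimal, untested sketch; the template name, device name, and 60 GB size are assumptions:

resource "aws_launch_template" "pipeline_storage" {
  name = "pipeline-batch-storage"

  # Enlarge the volume backing the AMI; the device name may differ
  # depending on which ECS-optimized AMI the compute environment uses.
  block_device_mappings {
    device_name = "/dev/xvda"

    ebs {
      volume_size = 60 # headroom for the ~50 GB download
      volume_type = "gp3"
    }
  }
}

resource "aws_batch_compute_environment" "pipeline" {
  # ... existing arguments from the question ...

  compute_resources {
    # ... existing arguments from the question ...

    launch_template {
      launch_template_id = aws_launch_template.pipeline_storage.id
    }
  }
}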

Adding service endpoints as part of subnet variables in network module in Terraform

I have created a network module. Below is the code for it.
variables.tf
variable "resource_group_name" {
description = "Name of the resource group to be imported."
type = string
}
variable "location" {
description = "The location of the vnet to create. Defaults to the location of the resource group."
type = string
default = null
}
variable "vnets" {
type = map(object({
address_space = string
subnets = list(object({
subnet_name = string
subnet_address = string
service_endpoints = list(string)
}))
}))
default = {
"bupavnet1" = {
address_space = "192.168.0.0/16",
subnets = []
},
"bupavnet2" = {
address_space = "10.0.0.0/16",
subnets = [
{
subnet_name = "subnet1_bupavnet1"
subnet_address = "10.0.2.0/24"
service_endpoints = []
},
{
subnet_name = "subnet2_bupavnet1"
subnet_address = "10.0.0.0/24"
service_endpoints = ["Microsoft.AzureCosmosDB","Microsoft.ContainerRegistry"]
}
]
},
"bupavnet3" = {
address_space = "10.80.0.0/16"
subnets = [
{
subnet_name = "subnet1_bupavnet3"
subnet_address = "10.80.2.0/24"
service_endpoints = ["Microsoft.AzureCosmosDB","Microsoft.ContainerRegistry"]
},
{
subnet_name = "subnet2_bupavnet3"
subnet_address = "10.80.1.0/24"
service_endpoints = ["Microsoft.AzureCosmosDB","Microsoft.ContainerRegistry"]
},
{
subnet_name = "subnet3_bupavnet3"
subnet_address = "10.80.0.0/24"
service_endpoints = ["Microsoft.AzureCosmosDB","Microsoft.ContainerRegistry"]
},
]
}
}
}
output.tf
output "vnet_names" {
description = "The name of the virtual networks"
value = tomap({for k, v in azurerm_virtual_network.vnets: k => v.name})
}
output "vnet_addresses" {
description = "The name of the virtual networks"
value = tomap({for k, v in azurerm_virtual_network.vnets: k => v.address_space})
}
output "subnet_names" {
description = "The name of the subnets"
value = tomap({for k, v in azurerm_subnet.subnets: k => v.name})
}
output "subnet_addresses" {
description = "The name of the subnet addresses"
value = {for k, v in azurerm_subnet.subnets: k => v.address_prefixes}
}
output "subnet_ids" {
description = "The name of the subnet addresses"
value = tomap({for k, v in azurerm_subnet.subnets: k => v.id})
}
main.tf
data "azurerm_resource_group" "network" {
name = var.resource_group_name
}
resource "azurerm_virtual_network" "vnets" {
for_each = var.vnets
name = each.key
resource_group_name = data.azurerm_resource_group.network.name
location = data.azurerm_resource_group.network.location
address_space = [each.value.address_space]
}
resource "azurerm_subnet" "subnets" {
for_each = local.subnets
name = each.value.subnet_name
resource_group_name = data.azurerm_resource_group.network.name
virtual_network_name = azurerm_virtual_network.vnets[each.value.vnet_name].name
address_prefixes = [each.value.subnet_address]
service_endpoints = [each.value.service_endpoints]
}
locals.tf
locals {
  subnets_flatlist = flatten([for key, val in var.vnets : [
    for subnet in val.subnets : {
      vnet_name         = key
      subnet_name       = subnet.subnet_name
      subnet_address    = subnet.subnet_address
      service_endpoints = subnet.service_endpoints
    }
  ]])

  subnets = { for subnet in local.subnets_flatlist : subnet.subnet_name => subnet }
}
main.tf
resource "azurerm_resource_group" "rg2" {
name = "rg2"
location = "Australia East"
}
module "network" {
source = "./network_resources"
resource_group_name = azurerm_resource_group.rg2.name
location = azurerm_resource_group.rg2.location
}
I am getting the below error when I run terraform plan:
│ Error: Incorrect attribute value type
│
│ on network_resources\main.tf line 21, in resource "azurerm_subnet" "subnets":
│ 21: service_endpoints = [each.value.service_endpoints]
│ ├────────────────
│ │ each.value.service_endpoints is list of string with 2 elements
│
│ Inappropriate value for attribute "service_endpoints": element 0: string required.
Can you please let me know how service_endpoints should be passed as part of the vnets variable so that the module works?
The reason you get this error is that service_endpoints is already a list, but you are wrapping it in [], which creates a list of lists. Remove the [] from service_endpoints:
resource "azurerm_subnet" "subnets" {
for_each = local.subnets
name = each.value.subnet_name
resource_group_name = data.azurerm_resource_group.network.name
virtual_network_name = azurerm_virtual_network.vnets[each.value.vnet_name].name
address_prefixes = [each.value.subnet_address]
service_endpoints = each.value.service_endpoints
}
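As a follow-up, if you do not want to repeat service_endpoints = [] for every subnet that has none, Terraform 1.3 and later can default it in the type constraint itself. A sketch of just the changed variable, assuming you can require that Terraform version:

variable "vnets" {
  type = map(object({
    address_space = string
    subnets = list(object({
      subnet_name    = string
      subnet_address = string
      # Callers may omit this; Terraform fills in the empty list.
      service_endpoints = optional(list(string), [])
    }))
  }))
}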

Inappropriate value for "role":string required terraform12

I have written a Terraform configuration file for a bastion entry point on an application.
resource "aws_instance" "instance" {
  ami                  = var.ami
  ebs_optimized        = var.ebs_optimized
  iam_instance_profile = aws_iam_instance_profile.iam_instance_profile
  instance_type        = var.instance_type
  key_name             = "quadops"
  subnet_id            = var.subnet_id
  user_data            = var.user_data

  tags = {
    Name     = "${var.name}"
    Business = "Infrastracture"
    app_name = "infra"
    app_env  = "${var.env}"
  }

  volume_tags = {
    Name     = "${var.name}"
    Business = "Infrastracture"
    app_name = "infra"
    app_env  = "${var.env}"
  }

  vpc_security_group_ids = [aws_security_group.security_group.id]
}
resource "aws_security_group" "security_group" {
name = "${var.name}-security-group"
vpc_id = var.vpc_id
ingress {
from_port = 22
to_port = 22
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
egress {
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
}
tags = {
Name = "${var.name}"
Business = "Infrastracture"
app_name = "infra"
app_env = "${var.env}"
}
}
resource "aws_iam_instance_profile" "iam_instance_profile" {
name = "${var.name}-iam-instance-profile"
role = aws_iam_role.iam_role
tags = {
Name = "${var.name}"
Business = "Infrastracture"
app_name = "infra"
app_env = "${var.env}"
}
}
resource "aws_iam_role" "iam_role" {
assume_role_policy = jsonencode({
Version = "2012-10-17"
Statement = [
{
Action = "sts:AssumeRole"
Effect = "Allow"
Sid = ""
Principal = {
Service = "ec2.amazonaws.com"
}
},
]
})
name = "${var.name}-iam-role"
tags = {
Name = "${var.name}-iam-role"
Business = "Infrastracture"
app_name = "infra"
app_env = "${var.env}"
}
}
resource "aws_eip" "eip" {
vpc = true
instance = aws_instance.instance.id
tags = {
Name = "${var.name}-eip"
Business = "Infrastracture"
app_name = "infra"
app_env = "${var.env}"
}
}
resource "cloudflare_record" "record" {
zone_id = var.zone_id
name = "bastion.${var.env}"
type = "A"
value = "aws_eip.eip.public_ip"
}
Upon running plan, I'm getting this error:
on .terraform/modules/bastion/main.tf line 49, in resource "aws_iam_instance_profile" "iam_instance_profile":
49: role = aws_iam_role.iam_role
|----------------
| aws_iam_role.iam_role is object with 15 attributes
Inappropriate value for attribute "role": string required.
I can't seem to get over this hurdle. I think I'm referencing the resource correctly, but Terraform 0.12 says it requires a string. Am I passing the values incorrectly? Thanks.
You are passing the entire aws_iam_role object to the role argument, which is what causes the error. Instead, pass the name of the role, like so:
resource "aws_iam_instance_profile" "iam_instance_profile" {
role = aws_iam_role.iam_role.name
}
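The same object-versus-string rule presumably applies one level up: aws_instance's iam_instance_profile argument also expects a name rather than the whole aws_iam_instance_profile object, so the bastion instance likely needs the matching fix:

resource "aws_instance" "instance" {
  # ... other arguments as in the question ...
  iam_instance_profile = aws_iam_instance_profile.iam_instance_profile.name
}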

Terraform does not deploy correct resources

I'm new to Terraform, unable to deploy the below, and banging my head against it. Terraform does not deploy the correct resources to my environment, and I'm unsure what I've missed. Any help is appreciated. Code is below.
Thanks. (Any advice on Terraform in general is appreciated. I can tell it's going to be something very obvious, but a one-hour exercise has been bothering me all day!)
provider "aws" {
region="us-east-1"
}
data "aws_vpc" "vpc" {
tags = {
Name = "vpc"
}
}
data "aws_subnet_ids" "ecommerce" {
vpc_id = "${data.aws_vpc.vpc.id}"
tags = {
Name = "database"
}
}
resource "aws_db_subnet_group" "aurora" {
name = "aurora"
subnet_ids = ["${data.aws_subnet_ids.ecommerce.ids}"]
tags = {
Name = "database"
}
}
resource "aws_security_group" "database" {
name = "database"
description = "ecommerce database"
vpc_id = "${data.aws_vpc.vpc.id}"
tags = {
Name = "database"
}
}
resource "aws_security_group" "web" {
name = "web"
description = "ecommerce web"
vpc_id = "${data.aws_vpc.vpc.id}"
tags = {
Name = "web"
}
}
resource "aws_security_group_rule" "web_allow_https_in" {
description = "HTTPS access"
type = "ingress"
from_port = 443
to_port=443
protocol = "tcp"
cidr_blocks = ["8.8.8.8/32"]
security_group_id = "${aws_security_group.web.id}"
}
resource "aws_security_group_rule" "web_allow_mysql_out" {
description = "MySQL access"
type = "egress"
from_port= 3306
to_port = 3306
protocol = "tcp"
security_group_id = "${aws_security_group.web.id}"
}
resource "aws_security_group_rule" "db_allow_web_in" {
description = "Web access"
type = "ingress"
from_port = 3306
to_port = 3306
protocol="tcp"
security_group_id = "${aws_security_group.database.id}"
source_security_group_id = "${aws_security_group.web.id}"
}
data "aws_ami" "web" {
owners = ["self"]
most_recent = true
filter {
name = "name"
values = ["web*"]
}
}
resource "aws_instance" "web" {
ami = "${data.aws_ami.web.id}"
ebs_optimized = true
instance_type = "t3.micro"
associate_public_ip_address = false
vpc_security_group_ids = ["${aws_security_group.web.id}"]
subnet_id = "${data.aws_subnet_ids.ecommerce.ids[0]}"
ebs_block_device = {
device_name = "/dev/sdb"
volume_type = "gp2"
volume_size = "16"
delete_on_termination = true
}
tags = {
Name = "Web"
}
lifecycle {
ignore_changes = ["ami"]
}
}
resource "aws_rds_cluster" "aurora" {
apply_immediately = true
backup_retention_period = 7
cluster_identifier = "mydatabase"
engine = "aurora"
database_name = "main"
storage_encrypted=true
kms_key_id = "arn:aws**"
db_subnet_group_name = "${aws_db_subnet_group.aurora.id}"
final_snapshot_identifier = "final"
master_password = "*"
master_username = "*"
vpc_security_group_ids = ["${aws_security_group.database.id}"]
}
resource "aws_rds_cluster_instance" "aurora" {
count = 2
apply_immediately = true
identifier = "mydatabase-${count.index}"
cluster_identifier = "${aws_rds_cluster.aurora.id}"
db_subnet_group_name = "${aws_db_subnet_group.aurora.id}"
instance_class = "db.r12.large"
publicly_accessible = false
}
resource "aws_eip" "eip" {
vpc = true
}
resource "aws_eip_association" "assoc" {
instance_id = "${aws_instance.web.id}"
allocation_id = "${aws_eip.eip.id}"
}
data "aws_route53_zone" "zone" {
name = "ecommerce.com"
}
resource "aws_route53_record" "record" {
zone_id = "${data.aws_route53_zone.zone.zone_id}"
type="A"
name = "www.ecommerce.com"
ttl = 300
records = ["${aws_eip.eip.public_ip}"]
}
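Two details in the configuration above stand out and may be what is blocking the deployment: ebs_block_device is a nested block rather than an assignable attribute, and data.aws_subnet_ids returns a set of strings, which cannot be indexed directly in Terraform 0.12. A hedged sketch of both fixes:

resource "aws_instance" "web" {
  # ... other arguments as above ...

  # Sets have no defined order; convert to a list before indexing.
  subnet_id = tolist(data.aws_subnet_ids.ecommerce.ids)[0]

  # Nested block syntax: no "=" before the braces.
  ebs_block_device {
    device_name           = "/dev/sdb"
    volume_type           = "gp2"
    volume_size           = 16
    delete_on_termination = true
  }
}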

ECS and Application Load Balancer Ephemeral Ports using Terraform

I tried to build an ECS cluster with an ALB in front using Terraform. Because I used dynamic port mapping, the targets are not registered as healthy. I played with the health check and success codes; if I set the matcher to 301, everything is fine.
ECS
data "template_file" "mb_task_template" {
template = file("${path.module}/templates/marketplace-backend.json.tpl")
vars = {
name = "${var.mb_image_name}"
port = "${var.mb_port}"
image = "${aws_ecr_repository.mb.repository_url}"
log_group = "${aws_cloudwatch_log_group.mb.name}"
region = "${var.region}"
}
}
resource "aws_ecs_cluster" "mb" {
name = var.mb_image_name
}
resource "aws_ecs_task_definition" "mb" {
family = var.mb_image_name
container_definitions = data.template_file.mb_task_template.rendered
volume {
name = "mb-home"
host_path = "/ecs/mb-home"
}
}
resource "aws_ecs_service" "mb" {
name = var.mb_repository_url
cluster = aws_ecs_cluster.mb.id
task_definition = aws_ecs_task_definition.mb.arn
desired_count = 2
iam_role = var.aws_iam_role_ecs
depends_on = [aws_autoscaling_group.mb]
load_balancer {
target_group_arn = var.target_group_arn
container_name = var.mb_image_name
container_port = var.mb_port
}
}
resource "aws_autoscaling_group" "mb" {
name = var.mb_image_name
availability_zones = ["${var.availability_zone}"]
min_size = var.min_instance_size
max_size = var.max_instance_size
desired_capacity = var.desired_instance_capacity
health_check_type = "EC2"
health_check_grace_period = 300
launch_configuration = aws_launch_configuration.mb.name
vpc_zone_identifier = flatten([var.vpc_zone_identifier])
lifecycle {
create_before_destroy = true
}
}
data "template_file" "user_data" {
template = file("${path.module}/templates/user_data.tpl")
vars = {
ecs_cluster_name = "${var.mb_image_name}"
}
}
resource "aws_launch_configuration" "mb" {
name_prefix = var.mb_image_name
image_id = var.ami
instance_type = var.instance_type
security_groups = ["${var.aws_security_group}"]
iam_instance_profile = var.aws_iam_instance_profile
key_name = var.key_name
associate_public_ip_address = true
user_data = data.template_file.user_data.rendered
lifecycle {
create_before_destroy = true
}
}
resource "aws_cloudwatch_log_group" "mb" {
name = var.mb_image_name
retention_in_days = 14
}
ALB
locals {
  target_groups = ["1", "2"]
}

resource "aws_alb" "mb" {
  name               = "${var.mb_image_name}-alb"
  internal           = false
  load_balancer_type = "application"
  security_groups    = ["${aws_security_group.mb_alb.id}"]
  subnets            = var.subnets

  tags = {
    Name = var.mb_image_name
  }
}

resource "aws_alb_target_group" "mb" {
  count       = length(local.target_groups)
  name        = "${var.mb_image_name}-tg-${element(local.target_groups, count.index)}"
  port        = var.mb_port
  protocol    = "HTTP"
  vpc_id      = var.vpc_id
  target_type = "instance"

  health_check {
    path                = "/health"
    protocol            = "HTTP"
    timeout             = "10"
    interval            = "15"
    healthy_threshold   = "3"
    unhealthy_threshold = "3"
    matcher             = "200-299"
  }

  lifecycle {
    create_before_destroy = true
  }

  tags = {
    Name = var.mb_image_name
  }
}

resource "aws_alb_listener" "mb_https" {
  load_balancer_arn = aws_alb.mb.arn
  port              = 443
  protocol          = "HTTPS"
  ssl_policy        = "ELBSecurityPolicy-2016-08"
  certificate_arn   = module.dns.certificate_arn

  default_action {
    type             = "forward"
    target_group_arn = aws_alb_target_group.mb.0.arn
  }
}

resource "aws_alb_listener_rule" "mb_https" {
  listener_arn = aws_alb_listener.mb_https.arn
  priority     = 100

  action {
    type             = "forward"
    target_group_arn = aws_alb_target_group.mb.0.arn
  }

  condition {
    field  = "path-pattern"
    values = ["/health/"]
  }
}
Okay, looks like the code above is working. I had a different issue with networking.
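For anyone who lands here with the same symptom: with dynamic port mapping and target_type = "instance", ECS registers containers on ephemeral host ports (32768-65535 by default), so the container instances' security group must allow that whole range from the ALB's security group. A hedged sketch reusing the names above, assuming var.aws_security_group is the instance security group ID:

resource "aws_security_group_rule" "alb_to_ecs_ephemeral" {
  type                     = "ingress"
  from_port                = 32768
  to_port                  = 65535
  protocol                 = "tcp"
  security_group_id        = var.aws_security_group
  source_security_group_id = aws_security_group.mb_alb.id
}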
