I'm receiving this curious error message
PlatformTaskDefinitionIncompatibilityException: The specified platform does not satisfy the task definition’s required capabilities
I suspect it's something to do with this line although not quite sure
file_system_id = aws_efs_file_system.main.id
This is my script:
provider "aws" {
region = "us-east-1"
profile = var.profile
}
### Network
# Fetch AZs in the current region
data "aws_availability_zones" "available" {}
resource "aws_vpc" "main" {
cidr_block = "172.17.0.0/16"
}
# Create var.az_count private subnets, each in a different AZ
resource "aws_subnet" "private" {
count = "${var.az_count}"
cidr_block = "${cidrsubnet(aws_vpc.main.cidr_block, 8, count.index)}"
availability_zone = "${data.aws_availability_zones.available.names[count.index]}"
vpc_id = "${aws_vpc.main.id}"
}
# Create var.az_count public subnets, each in a different AZ
resource "aws_subnet" "public" {
count = "${var.az_count}"
cidr_block = "${cidrsubnet(aws_vpc.main.cidr_block, 8, var.az_count + count.index)}"
availability_zone = "${data.aws_availability_zones.available.names[count.index]}"
vpc_id = "${aws_vpc.main.id}"
map_public_ip_on_launch = true
}
# IGW for the public subnet
resource "aws_internet_gateway" "gw" {
vpc_id = "${aws_vpc.main.id}"
}
# Route the public subnet traffic through the IGW
resource "aws_route" "internet_access" {
route_table_id = "${aws_vpc.main.main_route_table_id}"
destination_cidr_block = "0.0.0.0/0"
gateway_id = "${aws_internet_gateway.gw.id}"
}
# Create a NAT gateway with an EIP for each private subnet to get internet connectivity
resource "aws_eip" "gw" {
count = "${var.az_count}"
vpc = true
depends_on = ["aws_internet_gateway.gw"]
}
resource "aws_nat_gateway" "gw" {
count = "${var.az_count}"
subnet_id = "${element(aws_subnet.public.*.id, count.index)}"
allocation_id = "${element(aws_eip.gw.*.id, count.index)}"
}
# Create a new route table for the private subnets
# And make it route non-local traffic through the NAT gateway to the internet
resource "aws_route_table" "private" {
count = "${var.az_count}"
vpc_id = "${aws_vpc.main.id}"
route {
cidr_block = "0.0.0.0/0"
nat_gateway_id = "${element(aws_nat_gateway.gw.*.id, count.index)}"
}
}
# Explicitely associate the newly created route tables to the private subnets (so they don't default to the main route table)
resource "aws_route_table_association" "private" {
count = "${var.az_count}"
subnet_id = "${element(aws_subnet.private.*.id, count.index)}"
route_table_id = "${element(aws_route_table.private.*.id, count.index)}"
}
### Security
# ALB Security group
# This is the group you need to edit if you want to restrict access to your application
resource "aws_security_group" "lb" {
name = "tf-ecs-alb"
description = "controls access to the ALB"
vpc_id = "${aws_vpc.main.id}"
ingress {
protocol = "tcp"
from_port = 80
to_port = 80
cidr_blocks = ["0.0.0.0/0"]
}
egress {
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
}
}
# Traffic to the ECS Cluster should only come from the ALB
resource "aws_security_group" "ecs_tasks" {
name = "tf-ecs-tasks"
description = "allow inbound access from the ALB only"
vpc_id = "${aws_vpc.main.id}"
ingress {
protocol = "tcp"
from_port = "${var.app_port}"
to_port = "${var.app_port}"
security_groups = ["${aws_security_group.lb.id}"]
}
egress {
protocol = "-1"
from_port = 0
to_port = 0
cidr_blocks = ["0.0.0.0/0"]
}
}
### ALB
resource "aws_alb" "main" {
name = "tf-ecs-chat"
subnets = aws_subnet.public.*.id
security_groups = ["${aws_security_group.lb.id}"]
}
resource "aws_alb_target_group" "app" {
name = "tf-ecs-chat"
port = 80
protocol = "HTTP"
vpc_id = "${aws_vpc.main.id}"
target_type = "ip"
}
# Redirect all traffic from the ALB to the target group
resource "aws_alb_listener" "front_end" {
load_balancer_arn = "${aws_alb.main.id}"
port = "80"
protocol = "HTTP"
default_action {
target_group_arn = "${aws_alb_target_group.app.id}"
type = "forward"
}
}
### ECS
resource "aws_ecs_cluster" "main" {
name = "tf-ecs-cluster"
}
resource "aws_ecs_task_definition" "app" {
family = "app"
network_mode = "awsvpc"
requires_compatibilities = ["FARGATE"]
cpu = "${var.fargate_cpu}"
memory = "${var.fargate_memory}"
task_role_arn = "${aws_iam_role.ecs_task_role_role.arn}"
execution_role_arn = "${aws_iam_role.ecs_task_role_role.arn}"
container_definitions = <<DEFINITION
[
{
"cpu": ${var.fargate_cpu},
"image": "${var.app_image}",
"memory": ${var.fargate_memory},
"name": "app",
"networkMode": "awsvpc",
"portMappings": [
{
"containerPort": ${var.app_port},
"hostPort": ${var.app_port}
}
]
}
]
DEFINITION
volume {
name = "efs-html"
efs_volume_configuration {
file_system_id = aws_efs_file_system.main.id
root_directory = "/opt/data"
}
}
}
resource "aws_ecs_service" "main" {
name = "tf-ecs-service"
cluster = "${aws_ecs_cluster.main.id}"
task_definition = "${aws_ecs_task_definition.app.arn}"
desired_count = "${var.app_count}"
launch_type = "FARGATE"
network_configuration {
security_groups = ["${aws_security_group.ecs_tasks.id}"]
subnets = aws_subnet.private.*.id
}
load_balancer {
target_group_arn = "${aws_alb_target_group.app.id}"
container_name = "app"
container_port = "${var.app_port}"
}
depends_on = [
"aws_alb_listener.front_end",
]
}
# ECS roles & policies
# Create the IAM task role for ECS Task definition
resource "aws_iam_role" "ecs_task_role_role" {
name = "test-ecs-task-role"
assume_role_policy = "${file("ecs-task-role.json")}"
tags = {
Terraform = "true"
}
}
# Create the AmazonECSTaskExecutionRolePolicy managed role
resource "aws_iam_policy" "ecs_task_role_policy" {
name = "test-ecs-AmazonECSTaskExecutionRolePolicy"
description = "Provides access to other AWS service resources that are required to run Amazon ECS tasks"
policy = "${file("ecs-task-policy.json")}"
}
# Assign the AmazonECSTaskExecutionRolePolicy managed role to ECS
resource "aws_iam_role_policy_attachment" "ecs_task_policy_attachment" {
role = "${aws_iam_role.ecs_task_role_role.name}"
policy_arn = "${aws_iam_policy.ecs_task_role_policy.arn}"
}
resource "aws_efs_file_system" "main" {
tags = {
Name = "ECS-EFS-FS"
}
}
resource "aws_efs_mount_target" "main" {
count = "${var.subnets-count}"
file_system_id = "${aws_efs_file_system.main.id}"
subnet_id = "${element(var.subnets, count.index)}"
}
variables.tf
variable "az_count" {
description = "Number of AZs to cover in a given AWS region"
default = "2"
}
variable "app_image" {
description = "Docker image to run in the ECS cluster"
default = "xxxxxxxxxx.dkr.ecr.us-east-1.amazonaws.com/test1:nginx"
}
variable "app_port" {
description = "Port exposed by the docker image to redirect traffic to"
# default = 3000
default = 80
}
variable "app_count" {
description = "Number of docker containers to run"
default = 2
}
variable "fargate_cpu" {
description = "Fargate instance CPU units to provision (1 vCPU = 1024 CPU units)"
default = "256"
}
variable "fargate_memory" {
description = "Fargate instance memory to provision (in MiB)"
default = "512"
}
################
variable "subnets" {
type = "list"
description = "list of subnets to mount the fs to"
default = ["subnet-xxxxxxx","subnet-xxxxxxx"]
}
variable "subnets-count" {
type = "string"
description = "number of subnets to mount to"
default = 2
}
You simply require to upgrade your ecs service to latest version
resource "aws_ecs_service" "service" {
platform_version = "1.4.0"
launch_type = "FARGATE"
...
}
efs feature is only available on the latest version
When you don’t specify platform_version, it will default to LATEST which is set to 1.3.0 which doesn’t allow efs volumes.
UPDATE: As of 1/21/22, it seems that the LATEST ECS service version is 1.4.0, so explicitly specifying the ECS platform version is no longer necessary to have EFS mounts work. Per:
https://docs.aws.amazon.com/AmazonECS/latest/developerguide/platform-linux-fargate.html
Related
I have the following simple EC2 creating terraform script:
terraform {
required_providers {
aws = {
source = "hashicorp/aws"
version = "~> 3.27"
}
}
required_version = ">= 0.14.9"
}
provider "aws" {
profile = "default"
region = "us-east-1" # virginia
}
resource "aws_network_interface" "network" {
subnet_id = "subnet-0*******"
security_groups = ["sg-******"]
attachment {
instance = aws_instance.general_instance.id
device_index = 0
}
}
resource "aws_instance" "general_instance" {
ami = "ami-00874d747dde814fa" # unbutu server
instance_type = "m5.2xlarge"
key_name = "my-key"
root_block_device {
delete_on_termination = true
volume_size = 500
tags = { Name = "Root Volume" }
}
# user_data = file("startup.sh") # file directive can install stuff
tags = {
Name = "General"
}
}
I get the following:
Error: Error launching source instance: VPCIdNotSpecified: No default VPC for this user. GroupName is only supported for EC2-Classic and default VPC.
I find this odd because the classic flow is to make a VPC, make a subnet and then make a network interface. However, I have a VPC I want to use that is associated with the subnet I'm using. So I'm wondering whey it's asking for a VPC id if I have it associated with the subnet I'm requesting.
Thanks in advance
I figured it out already.
resource "aws_instance" "general_instance" {
ami = "ami-00874d747dde814fa" # unbutu server
instance_type = "m5.2xlarge"
key_name = "EC2-foundry"
network_interface {
network_interface_id = aws_network_interface.network.id
device_index = 0
}
root_block_device {
delete_on_termination = true
volume_size = 500
tags = { Name = "Foundry Root Volume" }
}
# user_data = file("startup.sh") # file directive can install stuff
tags = {
Name = "Foundry General"
}
}
network interface must be attached in the aws resource
I need to setup multiple private subnets at AWS per account and I need to have only one NAT GW per Account and traffic routed to this. The problem I guess is that the values are a map without an index. As I remember with count you have an index which can be simply accessed subnet_id = aws_subnet.private[0].id But I can't change the current setup. I need to create an idnex out of this map.
I have a yaml file with this values:
aws:
- accounts: ciss-goesaws-test
private_subnets:
-
az: eu-central-1a
short: a
cidr: 10.44.4.96/27
-
az: eu-central-1b
short: b
cidr: 10.44.5.128/27
-
az: eu-central-1c
short: c
cidr: 10.44.6.160/27
I have the following terraform code. But this creates one NAT GW per subnet. I need to NAT GW to be created in only one of the subnets.
locals {
private = flatten([
for a in var.aws : [
for ps in a.private_subnets : {
accounts = a.accounts
az = ps.az
cidr = ps.cidr
short = ps.short
}
]
])
}
resource "aws_eip" "this" {
vpc = true
}
resource "aws_nat_gateway" "this" {
for_each = {
for cidr_block in local.private : cidr_block.cidr => cidr_block
}
allocation_id = aws_eip.this.id
subnet_id = aws_subnet.private[each.key].id
}
resource "aws_subnet" "private" {
for_each = {
for cidr_block in local.private : cidr_block.cidr => cidr_block
}
availability_zone = each.value.az
cidr_block = each.value.cidr
vpc_id = aws_vpc.this.id
tags = {
Name = "${each.value.accounts}-private-${each.value.short}"
}
}
resource "aws_route_table" "private" {
for_each = {
for cidr_block in local.private : cidr_block.cidr => cidr_block
}
vpc_id = aws_vpc.this.id
route {
cidr_block = "0.0.0.0/0"
nat_gateway_id = aws_nat_gateway.this[each.key].id
}
}
resource "aws_route_table_association" "private" {
for_each = {
for cidr_block in local.private : cidr_block.cidr => cidr_block
}
subnet_id = aws_subnet.private[each.key].id
route_table_id = aws_route_table.private[each.key].id
}
As the CIDRs are used as index for the subnet resource collection, you have to select one (maybe first) for the NAT GW.
For example:
locals {
nat_gw_subnet_cidr = var.aws[0].private_subnets[0].cidr
}
resource "aws_nat_gateway" "this" {
allocation_id = aws_eip.this.id
subnet_id = aws_subnet.private[local.nat_gw_subnet_cidr].id
}
# `aws_subnet` resource as in the question
# One route table should be enough, as all subnets share the same GW
resource "aws_route_table" "private" {
vpc_id = aws_vpc.this.id
route {
cidr_block = "0.0.0.0/0"
nat_gateway_id = aws_nat_gateway.this.id
}
# No route for the VPC?
}
resource "aws_route_table_association" "private" {
# Simpler to use the subnets as index
for_each = aws_subnet.private
subnet_id = each.value.id
route_table_id = aws_route_table.private.id
}
I am fairly new to TF and I have written some basic code. Enough to get a vpc up and running and add some subnets and deploy a simple ec2. I am starting to get to the point of wanting to use modules. I struggle with the "keeping generic" thing so I can reuse them over and over. I do not understand how values get passed into modules. For instance I have a module that deploys a vpc, within the same project I have a module that deploys a vpc endpoint. The questions becomes how do i get the value of the vpc_id created with vpc module into the vpc endpoint module? Does anyone have an example of this?
main.tf
provider "aws" {
region = var.aws_region
}
/*Module for VPC creation*/
module "vpc" {
source = "./modules/vpc"
vpc_cidr = var.vpc_cidr
environment = var.environment
tnt_public_subnets_cidr = var.tnt_public_subnets_cidr
availability_zones = var.availability_zones
}
/*Module for EC2 Webserver creation*/
module "webserver" {
source = "./modules/ec2/webserver"
count = var.instance_count
environment = var.environment
subnet_id = module.vpc.tnt_public_subnets_cidr.id
}
/*Module for VPC endpoint creation*/
module "s3-vpce"{
source = "git::https://github.com/tn-sts-cloudtn/sts-terraform-
modules.git//s3-vpce-module/modules//s3-vpce"
vpc_id = module.vpc.vpc_id
}
VPC Module TF File:
/*==== The VPC ======*/
resource "aws_vpc" "vpc" {
cidr_block = var.vpc_cidr
assign_generated_ipv6_cidr_block = true
enable_dns_hostnames = true
enable_dns_support = true
tags = {
Name = "${var.environment}_vpc"
Environment = var.environment
}
}
/*==== Internet Gateway for Public Subnets ======*/
/* Internet gateway for the public subnet */
resource "aws_internet_gateway" "igw" {
vpc_id = aws_vpc.vpc.id
tags = {
Name = "${var.environment}_igw"
Environment = var.environment
}
}
/* Elastic IP for NAT
resource "aws_eip" "instance_eip" {
count = 1
vpc = true
depends_on = [aws_internet_gateway.tnt_igw]
tags ={
Name = "sts_net_infra-mgmt_eip${count.index + 1}"
Environment = var.environment
}
}*/
/* Public subnet */
resource "aws_subnet" "public_subnets_cidr" {
vpc_id = aws_vpc.vpc.id
count = length(var.availability_zones)
cidr_block = var.public_subnets_cidr [count.index]
availability_zone = element(var.availability_zones, count.index)
map_public_ip_on_launch = true
tags ={
Name = "${var.environment}_mgmt_subnet_${count.index + 1}"
Environment = var.environment
}
}
/* Private subnet
resource "aws_subnet" "tnt_private_subnet" {
vpc_id = aws_vpc.tnt_vpc.id
count = length(var.tnt_private_subnets_cidr)
cidr_block = var.tnt_public_subnets_cidr [count.index]
availability_zone = element(var.availability_zones, count.index)
map_public_ip_on_launch = false
tags = {
#Name = var.environment-private-subnet
Environment = var.environment
}
*/
/* Routing table for private subnet
resource "aws_route_table" "tnt_private_rtb" {
vpc_id = aws_vpc.tnt_vpc.id
tags = {
Name = var.environment_private_route_table
Environment = var.environment
}
}*/
/* Routing table for public subnet */
resource "aws_route_table" "public_rtb" {
vpc_id = aws_vpc.vpc.id
tags = {
Name = "${var.environment}_public_route_table"
Environment = var.environment
}
}
resource "aws_route" "public_internet_gateway" {
route_table_id = aws_route_table.public_rtb.id
destination_cidr_block = "0.0.0.0/0"
gateway_id = aws_internet_gateway.igw.id
}
/* Route table associations */
resource "aws_route_table_association" "public" {
count = length(var.public_subnets_cidr)
subnet_id = element(aws_subnet.public_subnets_cidr.*.id, count.index)
route_table_id = aws_route_table.public_rtb.id
}
So I need to output the VPC ID for the VPC endpoint, so I tried to use an output.
output.tf
output "vpc_cidr" {
value = aws_vpc.tnt_vpc.id
}
output "tnt_public_subnets_cidr"{
value = aws_subnet.tnt_public_subnets_cidr.*.id
}
output "vpc_id" {
description = "The ID of the VPC"
value = aws_vpc.vpc.id
}
output "vpc_arn" {
description = "The ARN of the VPC"
value = concat(aws_vpc.tnt_vpc.*.arn, [""])[0]
}
I know I am doing it incorrectly, but I am struggling to understand how outputs flow from module to module.
You can use one moule output into onether module by defining output in each module.
I have created a route table with routing rules that refers to an existing internet gateway (IGW) and the route table is associated to the a new VPC created via TF template. However same IGW is already attached to another VPC. When I apply template it throws the following error,
Error: Error creating route: InvalidParameterValue: route table "X" and network gateway "Y" belong to different networks
status code: 400, request id: ab91c2ab-ef1e-4905-8a78-b6759bc1e250
Is this because an internet gateway can be attached only to a single VPC and has to reside within the same VPC? Or is this error caused due to any other reason?
try below code with terraform.
it has VPC, IGW, Subnets, Route tables and NAT gateway.
and it works well.
variable "region" {
default = "us-east-1"
}
variable "service_name" {
default = "demo-service"
}
locals {
public_subnets = {
"${var.region}a" = "10.10.101.0/24"
"${var.region}b" = "10.10.102.0/24"
"${var.region}c" = "10.10.103.0/24"
}
private_subnets = {
"${var.region}a" = "10.10.201.0/24"
"${var.region}b" = "10.10.202.0/24"
"${var.region}c" = "10.10.203.0/24"
}
}
resource "aws_vpc" "this" {
cidr_block = "10.10.0.0/16"
enable_dns_support = true
enable_dns_hostnames = true
tags = {
Name = "${var.service_name}-vpc"
}
}
resource "aws_internet_gateway" "this" {
vpc_id = "${aws_vpc.this.id}"
tags = {
Name = "${var.service_name}-internet-gateway"
}
}
resource "aws_subnet" "public" {
count = "${length(local.public_subnets)}"
cidr_block = "${element(values(local.public_subnets), count.index)}"
vpc_id = "${aws_vpc.this.id}"
map_public_ip_on_launch = true
availability_zone = "${element(keys(local.public_subnets), count.index)}"
tags = {
Name = "${var.service_name}-service-public"
}
}
resource "aws_subnet" "private" {
count = "${length(local.private_subnets)}"
cidr_block = "${element(values(local.private_subnets), count.index)}"
vpc_id = "${aws_vpc.this.id}"
map_public_ip_on_launch = true
availability_zone = "${element(keys(local.private_subnets), count.index)}"
tags = {
Name = "${var.service_name}-service-private"
}
}
resource "aws_default_route_table" "public" {
default_route_table_id = "${aws_vpc.this.main_route_table_id}"
tags = {
Name = "${var.service_name}-public"
}
}
resource "aws_route" "public_internet_gateway" {
count = "${length(local.public_subnets)}"
route_table_id = "${aws_default_route_table.public.id}"
destination_cidr_block = "0.0.0.0/0"
gateway_id = "${aws_internet_gateway.this.id}"
timeouts {
create = "5m"
}
}
resource "aws_route_table_association" "public" {
count = "${length(local.public_subnets)}"
subnet_id = "${element(aws_subnet.public.*.id, count.index)}"
route_table_id = "${aws_default_route_table.public.id}"
}
resource "aws_route_table" "private" {
vpc_id = "${aws_vpc.this.id}"
tags = {
Name = "${var.service_name}-private"
}
}
resource "aws_route_table_association" "private" {
count = "${length(local.private_subnets)}"
subnet_id = "${element(aws_subnet.private.*.id, count.index)}"
route_table_id = "${aws_route_table.private.id}"
}
resource "aws_eip" "nat" {
vpc = true
tags = {
Name = "${var.service_name}-eip"
}
}
resource "aws_nat_gateway" "this" {
allocation_id = "${aws_eip.nat.id}"
subnet_id = "${aws_subnet.public.0.id}"
tags = {
Name = "${var.service_name}-nat-gw"
}
}
resource "aws_route" "private_nat_gateway" {
route_table_id = "${aws_route_table.private.id}"
destination_cidr_block = "0.0.0.0/0"
nat_gateway_id = "${aws_nat_gateway.this.id}"
timeouts {
create = "5m"
}
}
Refer to this repository : ecs-with-codepipeline-example-by-terraform
Thank you all, it turned out to be an issue with VPC which internet gateway is attached to. Internet gateway must chose a VPC which it must be created. You cannot route traffic to an internet gateway not within the same VPC as it wouldn't have access to it otherwise. Therefore my attempt to route traffic to an internet gateway external to the VPC is not allowed.
This issue is resolved by creating a new internet gateway within the new VPC I created. However this mean I cannot use existing internet gateway thereby introducing other issue such as need to inform external partners to add permission to the new public IP of the internet gateway.
Here I'm trying to create one subnet per availability zone and then associate the route table with each of them.
locals {
aws_region = "${var.aws_regions[var.profile]}"
base_name = "${var.product}-${local.aws_region}"
aws_avzones = {
pro = ["eu-west-1a", "eu-west-1b", "eu-west-1c"]
dev = ["eu-west-2a", "eu-west-2b", "eu-west-2c"]
}
}
# ---
# Create VPC
resource "aws_vpc" "default" {
cidr_block = "${var.vpc_cidr_block}"
tags = {
Name = "${local.base_name}-vpc"
}
}
# ---
# Create public subnets - each in a different AZ
resource "aws_subnet" "public" {
count = "${length(local.aws_avzones[var.profile])}"
vpc_id = "${aws_vpc.default.id}"
cidr_block = "${cidrsubnet(var.vpc_cidr_block, 8, count.index)}"
availability_zone = "${element(local.aws_avzones[var.profile], count.index)}"
map_public_ip_on_launch = 1
tags = {
"Name" = "Public subnet - ${element(local.aws_avzones[var.profile], count.index)}"
}
}
# ---
# Create Internet gateway for inbound-outbound connections
resource "aws_internet_gateway" "default" {
vpc_id = "${aws_vpc.default.id}"
tags = {
"Name" = "${local.base_name}-igw"
}
}
# ---
# Create Internet gateway routes table
resource "aws_route_table" "pub" {
vpc_id = "${aws_vpc.default.id}"
route {
cidr_block = "0.0.0.0/0"
gateway_id = "${aws_internet_gateway.default.id}"
}
tags = {
Name = "${local.base_name}-rtb-igw"
}
}
# ---
# Associate public subnets with the public route table
resource "aws_route_table_association" "pub" {
count = "${length(aws_subnet.public.*.id)}"
subnet_id = "${element(aws_subnet.public.*.id, count.index)}"
route_table_id = "${aws_route_table.pub.id}"
}
Unfortunately terraform plan renders an error:
aws_route_table_association.pub: aws_route_table_association.pub: value of 'count' cannot be computed
Why it cannot be computed? Terraform did not complain about that when the infra. was all up and running, I discovered this error only after the destruction when attempting to to recreate the infra.
Currently my workaround is to comment out all the aws_route_table_association blocks, then terraform apply, uncomment and then finish the job. Obviously this is very far from ideal.
BTW, I also tried the explicit dependency declaration like so:
resource "aws_route_table_association" "pub" {
count = "${length(aws_subnet.public.*.id)}"
subnet_id = "${element(aws_subnet.public.*.id, count.index)}"
route_table_id = "${aws_route_table.pub.id}"
depends_on = ["aws_subnet.public"]
}
But it didn't help.
$ terraform --version
Terraform v0.11.11
+ provider.aws v1.52.0