DigitalOcean VPC droplets unable to ssh to each other - security

I have created a VPC containing four droplets: a bastion that only allows SSH from a single IP, a public web server, a private CMS that feeds the public web server, and a secure data store that is encrypted at rest. I have been working on firewall rules to allow all droplets within the VPC to talk to each other (SSH, 443), and have been completely unsuccessful.
I have a firewall that allows SSH into the bastion from our VPN IP - this works perfectly. The idea is to SSH into the bastion and then into the other droplets.
There is one SSH key, which is added to authorized_keys on each droplet.
Here is my Terraform for the firewalls:
resource "digitalocean_firewall" "secure_network" {
name = "secure-network"
droplet_ids = [digitalocean_droplet.www-sds.id, digitalocean_droplet.www-fe.id, digitalocean_droplet.www-cms.id]
inbound_rule {
protocol = "tcp"
port_range = "22"
source_addresses = [digitalocean_droplet.bastion.ipv4_address_private]
}
outbound_rule {
protocol = "icmp"
destination_addresses = ["0.0.0.0/0", "::/0"]
}
outbound_rule {
protocol = "tcp"
port_range = "1-65535"
destination_addresses = ["0.0.0.0/0", "::/0"]
}
outbound_rule {
protocol = "udp"
port_range = "1-65535"
destination_addresses = ["0.0.0.0/0", "::/0"]
}
}
resource "digitalocean_firewall" "vpn-only-access" {
name = "vpn-only-access"
droplet_ids = [digitalocean_droplet.bastion.id]
inbound_rule {
protocol = "tcp"
port_range = "22"
source_addresses = ["159.89.140.157"]
}
}
resource "digitalocean_firewall" "public-access" {
name = "public-access"
droplet_ids = [digitalocean_droplet.www-fe.id]
inbound_rule {
protocol = "tcp"
port_range = "443"
source_addresses = [digitalocean_droplet.www-fe.ipv4_address]
}
inbound_rule {
protocol = "tcp"
port_range = "80"
source_addresses = [digitalocean_droplet.www-fe.ipv4_address]
}
}
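For context, the secure-network firewall above only admits port 22 from the bastion's private address, so droplet-to-droplet SSH and HTTPS inside the VPC have no matching inbound rule. A minimal sketch of what intra-VPC rules could look like, assuming the VPC's private range is exposed by a digitalocean_vpc resource named "this" (hypothetical; the VPC resource is not shown in the question):
resource "digitalocean_firewall" "intra_vpc" {
  name = "intra-vpc"
  droplet_ids = [
    digitalocean_droplet.www-sds.id,
    digitalocean_droplet.www-fe.id,
    digitalocean_droplet.www-cms.id,
  ]

  # SSH between droplets inside the VPC
  inbound_rule {
    protocol         = "tcp"
    port_range       = "22"
    source_addresses = [digitalocean_vpc.this.ip_range] # hypothetical VPC resource reference
  }

  # HTTPS between droplets inside the VPC
  inbound_rule {
    protocol         = "tcp"
    port_range       = "443"
    source_addresses = [digitalocean_vpc.this.ip_range]
  }
}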

Related

Azure Firewall: Most common Azure Firewall Policy Rule Collection Rules

I have been asked to configure an Azure Firewall Policy rule collection with the most commonly used network rules and application rules.
I have gathered the following, in which I have captured the most commonly used network and application rules. However, I am not sure whether I am missing anything that is considered a common rule.
resource "azurerm_firewall_policy_rule_collection_group" "fwpolrcg" {
name = "fwpolicy-rcg"
firewall_policy_id = azurerm_firewall_policy.fwpol.id
priority = 100
network_rule_collection {
name = "network_rule_collection1"
priority = 100
action = "Allow"
rule {
name = "AllowHubToSpokeRDP"
protocols = ["TCP","UDP"]
source_addresses = var.hub_firewall_ip_range
destination_addresses = var.spoke_firewall_ip_range
destination_ports = ["3389"]
}
rule {
name = "AllowSpokeToHubRDP"
protocols = ["TCP","UDP"]
source_addresses = var.spoke_firewall_ip_range
destination_addresses = var.hub_firewall_ip_range
destination_ports = ["3389"]
}
rule {
name = "AllowHubToSpokeHTTPS"
protocols = ["TCP"]
source_addresses = var.hub_firewall_ip_range
destination_addresses = var.spoke_firewall_ip_range
destination_ports = ["443"]
}
rule {
name = "AllowSpokeToHubHTTPS"
protocols = ["TCP"]
source_addresses = var.spoke_firewall_ip_range
destination_addresses = var.hub_firewall_ip_range
destination_ports = ["443"]
}
rule {
name = "AllowHubToSpokeDNS"
protocols = ["TCP","UDP"]
source_addresses = var.hub_firewall_ip_range
destination_addresses = var.spoke_firewall_ip_range
destination_ports = ["53"]
}
rule {
name = "AllowSpokeToHubDNS"
protocols = ["TCP","UDP"]
source_addresses = var.spoke_firewall_ip_range
destination_addresses = var.hub_firewall_ip_range
destination_ports = ["53"]
}
}
application_rule_collection {
name = "application_rule_collection1"
priority = 100
action = "Allow"
rule {
name = "Windows Update"
source_addresses = ["*"]
fqdn_tags = [
"AppServiceEnvironment",
"AzureBackup",
"AzureKubernetesService",
"HDInsight",
"MicrosoftActiveProtectionService",
"WindowsDiagnostics",
"WindowsUpdate",
"WindowsVirtualDesktop"]
}
rule {
name = "AllowMicrosoftFqdns"
source_addresses = ["*"]
destination_fqdns = [
"*.cdn.mscr.io",
"mcr.microsoft.com",
"*.data.mcr.microsoft.com",
"management.azure.com",
"login.microsoftonline.com",
"acs-mirror.azureedge.net",
"dc.services.visualstudio.com",
"*.opinsights.azure.com",
"*.oms.opinsights.azure.com",
"*.microsoftonline.com",
"*.monitoring.azure.com",
]
protocols {
port = "80"
type = "Http"
}
protocols {
port = "443"
type = "Https"
}
}
rule {
name = "AllowFqdnsForOsUpdates"
source_addresses = ["*"]
destination_fqdns = [
"download.opensuse.org",
"security.ubuntu.com",
"ntp.ubuntu.com",
"packages.microsoft.com",
"snapcraft.io"
]
protocols {
port = "80"
type = "Http"
}
protocols {
port = "443"
type = "Https"
}
}
rule {
name = "AllowImagesFqdns"
source_addresses = ["*"]
destination_fqdns = [
"auth.docker.io",
"registry-1.docker.io",
"production.cloudflare.docker.com"
]
protocols {
port = "80"
type = "Http"
}
protocols {
port = "443"
type = "Https"
}
}
rule {
name = "AllowAzure"
source_addresses = ["*"]
destination_fqdns = [
"*.azure.*"
]
protocols {
port = "80"
type = "Http"
}
protocols {
port = "443"
type = "Https"
}
}
rule {
name = "AllowBing"
source_addresses = ["*"]
destination_fqdns = [
"*.bing.com"
]
protocols {
port = "80"
type = "Http"
}
protocols {
port = "443"
type = "Https"
}
}
rule {
name = "AllowGoogle"
source_addresses = ["*"]
destination_fqdns = [
"*.google.com"
]
protocols {
port = "80"
type = "Http"
}
protocols {
port = "443"
type = "Https"
}
}
}
depends_on = [azurerm_firewall_policy.fwpol]
}
I tried to reproduce the same in my environment, creating Azure Firewall Policy rule collection rules with Terraform:
Note: make sure to define all rules inside the collection blocks in order for the allow or deny action to apply.
See the documentation on creating an Azure Firewall policy rule collection group using Terraform.
Terraform code:
provider "azurerm" {
features {}
}
resource "azurerm_resource_group" "Thejesh" {
name = "Thejesh-resources"
location = "West Europe"
}
resource "azurerm_firewall_policy" "example" {
name = "example-fwpolicy"
resource_group_name = azurerm_resource_group.Thejesh.name
location = azurerm_resource_group.Thejesh.location
}
resource "azurerm_firewall_policy_rule_collection_group" "example" {
name = "example-fwpolicy-rcg"
firewall_policy_id = azurerm_firewall_policy.example.id
priority = 500
application_rule_collection {
name = "app_rule_collection1"
priority = 500
action = "Deny"
rule {
name = "app_rule_collection1_rule1"
protocols {
type = "Http"
port = 80
}
protocols {
type = "Https"
port = 443
}
source_addresses = ["10.0.0.1"]
destination_fqdns = ["*.microsoft.com","*.cdn.mscr.io",
"mcr.microsoft.com",
"*.data.mcr.microsoft.com",
"management.azure.com",
"login.microsoftonline.com",
"acs-mirror.azureedge.net",
"dc.services.visualstudio.com",
"*.opinsights.azure.com",
"*.oms.opinsights.azure.com",
"*.microsoftonline.com",
"*.monitoring.azure.com",]
}
}
network_rule_collection {
name = "network_rule_collection1"
priority = 400
action = "Deny"
rule {
name = "network_rule_collection1_rule1"
protocols = ["TCP", "UDP"]
source_addresses = ["10.0.0.1"]
destination_addresses = ["192.168.1.1", "192.168.1.2"]
destination_ports = ["80", "1000-2000"]
}
}
nat_rule_collection {
name = "nat_rule_collection1"
priority = 300
action = "Dnat"
rule {
name = "nat_rule_collection1_rule1"
protocols = ["TCP", "UDP"]
source_addresses = ["10.0.0.1", "10.0.0.2"]
destination_address = "192.168.1.1"
destination_ports = ["80"]
translated_address = "192.168.0.1"
translated_port = "8080"
}
}
}
After running terraform plan and terraform apply, the resources were created with the Azure Firewall Policy; the rule collection and the application rules are visible inside the firewall (screenshots omitted).

Unable to connect to SonarQube private IP with port 9000

I am having some trouble and need some assistance.
I have set up a SonarQube instance on a machine in Azure, and I am trying to connect to it through its private IP address and port 9000. However, I am unable to connect and get a "connection timed out" error.
Here are the steps I have taken so far:
Checked the firewall rules: The firewall on the machine is not blocking incoming traffic on port 9000.
Checked the IP address: The private IP address of the machine is correct.
Checked the port: Port 9000 is the correct port for my SonarQube instance.
Checked the logs: There are no error messages related to the connection issue in the logs.
Restarted the SonarQube instance: Restarting the instance did not resolve the issue.
What else can I do to resolve this issue and connect to my SonarQube instance?
Note: I am using a Linux machine and bash commands.
Here is the Terraform code in case I did something incorrectly.
provider "azurerm" {
features {}
}
locals {
sonarqube_image_name = "sonarqube:9.9-community"
sonarqube_container_name = "sonarqube-container"
postgres_container_name = "postgres-container"
}
resource "azurerm_resource_group" "examplegroup" {
name = "example-rg"
location = "South Central US"
}
resource "azurerm_network_security_group" "nsg-example-sonargroup" {
name = "nsg-example-sonargroup"
location = azurerm_resource_group.sonargroup.location
resource_group_name = azurerm_resource_group.sonargroup.name
}
resource "azurerm_virtual_network" "example-sonar-vnet" {
name = "example-sonar-vnet"
location = azurerm_resource_group.sonargroup.location
resource_group_name = azurerm_resource_group.sonargroup.name
address_space = ["10.0.0.0/16"]
}
resource "azurerm_subnet" "example-sonar-subnet" {
name = "sonar-subnet"
resource_group_name = azurerm_resource_group.sonargroup.name
virtual_network_name = azurerm_virtual_network.example-sonar-vnet.name
address_prefixes = ["10.0.0.0/16"]
delegation {
name = "delegation"
service_delegation {
name = "Microsoft.ContainerInstance/containerGroups"
actions = ["Microsoft.Network/virtualNetworks/subnets/join/action", "Microsoft.Network/virtualNetworks/subnets/prepareNetworkPolicies/action"]
}
}
}
resource "azurerm_container_group" "sonarqube" {
name = "sonarqube-group"
location = azurerm_resource_group.sonargroup.location
resource_group_name = azurerm_resource_group.sonargroup.name
ip_address_type = "Private"
os_type = "Linux"
subnet_ids = [azurerm_subnet.example-sonar-subnet.id]
container {
name = local.sonarqube_container_name
image = local.sonarqube_image_name
cpu = 1
memory = 1.5
ports {
port = 9000
}
environment_variables = {
SONARQUBE_JDBC_URL = "jdbc:postgresql://postgres-container:5432/sonarqube_db"
SONARQUBE_JDBC_USERNAME = "example_user"
SONARQUBE_JDBC_PASSWORD = "example_password"
}
}
container {
name = local.postgres_container_name
image = "postgres:11"
cpu = 1
memory = 2
ports {
port = 5432
}
environment_variables = {
POSTGRES_DB = "example_db"
POSTGRES_USER = "example_user"
POSTGRES_PASSWORD = "example_password"
}
}
}
output "private_ip_address" {
value = azurerm_container_group.sonarqube.ip_address
}
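One thing the configuration above never does is associate nsg-example-sonargroup with the subnet, and the NSG has no inbound rule for port 9000. A sketch of what that could look like, reusing the names from the question (this is an assumption; the timeout may have another cause, such as the client not being inside the VNet):
# Allow port 9000 from within the virtual network
resource "azurerm_network_security_rule" "allow_sonarqube" {
  name                        = "allow-sonarqube-9000"
  priority                    = 100
  direction                   = "Inbound"
  access                      = "Allow"
  protocol                    = "Tcp"
  source_port_range           = "*"
  destination_port_range      = "9000"
  source_address_prefix       = "VirtualNetwork"
  destination_address_prefix  = "*"
  resource_group_name         = azurerm_resource_group.sonargroup.name
  network_security_group_name = azurerm_network_security_group.nsg-example-sonargroup.name
}

# Attach the NSG to the delegated subnet
resource "azurerm_subnet_network_security_group_association" "sonar" {
  subnet_id                 = azurerm_subnet.example-sonar-subnet.id
  network_security_group_id = azurerm_network_security_group.nsg-example-sonargroup.id
}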

Two frontend ports of application gateway are using the same port 443 - Azure Application Gateway in Terraform

I am configuring an Azure Application Gateway using Terraform.
Following is the module that I wrote:
locals {
backend_address_pool_name = format("appgwbeap-%[1]s-%[2]s%[3]sweb-gw",var.project_code,var.env,var.zone)
frontend_port_name = format("appgwfeport-%[1]s-%[2]s%[3]sweb-gw",var.project_code,var.env,var.zone)
frontend_ip_configuration_name = format("appgwfeip-%[1]s-%[2]s%[3]sweb-gw",var.project_code,var.env,var.zone)
http_setting_name = format("appgwhtst-%[1]s-%[2]s%[3]sweb-gw",var.project_code,var.env,var.zone)
listener_name = format("appgwhttplstnr-%[1]s-%[2]s%[3]sweb-gw",var.project_code,var.env,var.zone)
request_routing_rule_name = format("appgwrqrt-%[1]s-%[2]s%[3]sweb-gw",var.project_code,var.env,var.zone)
redirect_configuration_name = format("appgwrdrcfg-%[1]s-%[2]s%[3]sweb-gw",var.project_code,var.env,var.zone)
}
resource "azurerm_application_gateway" "appgw" {
name = format("appgw-%[1]s-%[2]s%[3]sweb-gw",var.project_code,var.env,var.zone)
resource_group_name = var.rg_name
location = var.location
sku {
name = var.sku_name
tier = var.sku_tier
capacity = var.sku_capacity
}
gateway_ip_configuration {
name = format("appgwipcfg-%[1]s-%[2]s%[3]sweb-gw",var.project_code,var.env,var.zone)
subnet_id = var.subnet_id
}
frontend_port {
name = "appgwfeport-app1-uatizweb-gw"
port = "443"
}
frontend_port {
name = "appgwfeport-app2-uatizweb-gw"
port = "443"
}
ssl_certificate {
name = "UAT-APP1-APPGW-SSL-CERT-SGCORE-12Jan21-12Jan23"
data = filebase64("./certificates/web.app1.sso.gwwu.xxx.com.de-12Jan2021.pfx")
password = "${var.app1_pfx_password}"
}
authentication_certificate {
name = "UAT-APP1-APPGW-SSL-CERT-SGCORE-12Jan21-12Jan23"
data = file("./certificates/web_app1_sso_gwwu_xxx_com_de-12Jan21.cer")
}
ssl_certificate {
name = "UAT-APP2-APPGW-SSL-CERT-01Mar21"
data = filebase64("./certificates/selfsigned-app2-uat-01Mar21.pfx")
password = "${var.app1_pfx_password}"
}
authentication_certificate {
name = "UAT-APP2-APPGW-SSL-CERT-01Mar21"
data = file("./certificates/selfsigned-app2-uat-01Mar21.cer")
}
frontend_ip_configuration {
name = "${local.frontend_ip_configuration_name}"
subnet_id = var.subnet_id
private_ip_address = var.frontend_private_ip
private_ip_address_allocation = "Static"
}
backend_address_pool {
name = "beap-path-app1-app"
#fqdns = var.fqdn_list
ip_addresses = ["10.xxx.xxx.36"]
}
backend_address_pool {
name = "beap-path-app2-app"
#fqdns = var.fqdn_list
ip_addresses = ["10.xxx.xxx.37"]
}
backend_http_settings {
name = "behs-path-app1-app"
cookie_based_affinity = var.backend_cookie_based_affinity
affinity_cookie_name = "ApplicationGatewayAffinity"
path = var.backend_path
port = "443"
#probe_name = "probe-app1"
protocol = "Https"
request_timeout = var.backend_request_timeout
authentication_certificate {
name = "UAT-APP1-APPGW-SSL-CERT-SGCORE-12Jan21-12Jan23"
}
}
backend_http_settings {
name = "behs-path-app2-app"
cookie_based_affinity = var.backend_cookie_based_affinity
affinity_cookie_name = "ApplicationGatewayAffinity"
path = var.backend_path
port = "443"
#probe_name = "probe-app2"
protocol = "Https"
request_timeout = var.backend_request_timeout
authentication_certificate {
name = "UAT-APP2-APPGW-SSL-CERT-01Mar21"
}
}
http_listener {
name = "appgwhttplsnr-app1-uatizweb-gw"
frontend_ip_configuration_name = "${local.frontend_ip_configuration_name}"
frontend_port_name = "appgwfeport-app1-uatizweb-gw"
protocol = "Https"
ssl_certificate_name = "UAT-APP1-APPGW-SSL-CERT-SGCORE-12Jan21-12Jan23"
require_sni = true
host_name = "web.app1.sso.gwwu.xxx.com.de"
}
http_listener {
name = "appgwhttplsnr-app2-uatizweb-gw"
frontend_ip_configuration_name = "${local.frontend_ip_configuration_name}"
frontend_port_name = "appgwfeport-app2-uatizweb-gw"
ssl_certificate_name = "UAT-APP2-APPGW-SSL-CERT-01Mar21"
require_sni = true
protocol = "Https"
host_name = "web.app2.sso.gwwu.xxx.com.de"
}
request_routing_rule {
name = "appgwrqrt-app2-uatizweb-gw"
rule_type = var.backend_rule_type
http_listener_name = "appgwhttplsnr-app2-uatizweb-gw"
backend_address_pool_name = "beap-path-app2-app"
backend_http_settings_name = "behs-path-app2-app"
}
request_routing_rule {
name = "appgwrqrt-app1-uatizweb-gw"
rule_type = var.backend_rule_type
http_listener_name = "appgwhttplsnr-app1-uatizweb-gw"
backend_address_pool_name = "beap-path-app1-app"
backend_http_settings_name = "behs-path-app1-app"
}
}
Below is the main.tf that calls the module:
module "app_gateway" {
source = "../../../modules/appgateway"
rg_name = var.rg_name
agency = local.agency
project_code = local.project_code
env = var.env
zone = var.zone
tier = "appgw"
location = local.location
vnet_name = var.vnet_name
subnet_id = module.agw_subnet.subnet_id
sku_name = var.appgw_sku_name
sku_capacity = var.appgw_sku_capacity
frontend_private_ip = var.appgw_frontend_ip
frontend_port = var.frontend_port
frontend_protocol = var.frontend_protocol
app1_pfx_password = "${var.app1_pfx_password}"
backend_protocol = var.backend_protocol
backend_port = var.backend_port
backend_path = "/"
providers = {
azurerm = azurerm.corpapps
}
}
I have used multi-site listeners. However, when I deploy, I get the following error:
two frontend ports of application gateway are using the same port number 443.
When I change one of the ports to 5443, it does get deployed and works from Terraform.
Also, I can create two frontend ports with 443 (multi-site) from the portal, but I can't do this from Terraform.
What am I missing in Terraform?
Any light on this will help!
We ran into the same error when updating an App Gateway via a PowerShell script.
Scenario:
There was an existing multi-site listener using the FrontendPort for 80. When the script tried to add a second multi-site listener on that same port, we got the same error message.
It turned out that the original listener was on the public frontend IP while the second one being added was using the private frontend IP. I didn't realize this, but you can NOT use the same frontend port for both a public listener and a private listener, even if they are both multi-site.
The original listener shouldn't have been public IP, anyway, so once I tweaked the original listener to use the private IP, the script executed without error.
I found the explanation about Private and Public IP's not being able to share the same port here:
https://github.com/MicrosoftDocs/azure-docs/issues/23652
Maybe this will help someone else.
We could use the same frontend configuration (frontend IP, protocol, port, and name) for multi-site listeners instead of creating two frontend_port blocks.
For example, change the related code:
resource "azurerm_application_gateway" "appgw" {
#..
frontend_port {
name = "appgwfeport-app1-uatizweb-gw"
port = "443"
}
# frontend_port {
# name = "appgwfeport-app2-uatizweb-gw"
# port = "443"
# }
#..
http_listener {
name = "appgwhttplsnr-app1-uatizweb-gw"
frontend_ip_configuration_name = "${local.frontend_ip_configuration_name}"
frontend_port_name = "appgwfeport-app1-uatizweb-gw"
protocol = "Https"
ssl_certificate_name = "UAT-APP1-APPGW-SSL-CERT-SGCORE-12Jan21-12Jan23"
require_sni = true
host_name = "web.app1.sso.gwwu.xxx.com.de"
}
http_listener {
name = "appgwhttplsnr-app2-uatizweb-gw"
frontend_ip_configuration_name = "${local.frontend_ip_configuration_name}"
frontend_port_name = "appgwfeport-app1-uatizweb-gw" #change here
ssl_certificate_name = "UAT-APP2-APPGW-SSL-CERT-01Mar21"
require_sni = true
protocol = "Https"
host_name = "web.app2.sso.gwwu.xxx.com.de"
}
}
For more information, read https://learn.microsoft.com/en-us/azure/application-gateway/tutorial-multiple-sites-powershell and https://learn.microsoft.com/en-us/azure/application-gateway/create-multiple-sites-portal#configuration-tab
Maybe this link will be helpful: https://learn.microsoft.com/en-us/azure/application-gateway/application-gateway-faq#can-i-use-the-same-port-for-both-public-facing-and-private-facing-listeners
The short answer is that it is not possible to use the same port for private and public listeners.
As a workaround I used another port, such as 10443, for the HTTPS private listener configuration. In my case that worked fine because users did not use the private listener.
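In Terraform terms that workaround amounts to a dedicated frontend_port for the private listener, for example (a sketch inside the azurerm_application_gateway resource, with 10443 as the assumed alternative port and a hypothetical port name):
frontend_port {
  name = "appgwfeport-app2-private-uatizweb-gw" # hypothetical name
  port = 10443
}

http_listener {
  name                           = "appgwhttplsnr-app2-uatizweb-gw"
  frontend_ip_configuration_name = local.frontend_ip_configuration_name
  frontend_port_name             = "appgwfeport-app2-private-uatizweb-gw"
  protocol                       = "Https"
  ssl_certificate_name           = "UAT-APP2-APPGW-SSL-CERT-01Mar21"
  require_sni                    = true
  host_name                      = "web.app2.sso.gwwu.xxx.com.de"
}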
azure-cli was outdated in our case. After upgrading, it all started to work like a charm.
We had an Application Gateway set up by Terraform with two multi-site public listeners, both using the same 443 port. The mentioned error Two Http Listeners of Application Gateway <..> and <..> are using the same Frontend Port <..> and FrontendIpConfiguration <..> was happening when the outdated az CLI tried to run az network application-gateway ssl-cert update --key-vault-secret-id <..>. azure-cli initial version: 2.2.0, final version: 2.39.0. After the upgrade, az network application-gateway ssl-cert update updated the gateway's certificate as expected.

Mounting EFS to AWS Fargate using Terraform - PlatformTaskDefinitionIncompatibilityException error

I'm receiving this curious error message:
PlatformTaskDefinitionIncompatibilityException: The specified platform does not satisfy the task definition’s required capabilities
I suspect it has something to do with this line, although I'm not quite sure:
file_system_id = aws_efs_file_system.main.id
This is my script:
provider "aws" {
region = "us-east-1"
profile = var.profile
}
### Network
# Fetch AZs in the current region
data "aws_availability_zones" "available" {}
resource "aws_vpc" "main" {
cidr_block = "172.17.0.0/16"
}
# Create var.az_count private subnets, each in a different AZ
resource "aws_subnet" "private" {
count = "${var.az_count}"
cidr_block = "${cidrsubnet(aws_vpc.main.cidr_block, 8, count.index)}"
availability_zone = "${data.aws_availability_zones.available.names[count.index]}"
vpc_id = "${aws_vpc.main.id}"
}
# Create var.az_count public subnets, each in a different AZ
resource "aws_subnet" "public" {
count = "${var.az_count}"
cidr_block = "${cidrsubnet(aws_vpc.main.cidr_block, 8, var.az_count + count.index)}"
availability_zone = "${data.aws_availability_zones.available.names[count.index]}"
vpc_id = "${aws_vpc.main.id}"
map_public_ip_on_launch = true
}
# IGW for the public subnet
resource "aws_internet_gateway" "gw" {
vpc_id = "${aws_vpc.main.id}"
}
# Route the public subnet traffic through the IGW
resource "aws_route" "internet_access" {
route_table_id = "${aws_vpc.main.main_route_table_id}"
destination_cidr_block = "0.0.0.0/0"
gateway_id = "${aws_internet_gateway.gw.id}"
}
# Create a NAT gateway with an EIP for each private subnet to get internet connectivity
resource "aws_eip" "gw" {
count = "${var.az_count}"
vpc = true
depends_on = ["aws_internet_gateway.gw"]
}
resource "aws_nat_gateway" "gw" {
count = "${var.az_count}"
subnet_id = "${element(aws_subnet.public.*.id, count.index)}"
allocation_id = "${element(aws_eip.gw.*.id, count.index)}"
}
# Create a new route table for the private subnets
# And make it route non-local traffic through the NAT gateway to the internet
resource "aws_route_table" "private" {
count = "${var.az_count}"
vpc_id = "${aws_vpc.main.id}"
route {
cidr_block = "0.0.0.0/0"
nat_gateway_id = "${element(aws_nat_gateway.gw.*.id, count.index)}"
}
}
# Explicitely associate the newly created route tables to the private subnets (so they don't default to the main route table)
resource "aws_route_table_association" "private" {
count = "${var.az_count}"
subnet_id = "${element(aws_subnet.private.*.id, count.index)}"
route_table_id = "${element(aws_route_table.private.*.id, count.index)}"
}
### Security
# ALB Security group
# This is the group you need to edit if you want to restrict access to your application
resource "aws_security_group" "lb" {
name = "tf-ecs-alb"
description = "controls access to the ALB"
vpc_id = "${aws_vpc.main.id}"
ingress {
protocol = "tcp"
from_port = 80
to_port = 80
cidr_blocks = ["0.0.0.0/0"]
}
egress {
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
}
}
# Traffic to the ECS Cluster should only come from the ALB
resource "aws_security_group" "ecs_tasks" {
name = "tf-ecs-tasks"
description = "allow inbound access from the ALB only"
vpc_id = "${aws_vpc.main.id}"
ingress {
protocol = "tcp"
from_port = "${var.app_port}"
to_port = "${var.app_port}"
security_groups = ["${aws_security_group.lb.id}"]
}
egress {
protocol = "-1"
from_port = 0
to_port = 0
cidr_blocks = ["0.0.0.0/0"]
}
}
### ALB
resource "aws_alb" "main" {
name = "tf-ecs-chat"
subnets = aws_subnet.public.*.id
security_groups = ["${aws_security_group.lb.id}"]
}
resource "aws_alb_target_group" "app" {
name = "tf-ecs-chat"
port = 80
protocol = "HTTP"
vpc_id = "${aws_vpc.main.id}"
target_type = "ip"
}
# Redirect all traffic from the ALB to the target group
resource "aws_alb_listener" "front_end" {
load_balancer_arn = "${aws_alb.main.id}"
port = "80"
protocol = "HTTP"
default_action {
target_group_arn = "${aws_alb_target_group.app.id}"
type = "forward"
}
}
### ECS
resource "aws_ecs_cluster" "main" {
name = "tf-ecs-cluster"
}
resource "aws_ecs_task_definition" "app" {
family = "app"
network_mode = "awsvpc"
requires_compatibilities = ["FARGATE"]
cpu = "${var.fargate_cpu}"
memory = "${var.fargate_memory}"
task_role_arn = "${aws_iam_role.ecs_task_role_role.arn}"
execution_role_arn = "${aws_iam_role.ecs_task_role_role.arn}"
container_definitions = <<DEFINITION
[
{
"cpu": ${var.fargate_cpu},
"image": "${var.app_image}",
"memory": ${var.fargate_memory},
"name": "app",
"networkMode": "awsvpc",
"portMappings": [
{
"containerPort": ${var.app_port},
"hostPort": ${var.app_port}
}
]
}
]
DEFINITION
volume {
name = "efs-html"
efs_volume_configuration {
file_system_id = aws_efs_file_system.main.id
root_directory = "/opt/data"
}
}
}
resource "aws_ecs_service" "main" {
name = "tf-ecs-service"
cluster = "${aws_ecs_cluster.main.id}"
task_definition = "${aws_ecs_task_definition.app.arn}"
desired_count = "${var.app_count}"
launch_type = "FARGATE"
network_configuration {
security_groups = ["${aws_security_group.ecs_tasks.id}"]
subnets = aws_subnet.private.*.id
}
load_balancer {
target_group_arn = "${aws_alb_target_group.app.id}"
container_name = "app"
container_port = "${var.app_port}"
}
depends_on = [
"aws_alb_listener.front_end",
]
}
# ECS roles & policies
# Create the IAM task role for ECS Task definition
resource "aws_iam_role" "ecs_task_role_role" {
name = "test-ecs-task-role"
assume_role_policy = "${file("ecs-task-role.json")}"
tags = {
Terraform = "true"
}
}
# Create the AmazonECSTaskExecutionRolePolicy managed role
resource "aws_iam_policy" "ecs_task_role_policy" {
name = "test-ecs-AmazonECSTaskExecutionRolePolicy"
description = "Provides access to other AWS service resources that are required to run Amazon ECS tasks"
policy = "${file("ecs-task-policy.json")}"
}
# Assign the AmazonECSTaskExecutionRolePolicy managed role to ECS
resource "aws_iam_role_policy_attachment" "ecs_task_policy_attachment" {
role = "${aws_iam_role.ecs_task_role_role.name}"
policy_arn = "${aws_iam_policy.ecs_task_role_policy.arn}"
}
resource "aws_efs_file_system" "main" {
tags = {
Name = "ECS-EFS-FS"
}
}
resource "aws_efs_mount_target" "main" {
count = "${var.subnets-count}"
file_system_id = "${aws_efs_file_system.main.id}"
subnet_id = "${element(var.subnets, count.index)}"
}
variables.tf
variable "az_count" {
description = "Number of AZs to cover in a given AWS region"
default = "2"
}
variable "app_image" {
description = "Docker image to run in the ECS cluster"
default = "xxxxxxxxxx.dkr.ecr.us-east-1.amazonaws.com/test1:nginx"
}
variable "app_port" {
description = "Port exposed by the docker image to redirect traffic to"
# default = 3000
default = 80
}
variable "app_count" {
description = "Number of docker containers to run"
default = 2
}
variable "fargate_cpu" {
description = "Fargate instance CPU units to provision (1 vCPU = 1024 CPU units)"
default = "256"
}
variable "fargate_memory" {
description = "Fargate instance memory to provision (in MiB)"
default = "512"
}
################
variable "subnets" {
type = "list"
description = "list of subnets to mount the fs to"
default = ["subnet-xxxxxxx","subnet-xxxxxxx"]
}
variable "subnets-count" {
type = "string"
description = "number of subnets to mount to"
default = 2
}
You simply need to upgrade your ECS service to the latest platform version:
resource "aws_ecs_service" "service" {
platform_version = "1.4.0"
launch_type = "FARGATE"
...
}
The EFS feature is only available on platform version 1.4.0 or later.
When you don't specify platform_version, it defaults to LATEST, which at the time was pinned to 1.3.0 and does not allow EFS volumes.
UPDATE: As of 1/21/22, it seems that the LATEST ECS platform version is 1.4.0, so explicitly specifying the platform version is no longer necessary for EFS mounts to work. Per:
https://docs.aws.amazon.com/AmazonECS/latest/developerguide/platform-linux-fargate.html
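Applied to the service from the question, the change is just the extra platform_version argument (a sketch with the other arguments carried over unchanged):
resource "aws_ecs_service" "main" {
  name             = "tf-ecs-service"
  cluster          = aws_ecs_cluster.main.id
  task_definition  = aws_ecs_task_definition.app.arn
  desired_count    = var.app_count
  launch_type      = "FARGATE"
  platform_version = "1.4.0" # EFS volumes require 1.4.0 (or a LATEST that resolves to it)

  network_configuration {
    security_groups = [aws_security_group.ecs_tasks.id]
    subnets         = aws_subnet.private.*.id
  }

  load_balancer {
    target_group_arn = aws_alb_target_group.app.id
    container_name   = "app"
    container_port   = var.app_port
  }

  depends_on = [aws_alb_listener.front_end]
}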

How to use a resource reference in a Terraform workspace?

Currently, I am using Terraform workspaces to deploy the same code into multiple environments. But right now I am stuck on referring to a resource in a specific workspace.
Example code:
resource "aws_security_group" "testing-ec2" {
name = "${local.env}-testing-ec2"
vpc_id = "${aws_vpc.vpc.id}"
ingress {
from_port = 8080
to_port = 8080
protocol = "tcp"
security_groups = ["${local.security-groups}"]
}
ingress {
from_port = 22
to_port = 22
protocol = "tcp"
cidr_blocks = ["${local.bastion_ip}"]
}
egress {
from_port = 0
to_port = 0
protocol = -1
cidr_blocks = ["0.0.0.0/0"]
}
}
Workspace security group mapping:
locals {
tf_security-groups = {
dev = ""
stg = "${aws_security_group.test-private-alb.id}"
qa = "${aws_security_group.test1-private-alb.id}"
prod = "${aws_security_group.test2-private-alb.id}"
}
security-groups = "${lookup(local.tf_security-groups,local.env)}"
}
When I try to apply in the stg workspace, this error appears:
* local.tf_security-groups: local.tf_security-groups: Resource 'aws_security_group.test1-private-alb' not found for variable 'aws_security_group.test1-private-alb.id'
You could use the data source terraform_remote_state to sift through the state but you'd also have to make each of your security group ids into outputs.
data "terraform_remote_state" "this" {
backend = "s3"
workspace = "stg"
config {
bucket = ""
key = ""
region = ""
}
}
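For that to work, the configuration that owns the security group has to export the id as an output, which the consumer then reads through the outputs attribute (a sketch; the output name private_alb_sg_id is hypothetical):
# In the configuration/workspace that creates the security group:
output "private_alb_sg_id" {
  value = aws_security_group.test-private-alb.id
}

# In the consuming configuration:
locals {
  security-groups = data.terraform_remote_state.this.outputs.private_alb_sg_id
}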
It would be cleaner to use the aws_security_group data source instead.
locals {
env = "qa"
security_group_map = {
stg = data.aws_security_group.test_private_alb.id
qa = data.aws_security_group.test1_private_alb.id
prod = data.aws_security_group.test2_private_alb.id
}
security_groups = lookup(local.security_group_map, local.env, "")
}
data "aws_security_group" "test_private_alb" {
name = "test_private_alb"
}
data "aws_security_group" "test1_private_alb" {
name = "test1_private_alb"
}
data "aws_security_group" "test2_private_alb" {
name = "test2_private_alb"
}
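With either approach, the looked-up id can then be consumed as in the question; wrapping it in compact() drops the empty dev value so the ingress rule is not handed an empty string (a sketch, with the other arguments as in the question):
resource "aws_security_group" "testing-ec2" {
  # ... other arguments as in the question ...
  ingress {
    from_port       = 8080
    to_port         = 8080
    protocol        = "tcp"
    security_groups = compact([local.security_groups])
  }
}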
