When I deploy the Event Hubs namespace together with the other components of the data pipeline (a Function App and Cosmos DB), the metrics graphs in the Azure Event Hubs portal do not appear; instead the graphs show "Resource not found".
But when I deploy the same Terraform code for the Event Hubs namespace without the other components, the metric graphs appear. Here is the Terraform code:
locals {
ip_rule_map = flatten([
for cidr in ["182.191.83.208"] : [
{
action = "Allow"
ip_mask = cidr
}
]
])
}
resource "azurerm_eventhub_namespace" "avro-ingestion" {
name = "test-eh"
location = "Central US"
resource_group_name = "test-rg"
sku = "Standard"
capacity = 1
network_rulesets = false ? [{
default_action = "Deny"
ip_rule = local.ip_rule_map
virtual_network_rule = []
trusted_service_access_enabled = true
}] : [
{
default_action = "Allow"
ip_rule = local.ip_rule_map
virtual_network_rule = []
trusted_service_access_enabled = true
}
]
tags = {
Name = "avro-ingestion"
Purpose = "data-ingestion-infra-deployment"
CreatedBy = "emumba"
}
}
resource "azurerm_eventhub_namespace_authorization_rule" "user_managed" {
name = "UserManagedSharedAccessKey"
namespace_name = azurerm_eventhub_namespace.avro-ingestion.name
resource_group_name = "test-rg"
listen = true
send = true
manage = true
}
resource "null_resource" "schema-registry" {
depends_on = [
azurerm_eventhub_namespace.avro-ingestion
]
provisioner "local-exec" {
interpreter = ["/bin/bash", "-c"]
command = "az eventhubs namespace schema-registry create --name test-schema-group --namespace-name test-eh --resource-group test-rg --schema-compatibility Backward --schema-type Avro"
}
}
resource "azurerm_eventhub" "thunder" {
name = "test"
namespace_name = azurerm_eventhub_namespace.avro-ingestion.name
resource_group_name = "test-rg"
partition_count = 2
message_retention = 1
}
resource "azurerm_eventhub_consumer_group" "function-app-cg" {
name = "fApp-cons-group"
namespace_name = azurerm_eventhub_namespace.avro-ingestion.name
eventhub_name = azurerm_eventhub.thunder.name
resource_group_name = "test-rg"
}
Here is the main.tf file where I call all the modules, including the Event Hubs namespace module:
resource "random_string" "random_Sacc1" {
length = 4
special = false
upper = false
min_lower = 1
min_numeric = 1
}
resource "random_string" "random_Sacc2" {
length = 2
special = false
upper = false
min_lower = 1
min_numeric = 1
}
module "azure-resource-group" {
source = "../../modules/resource-group"
region = var.region
res_group_name = var.res_group_name
}
module "azure-virtual-network" {
depends_on = [
module.azure-resource-group
]
source = "../../modules/virtual-network"
rg_name = module.azure-resource-group.name
rg_location = module.azure-resource-group.location
vn_name = var.vn_name
vn_cidrs = var.vn_cidrs
subnets = var.subnets
subnet_cidrs = var.subnet_cidrs
pub_nsg_name = var.pub_nsg_name
private_nsg_name = var.private_nsg_name
internet_ip_cidr_list = var.internet_ip_cidr_list
}
module "azure-ad-app-registration" {
depends_on = [
module.azure-resource-group
]
source = "../../modules/app-role-assignment"
app-display-name = var.app-display-name
rg_name = module.azure-resource-group.name
}
module "azure-eventhubs" {
source = "../../modules/event-hubs"
ns_name = var.eventhub_namespace_name
eventhub_name = var.eventhub_name
cons_group_name = var.cons_group_name
rg_name = module.azure-resource-group.name
rg_location = module.azure-resource-group.location
enable_private_access = var.enable_private_access
cidr_list = var.public_cidr_list
vnet_id_dns = module.azure-virtual-network.vnet-id
private_ep_subnet = module.azure-virtual-network.private-subent1-id
dns_zone_name = var.dns_zone_name_private_ep
schema_group_name = var.eventhub_schema_group_name
}
module "azure-storage-account" {
depends_on = [
module.azure-virtual-network
]
source = "../../modules/storage-account"
storage_acc_name = "${var.storage_acc_name}${random_string.random_Sacc1.id}"
rg_name = module.azure-resource-group.name
rg_location = module.azure-resource-group.location
enable_private_access = var.enable_private_access
cidr_list = var.public_cidr_list
vnet_id_dns = module.azure-virtual-network.vnet-id
private_ep_subnet = module.azure-virtual-network.private-subent1-id
dns_zone_name = var.dns_zone_name_private_ep
}
module "azure-cosmos-db" {
source = "../../modules/cosmos-db"
acc_name = var.cosmos_acc_name
db_name = var.cosmos_db_name
rg_name = module.azure-resource-group.name
rg_location = module.azure-resource-group.location
cosmos_db_container_name = var.cosmos_db_container_name
enable_private_access = var.enable_private_access
cidr_list = var.public_cidr_list
vnet_id_dns = module.azure-virtual-network.vnet-id
private_ep_subnet = module.azure-virtual-network.private-subent1-id
dns_zone_name = var.dns_zone_name_private_ep
synapse_link = var.enable_synapse_link
}
# module "fApp-azure-storage-account" {
# source = "../../modules/storage-account"
# storage_acc_name = "${var.storage_acc_fApp_name}${random_string.random_Sacc2.id}"
# rg_name = module.azure-resource-group.name
# rg_location = module.azure-resource-group.location
# enable_private_access = var.enable_private_access
# cidr_list = var.public_cidr_list
# private_ep_subnet = element(module.azure-virtual-network.subnet_id_list, 1)
# dns_zone_name = var.dns_zone_name_private_ep
# }
module "data-ingestion-fApp" {
depends_on = [
module.azure-cosmos-db,
module.azure-eventhubs,
module.azure-storage-account
]
source = "../../modules/function-app"
rg_name = module.azure-resource-group.name
rg_location = module.azure-resource-group.location
application_insight_name = var.application_insight_name
fApp_service_plan_name = var.fApp_service_plan_name
fApp_name = var.fApp_name
fApp-storage_acc_name = "${var.storage_acc_fApp_name}${random_string.random_Sacc2.id}"
enable_private_access = var.enable_private_access
vnet_id_dns = module.azure-virtual-network.vnet-id
private_ep_subnet = module.azure-virtual-network.private-subent1-id
integration_vnet_name = module.azure-virtual-network.vnet-name
integration_subnet_name = module.azure-virtual-network.private-subent2-name
func_configurations = { "AZURE_CLIENT_ID" = module.azure-ad-app-registration.client_id
"AZURE_CLIENT_SECRET" = module.azure-ad-app-registration.client_secret,
"AZURE_TENANT_ID" = module.azure-ad-app-registration.tenant_id,
"EVENTHUB_NAME" = var.eventhub_name,
"EVENTHUB_FULLY_QUALIFIED_NAMESPACE" = "${var.eventhub_namespace_name}.servicebus.windows.net",
"SCHEMA_GROUP_NAME" = var.eventhub_schema_group_name,
"OUTPUT_CONTAINER" = var.storage_acc_container_name,
"OUTPUT_PATH" = var.storage_acc_container_path,
"COSMOS_DB_URI" = module.azure-cosmos-db.cosmos_account_uri,
"COSMOS_DB_PRIMARY_KEY" = module.azure-cosmos-db.cosmos_account_primary_key,
"COSMOS_DB_NAME" = var.cosmos_db_name,
"COSMOS_DB_CONTAINER_NAME" = var.cosmos_db_container_name,
"a10devops_namespace_connection" = module.azure-eventhubs.eventhub_conn_str,
"a10devops_storage_connection" = module.azure-storage-account.storage_account_conn_str }
}
Related
I want to deploy an AKS cluster in Azure using Terraform, but it is taking too much time to create (more than an hour) and the Terraform job never finishes. In the portal the AKS cluster stays in the Creating status. I'm deploying the AKS cluster in the EastUS2 region.
The VM sizes I used are not too big, so I don't understand what the problem could be.
This is my main file:
main.tf
# Configure the Microsoft Azure Provider.
terraform {
required_providers {
azurerm = {
source = "hashicorp/azurerm"
version = ">= 2.26"
}
kubernetes = {
source = "hashicorp/kubernetes"
version = ">= 1.24.6"
}
azapi = {
source = "azure/azapi"
version = ">=1.1.0"
}
}
required_version = ">= 0.14.9"
}
provider "azurerm" {
features {}
}
provider "kubernetes" {
host = module.aks.host
username = module.aks.username
password = module.aks.password
client_certificate = module.aks.client_certificate
client_key = base64decode(module.aks.client_key)
cluster_ca_certificate = base64decode(module.aks.cluster_ca_certificate)
}
provider "azapi" {
# subscription_id = data.azurerm_client_config.current.subscription_id
# tentat_id = data.azurerm_client_config.current.tenant_id
}
data "azurerm_client_config" "current" {}
module "ResourceGroup" {
source = "./ResourceGroup"
}
module "Networks" {
source = "./Networks"
resource_group_name = module.ResourceGroup.rg_name_out
location = module.ResourceGroup.rg_location_out
}
module "acr" {
source = "./ACR"
resource_group_name = module.ResourceGroup.rg_name_out
location = module.ResourceGroup.rg_location_out
name = var.acr_name
sku = var.acr_sku
admin_enabled = var.acr_admin_enabled
georeplication_locations = var.acr_georeplication_locations
soft_delete_policy_status = var.acr_soft_delete_policy_status
soft_delete_policy_days = var.acr_soft_delete_policy_days
identity_name = var.acr_identity_name
tags = var.tags
# depends_on = [module.StorageAccount]
}
module "aks" {
source = "./AKS"
location = module.ResourceGroup.rg_location_out
resource_group_name = module.ResourceGroup.rg_name_out
acr_id = module.acr.id
name = var.aks_cluster_name
kubernetes_version = var.aks_kubernetes_version
dns_prefix = lower(var.aks_cluster_name)
private_cluster_enabled = var.aks_private_cluster_enabled
automatic_channel_upgrade = var.aks_automatic_channel_upgrade
sku_tier = var.aks_sku_tier
identity_name = var.aks_identity_name
api_server_authorized_ip_ranges = [] #module.Networks.subnet_address_bastion
azure_policy_enabled = var.aks_azure_policy_enabled
http_application_routing_enabled = var.aks_http_application_routing_enabled
network_profile = var.aks_network_profile
aci_connector_linux = var.aks_aci_connector_linux
azure_ad_rbac_managed = var.aks_azure_ad_rbac_managed
tenant_id = data.azurerm_client_config.current.tenant_id
admin_group_object_ids = var.aks_admin_group_object_ids
azure_rbac_enabled = var.aks_azure_rbac_enabled
admin_username = var.aks_admin_username
ssh_public_key = var.aks_ssh_public_key
tags = var.tags
depends_on = [module.Networks, module.acr]
default_node_pool = {
name = "system"
vm_size = "Standard_D2s_v3"
node_count = 1
enable_auto_scaling = true
max_count = 1
min_count = 1
max_surge = "50%"
max_pods = 36
os_disk_size_gb = 50
os_disk_type = "Managed"
ultra_ssd_enabled = true
zones = ["1", "2","3"]
node_labels = { "workload" = "system" }
node_taints = [ "workload=system:NoSchedule" ]
vnet_subnet_id = module.Networks.subnet_id
orchestrator_version = var.aks_kubernetes_version
}
node_pools = [
{
name = "batch"
mode = "User"
vm_size = "Standard_D2s_v3"
node_count = 1
enable_auto_scaling = true
max_count = 1
min_count = 1
max_surge = "50%"
max_pods = 36
os_disk_size_gb = 50
os_disk_type = "Managed"
ultra_ssd_enabled = true
zones = ["1", "2","3"]
node_labels = { "workload" = "batch" }
node_taints = [ "workload=batch:NoSchedule" ]
vnet_subnet_id = module.Networks.subnet_id
orchestrator_version = var.aks_kubernetes_version
}
]
}
Can I use a module input, like a variable, to tag a volume? I have this block:
module "jenkins" {
source = "terraform-aws-modules/ec2-instance/aws"
version = "3.4.0"
name = "Jenkins"
ami = data.aws_ami.amazon_linux.id
instance_type = "t3.nano"
availability_zone = element(module.vpc.azs, 0)
subnet_id = element(module.vpc.public_subnets, 0)
vpc_security_group_ids = [module.jenkins-sg.security_group_id]
associate_public_ip_address = true
key_name = "my_key"
user_data = file("userdata/init.sh")
enable_volume_tags = false
root_block_device = [
{
encrypted = false
volume_type = "gp3"
volume_size = 15
throughput = 150
tags = {
Name = "${name}-root"
terraform = "true"
user_type = "ebs"
}
},
]
ebs_block_device = [
{
encrypted = false
device_name = "/dev/sdf"
volume_type = "gp3"
volume_size = 50
throughput = 150
}
]
volume_tags = {
Name = "${name}-data"
terraform = "true"
user_type = "ebs"
}
tags = {
terraform = "true"
user_type = "ec2"
}
}
As seen in "${name}-root" and "${name}-data", is it possible to tag the volumes with the module input "name"?
Thanks
You can't reference the module's input like this. However, you can use an input variable (which can be overridden from outside) or a local value (a fixed value) to achieve this.
input variable
variable "ec2_instance_name" {
type = string
description = "The name of the ec2 instance"
default = "jenkins"
}
module "jenkins" {
source = "terraform-aws-modules/ec2-instance/aws"
version = "3.4.0"
name = var.ec2_instance_name
ami = data.aws_ami.amazon_linux.id
instance_type = "t3.nano"
availability_zone = element(module.vpc.azs, 0)
subnet_id = element(module.vpc.public_subnets, 0)
vpc_security_group_ids = [module.jenkins-sg.security_group_id]
associate_public_ip_address = true
key_name = "my_key"
user_data = file("userdata/init.sh")
enable_volume_tags = false
root_block_device = [
{
encrypted = false
volume_type = "gp3"
volume_size = 15
throughput = 150
tags = {
Name = "${var.ec2_instance_name}-root"
terraform = "true"
user_type = "ebs"
}
},
]
ebs_block_device = [
{
encrypted = false
device_name = "/dev/sdf"
volume_type = "gp3"
volume_size = 50
throughput = 150
}
]
volume_tags = {
Name = "${var.ec2_instance_name}-data"
terraform = "true"
user_type = "ebs"
}
tags = {
terraform = "true"
user_type = "ec2"
}
}
local value
locals {
ec2_instance_name = "jenkins"
}
module "jenkins" {
source = "terraform-aws-modules/ec2-instance/aws"
version = "3.4.0"
name = local.ec2_instance_name
ami = data.aws_ami.amazon_linux.id
instance_type = "t3.nano"
availability_zone = element(module.vpc.azs, 0)
subnet_id = element(module.vpc.public_subnets, 0)
vpc_security_group_ids = [module.jenkins-sg.security_group_id]
associate_public_ip_address = true
key_name = "my_key"
user_data = file("userdata/init.sh")
enable_volume_tags = false
root_block_device = [
{
encrypted = false
volume_type = "gp3"
volume_size = 15
throughput = 150
tags = {
Name = "${local.ec2_instance_name}-root"
terraform = "true"
user_type = "ebs"
}
},
]
ebs_block_device = [
{
encrypted = false
device_name = "/dev/sdf"
volume_type = "gp3"
volume_size = 50
throughput = 150
}
]
volume_tags = {
Name = "${local.ec2_instance_name}-data"
terraform = "true"
user_type = "ebs"
}
tags = {
terraform = "true"
user_type = "ec2"
}
}
I have a Terraform resource in which I am trying to make the subnet_id value dynamic. I have variables defined below in which subnet_name = "worker-subnet-1". I want to pass the name of the subnet and fetch the subnet ID, since I have multiple subnets. How can I do that?
resource "oci_containerengine_node_pool" "node_pool" {
for_each = var.nodepools
cluster_id = oci_containerengine_cluster.cluster[0].id
compartment_id = var.compartment_id
depends_on = [oci_containerengine_cluster.cluster]
kubernetes_version = var.cluster_kubernetes_version
name = each.value["name"]
node_config_details {
placement_configs {
availability_domain = var.availability_domain
subnet_id = oci_core_subnet.each.value["subnet_name"].id
}
size = each.value["size"]
}
node_shape = each.value["node_shape"]
node_shape_config {
#Optional
memory_in_gbs = each.value["memory"]
ocpus = each.value["ocpus"]
}
node_source_details {
image_id = each.value["image_id"]
source_type = "IMAGE"
}
ssh_public_key = file(var.ssh_public_key_path)
}
These are my variables:
nodepools = {
np1 = {
name = "np1"
size = 3
ocpus = 8
memory = 120
image_id = "test"
node_shape = "VM.Standard2.8"
subnet_name = "worker-subnet-1"
}
np2 = {
name = "np2"
size = 2
ocpus = 8
memory = 120
image_id = "test"
node_shape = "VM.Standard2.8"
subnet_name = "worker-subnet-1"
}
}
any suggestions?
resource "oci_core_subnet" "snet-workers" {
cidr_block = lookup(var.subnets["snet-workers"], "subnet_cidr")
compartment_id = var.compartment_id
vcn_id = oci_core_virtual_network.base_vcn.id
display_name = lookup(var.subnets["snet-workers"], "display_name")
dns_label = lookup(var.subnets["snet-workers"], "dns_label")
prohibit_public_ip_on_vnic = true
security_list_ids = [oci_core_security_list.private_worker_nodes.id]
route_table_id = oci_core_route_table.rt-nat.id
}
You have to reference it as shown below, changing <local resource name> to the name you have given your subnet resource:
subnet_id = oci_core_subnet.<local resource name>[each.value.subnet_name].id
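As a minimal sketch of that pattern (assuming the worker subnets themselves are created with for_each, keyed by the same names used in the nodepools variable; the worker_subnets variable and the "workers" resource name below are hypothetical):

variable "worker_subnets" {
  type = map(object({
    subnet_cidr  = string
    display_name = string
    dns_label    = string
  }))
}

# One subnet per map key, e.g. "worker-subnet-1"
resource "oci_core_subnet" "workers" {
  for_each                   = var.worker_subnets
  cidr_block                 = each.value.subnet_cidr
  compartment_id             = var.compartment_id
  vcn_id                     = oci_core_virtual_network.base_vcn.id
  display_name               = each.value.display_name
  dns_label                  = each.value.dns_label
  prohibit_public_ip_on_vnic = true
}

# In the node pool, look the subnet up by the name carried in the nodepools map
resource "oci_containerengine_node_pool" "node_pool" {
  for_each           = var.nodepools
  cluster_id         = oci_containerengine_cluster.cluster[0].id
  compartment_id     = var.compartment_id
  kubernetes_version = var.cluster_kubernetes_version
  name               = each.value["name"]

  node_config_details {
    placement_configs {
      availability_domain = var.availability_domain
      subnet_id           = oci_core_subnet.workers[each.value["subnet_name"]].id
    }
    size = each.value["size"]
  }

  # ...remaining arguments (node_shape, node_shape_config, node_source_details, ssh_public_key) as in the original resource
}

If the subnets are instead separate, individually named resources (as in the snet-workers example above), a lookup map achieves the same thing, e.g. locals { worker_subnet_ids = { "worker-subnet-1" = oci_core_subnet.snet-workers.id } } and then subnet_id = local.worker_subnet_ids[each.value["subnet_name"]].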
I tried to build an ECS cluster with an ALB in front using Terraform. Because I use dynamic port mapping, the targets are not registered as healthy. I played with the health check and success codes; if I set the matcher to 301, everything is fine.
ECS
data "template_file" "mb_task_template" {
template = file("${path.module}/templates/marketplace-backend.json.tpl")
vars = {
name = "${var.mb_image_name}"
port = "${var.mb_port}"
image = "${aws_ecr_repository.mb.repository_url}"
log_group = "${aws_cloudwatch_log_group.mb.name}"
region = "${var.region}"
}
}
resource "aws_ecs_cluster" "mb" {
name = var.mb_image_name
}
resource "aws_ecs_task_definition" "mb" {
family = var.mb_image_name
container_definitions = data.template_file.mb_task_template.rendered
volume {
name = "mb-home"
host_path = "/ecs/mb-home"
}
}
resource "aws_ecs_service" "mb" {
name = var.mb_repository_url
cluster = aws_ecs_cluster.mb.id
task_definition = aws_ecs_task_definition.mb.arn
desired_count = 2
iam_role = var.aws_iam_role_ecs
depends_on = [aws_autoscaling_group.mb]
load_balancer {
target_group_arn = var.target_group_arn
container_name = var.mb_image_name
container_port = var.mb_port
}
}
resource "aws_autoscaling_group" "mb" {
name = var.mb_image_name
availability_zones = ["${var.availability_zone}"]
min_size = var.min_instance_size
max_size = var.max_instance_size
desired_capacity = var.desired_instance_capacity
health_check_type = "EC2"
health_check_grace_period = 300
launch_configuration = aws_launch_configuration.mb.name
vpc_zone_identifier = flatten([var.vpc_zone_identifier])
lifecycle {
create_before_destroy = true
}
}
data "template_file" "user_data" {
template = file("${path.module}/templates/user_data.tpl")
vars = {
ecs_cluster_name = "${var.mb_image_name}"
}
}
resource "aws_launch_configuration" "mb" {
name_prefix = var.mb_image_name
image_id = var.ami
instance_type = var.instance_type
security_groups = ["${var.aws_security_group}"]
iam_instance_profile = var.aws_iam_instance_profile
key_name = var.key_name
associate_public_ip_address = true
user_data = data.template_file.user_data.rendered
lifecycle {
create_before_destroy = true
}
}
resource "aws_cloudwatch_log_group" "mb" {
name = var.mb_image_name
retention_in_days = 14
}
ALB
locals {
target_groups = ["1", "2"]
}
resource "aws_alb" "mb" {
name = "${var.mb_image_name}-alb"
internal = false
load_balancer_type = "application"
security_groups = ["${aws_security_group.mb_alb.id}"]
subnets = var.subnets
tags = {
Name = var.mb_image_name
}
}
resource "aws_alb_target_group" "mb" {
count = length(local.target_groups)
name = "${var.mb_image_name}-tg-${element(local.target_groups, count.index)}"
port = var.mb_port
protocol = "HTTP"
vpc_id = var.vpc_id
target_type = "instance"
health_check {
path = "/health"
protocol = "HTTP"
timeout = "10"
interval = "15"
healthy_threshold = "3"
unhealthy_threshold = "3"
matcher = "200-299"
}
lifecycle {
create_before_destroy = true
}
tags = {
Name = var.mb_image_name
}
}
resource "aws_alb_listener" "mb_https" {
load_balancer_arn = aws_alb.mb.arn
port = 443
protocol = "HTTPS"
ssl_policy = "ELBSecurityPolicy-2016-08"
certificate_arn = module.dns.certificate_arn
default_action {
type = "forward"
target_group_arn = aws_alb_target_group.mb.0.arn
}
}
resource "aws_alb_listener_rule" "mb_https" {
listener_arn = aws_alb_listener.mb_https.arn
priority = 100
action {
type = "forward"
target_group_arn = aws_alb_target_group.mb.0.arn
}
condition {
field = "path-pattern"
values = ["/health/"]
}
}
Okay, it looks like the code above is working. I had a different issue with networking.
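The networking issue isn't described here, but with dynamic port mapping a common cause is that the container-instance security group doesn't allow traffic from the ALB on the ephemeral host-port range ECS assigns to each task. A minimal sketch of such a rule, reusing the variable and resource names from the configuration above (an illustration, not necessarily the actual fix):

# Allow the ALB to reach container instances on the dynamic host ports
# (32768-65535 is the default ephemeral range on the Amazon ECS-optimized AMI)
resource "aws_security_group_rule" "alb_to_ecs_dynamic_ports" {
  type                     = "ingress"
  protocol                 = "tcp"
  from_port                = 32768
  to_port                  = 65535
  security_group_id        = var.aws_security_group       # SG attached to the ECS container instances
  source_security_group_id = aws_security_group.mb_alb.id # SG attached to the ALB
}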
When I update desired_count, the terraform planner shows that the operation will be an update in-place. However, when terraform tries to apply the changes, I get the following error:
Terraform v0.12.21
Initializing plugins and modules...
2020/03/05 22:10:52 [DEBUG] Using modified User-Agent: Terraform/0.12.21 TFC/8f5a579db5
module.web.aws_ecs_service.web[0]: Modifying... [id=arn:aws:ecs:us-east-1:55555:service/web/web]
Error: Error updating ECS Service (arn:aws:ecs:us-east-1:55555:service/web/web): InvalidParameterException: Unable to update network parameters on services with a CODE_DEPLOY deployment controller. Use AWS CodeDeploy to trigger a new deployment.
The terraform code used to reproduce this looks something like:
resource "aws_lb" "platform" {
name = "platform"
internal = false
load_balancer_type = "application"
ip_address_type = "ipv4"
security_groups = [aws_security_group.lb.id]
subnets = [for subnet in aws_subnet.lb : subnet.id]
enable_deletion_protection = true
tags = {
Name = "platform"
Type = "Public"
}
}
resource "aws_lb_target_group" "platform" {
count = 2
name = "platform-tg-${count.index + 1}"
vpc_id = var.vpc_id
protocol = "HTTP"
port = 80
target_type = "ip"
stickiness {
type = "lb_cookie"
enabled = false
}
health_check {
path = "/healthcheck"
port = var.container_port
protocol = "HTTP"
timeout = 5
healthy_threshold = 5
unhealthy_threshold = 3
matcher = "200"
}
tags = {
Name = "platform-tg-${count.index + 1}"
Type = "Public"
}
}
resource "aws_lb_listener" "platform-https" {
load_balancer_arn = aws_lb.platform.arn
port = 443
protocol = "HTTPS"
ssl_policy = "ELBSecurityPolicy-TLS-1-2-Ext-2018-06"
certificate_arn = var.certificate_arn
depends_on = [aws_lb_target_group.platform]
default_action {
type = "forward"
target_group_arn = aws_lb_target_group.platform[0].arn
}
lifecycle {
ignore_changes = [
default_action
]
}
}
locals {
family = "platform"
container_name = "web"
}
resource "aws_cloudwatch_log_group" "platform" {
name = "/aws/ecs/platform"
retention_in_days = 3653
tags = {
Name = "platform"
}
}
resource "aws_ecs_task_definition" "platform" {
family = local.family
requires_compatibilities = ["FARGATE"]
cpu = var.service.cpu
memory = var.service.memory
network_mode = "awsvpc"
execution_role_arn = aws_iam_role.ecs_task_execution.arn
task_role_arn = aws_iam_role.ecs_task_execution.arn
container_definitions = jsonencode(
jsondecode(
templatefile("${path.module}/taskdef.json", {
family = local.family
container_name = local.container_name
region = var.region
account_id = var.account_id
cpu = var.service.cpu
memory = var.service.memory
image = var.service.container_image
log_group = aws_cloudwatch_log_group.platform.name
node_env = var.node_env
port = var.container_port
platform_url = var.platform_url
short_url = var.short_url
cdn_url = var.cdn_url
})
).containerDefinitions
)
tags = {
Name = "platform"
Type = "Private"
}
}
resource "aws_ecs_cluster" "platform" {
name = "platform"
setting {
name = "containerInsights"
value = "enabled"
}
tags = {
Name = "platform"
Type = "Public"
}
}
data "aws_lb_listener" "current-platform" {
arn = aws_lb_listener.platform-https.arn
}
data "aws_ecs_task_definition" "current-platform" {
task_definition = local.family
}
resource "aws_ecs_service" "platform" {
count = var.delete_platform_ecs_service ? 0 : 1
name = "platform"
cluster = aws_ecs_cluster.platform.arn
launch_type = "FARGATE"
desired_count = var.service.container_count
enable_ecs_managed_tags = true
task_definition = "${aws_ecs_task_definition.platform.family}:${max(aws_ecs_task_definition.platform.revision, data.aws_ecs_task_definition.current-platform.revision)}"
depends_on = [aws_lb_target_group.platform]
load_balancer {
target_group_arn = data.aws_lb_listener.current-platform.default_action[0].target_group_arn
container_name = local.container_name
container_port = var.container_port
}
network_configuration {
subnets = sort([for subnet in aws_subnet.ecs : subnet.id])
security_groups = [aws_security_group.ecs.id]
}
deployment_controller {
type = "CODE_DEPLOY"
}
lifecycle {
// NOTE: Based on: https://docs.aws.amazon.com/cli/latest/reference/ecs/update-service.html
// If the network configuration, platform version, or task definition need to be updated, a new AWS CodeDeploy deployment should be created.
ignore_changes = [
load_balancer,
network_configuration,
task_definition
]
}
tags = {
Name = "platform"
Type = "Private"
}
}
This is using Terraform v0.12.21. Full debug output is available at: https://gist.github.com/jgeurts/f4d930608a119e9cd75a7a54b111ee7c
This is maybe not the best answer, but I wasn't able to get terraform to adjust only the desired_count. Instead, I added auto scaling to the ECS service:
Ignore desired_count:
resource "aws_ecs_service" "platform" {
...
lifecycle {
// NOTE: Based on: https://docs.aws.amazon.com/cli/latest/reference/ecs/update-service.html
// If the network configuration, platform version, or task definition need to be updated, a new AWS CodeDeploy deployment should be created.
ignore_changes = [
desired_count, # Preserve desired count when updating an autoscaled ECS Service
load_balancer,
network_configuration,
task_definition,
]
}
}
Add auto-scaling:
resource "aws_appautoscaling_target" "platform" {
max_capacity = var.max_capacity
min_capacity = var.min_capacity
resource_id = "service/${aws_ecs_cluster.platform.name}/platform" # format is service/<cluster name>/<ECS service name>; the service here is also named "platform"
scalable_dimension = "ecs:service:DesiredCount"
service_namespace = "ecs"
depends_on = [
aws_ecs_cluster.platform,
]
}
resource "aws_appautoscaling_policy" "platform" {
name = "platform-auto-scale"
service_namespace = aws_appautoscaling_target.platform.service_namespace
resource_id = aws_appautoscaling_target.platform.resource_id
scalable_dimension = aws_appautoscaling_target.platform.scalable_dimension
policy_type = "TargetTrackingScaling"
target_tracking_scaling_policy_configuration {
target_value = var.service.autoscale_target_cpu_percentage
scale_out_cooldown = 60
scale_in_cooldown = 300
predefined_metric_specification {
predefined_metric_type = "ECSServiceAverageCPUUtilization"
}
}
}
resource "aws_appautoscaling_scheduled_action" "platform_0430_increase_min_capacity" {
name = "platform-0430-increase-min-capacity"
schedule = "cron(30 4 * * ? *)"
service_namespace = aws_appautoscaling_target.platform.service_namespace
resource_id = aws_appautoscaling_target.platform.resource_id
scalable_dimension = aws_appautoscaling_target.platform.scalable_dimension
scalable_target_action {
min_capacity = var.min_capacity + 4
max_capacity = var.max_capacity
}
}
resource "aws_appautoscaling_scheduled_action" "platform_0615_restore_min_capacity" {
name = "platform-0615-restore-min-capacity"
schedule = "cron(15 06 * * ? *)"
service_namespace = aws_appautoscaling_target.platform.service_namespace
resource_id = aws_appautoscaling_target.platform.resource_id
scalable_dimension = aws_appautoscaling_target.platform.scalable_dimension
scalable_target_action {
min_capacity = var.min_capacity
max_capacity = var.max_capacity
}
}
resource "aws_appautoscaling_scheduled_action" "platform_weekday_0945_increase_min_capacity" {
name = "platform-weekday-0945-increase-min-capacity"
schedule = "cron(45 9 ? * MON-FRI *)"
service_namespace = aws_appautoscaling_target.platform.service_namespace
resource_id = aws_appautoscaling_target.platform.resource_id
scalable_dimension = aws_appautoscaling_target.platform.scalable_dimension
scalable_target_action {
min_capacity = var.min_capacity + 4
max_capacity = var.max_capacity
}
}
resource "aws_appautoscaling_scheduled_action" "platform_weekday_2100_restore_min_capacity" {
name = "platform-weekday-2100-restore-min-capacity"
schedule = "cron(0 21 ? * MON-FRI *)"
service_namespace = aws_appautoscaling_target.platform.service_namespace
resource_id = aws_appautoscaling_target.platform.resource_id
scalable_dimension = aws_appautoscaling_target.platform.scalable_dimension
scalable_target_action {
min_capacity = var.min_capacity
max_capacity = var.max_capacity
}
}