I know this question has been asked before; I've seen several of the SO responses and read the AWS docs on the subject. I have a Terraform module that, in part, builds out an ECS service, cluster, task, and Fargate container:
###############################################################################
#### EFS for added storage
#### TODO: remove in favor of larger ephemeral storage when Terraform supports it
###############################################################################
resource "aws_efs_file_system" "test" {
creation_token = var.fargate_container_name
tags = {
Name = "test"
}
}
resource "aws_efs_access_point" "test" {
file_system_id = aws_efs_file_system.test.id
root_directory {
path = "/"
}
}
resource "aws_efs_mount_target" "test" {
count = 3
file_system_id = aws_efs_file_system.test.id
subnet_id = local.directory_subnet_ids[count.index]
security_groups = [aws_security_group.test_ecs.id]
}
###############################################################################
#### ECS Task and Service
###############################################################################
resource "aws_ecs_task_definition" "test" {
family = "test"
requires_compatibilities = ["FARGATE"]
cpu = var.test_cpu_limit
memory = var.test_memory_limit
container_definitions = <<JSON
[
{
"name": "test",
"image": "${var.test_image_registry_repo_and_image_name}",
"memory": ${var.test_memory_limit},
"cpu": ${var.test_cpu_limit},
"essential": true,
"portMappings": [
{
"containerPort": 7001,
"hostPort": 7001,
"protocol": "tcp"
},
{
"containerPort": 7002,
"hostPort": 7002,
"protocol": "tcp"
},
{
"containerPort": 9001,
"hostPort": 9001,
"protocol": "tcp"
},
{
"containerPort": 9002,
"hostPort": 9002,
"protocol": "tcp"
}
],
"logConfiguration": {
"logDriver": "awslogs",
"options": {
"awslogs-group": "${aws_cloudwatch_log_group.test_ecs.name}",
"awslogs-region": "${data.aws_region.main.name}",
"awslogs-stream-prefix": "ecs"
}
},
"linuxParameters": {
"initProcessEnabled": true
},
"mountPoints": [
{
"containerPath": "/",
"sourceVolume": "${var.fargate_container_name}"
}
]
}
]
JSON
volume {
name = var.fargate_container_name
efs_volume_configuration {
file_system_id = aws_efs_file_system.test.id
transit_encryption = "ENABLED"
transit_encryption_port = 2049
authorization_config {
access_point_id = aws_efs_access_point.test.id
iam = "ENABLED"
}
}
}
network_mode = "awsvpc"
# The role used by ECS to pull images and the like.
execution_role_arn = aws_iam_role.test_ecs_execution.arn
task_role_arn = aws_iam_role.test_task_ecs.arn
tags = merge(
local.tags, {
"Name" = "test"
}
)
}
resource "aws_ecs_service" "test" {
name = "test"
cluster = aws_ecs_cluster.test.id
task_definition = aws_ecs_task_definition.test.arn
desired_count = var.test_desired_count
enable_execute_command = true
platform_version = "1.4.0"
# service_registries {
# registry_arn = aws_service_discovery_service.test.arn
# container_name = "test"
# }
capacity_provider_strategy {
base = var.fargate_capacity_provider_base_value
capacity_provider = "FARGATE"
weight = var.fargate_capacity_provider_weight_value
}
capacity_provider_strategy {
base = var.fargate_spot_capacity_provider_base_value
capacity_provider = "FARGATE_SPOT"
weight = var.fargate_spot_capacity_provider_weight_value
}
network_configuration {
security_groups = [aws_security_group.test_ecs.id]
subnets = local.directory_subnet_ids
}
tags = merge(
local.tags, {
"Name" = "test"
}
)
}
resource "aws_security_group" "test_ecs" {
name_prefix = "test-ecs"
description = "Allow strict inbound access to ECS Tasks"
vpc_id = data.aws_vpc.primary.id
ingress {
from_port = 2049
to_port = 2049
protocol = "tcp"
cidr_blocks = [data.aws_vpc.primary.cidr_block]
}
ingress {
from_port = 7001
to_port = 7002
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
ingress {
from_port = 9001
to_port = 9002
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
egress {
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
}
tags = merge(
local.tags, {
"Name" = "test-ecs"
}
)
}
resource "aws_iam_role" "test_task_ecs" {
name = "EST"
description = "Test."
permissions_boundary = data.aws_iam_policy.role_permissions_boundary.arn
assume_role_policy = <<POLICY
{
"Version": "2008-10-17",
"Statement": [
{
"Sid": "",
"Effect": "Allow",
"Principal": {
"Service": "ecs-tasks.amazonaws.com"
},
"Action": "sts:AssumeRole"
}
]
}
POLICY
}
I've explicitly set the Fargate platform version in the service, and I saw another SO answer stating that the VPC needed DNS hostnames and DNS resolution enabled; they are. I'm still getting the error:
container_linux.go:370: starting container process caused: process_linux.go:459: container init caused: rootfs_linux.go:71: creating device nodes caused: errno 524
It seems to be connected to the "mountPoints" block in the container definition: removing it will at least start the container, but then the EFS volume is not mounted.
EDIT: Added ECS Task role
EDIT 2: Adding role permissions boundary:
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Sid": "UseServices",
      "Effect": "Allow",
      "Action": [
        "organizations:DescribeOrganization",
        "cloudshell:*",
        "compute-optimizer:*",
        "amplify:*",
        "appmesh:*",
        "appmesh-preview:*",
        "appconfig:*",
        "appflow:*",
        "clouddirectory:*",
        "datapipeline:*",
        "dms:*",
        "dbqms:*",
        "devicefarm:*",
        "devops-guru:*",
        "ds:*",
        "autoscaling:*",
        "imagebuilder:*",
        "ec2-instance-connect:*",
        "ecr-public:*",
        "forecast:*",
        "honeycode:*",
        "proton:*",
        "rds-db:*",
        "rds-data:*",
        "access-analyzer:*",
        "ce:*",
        "cur:*",
        "health:*",
        "pricing:*",
        "ram:Get*",
        "ram:List*",
        "servicequotas:*",
        "ssm:*",
        "ssmmessages:*",
        "support:*",
        "tag:*",
        "cloudfront:*",
        "elasticloadbalancing:*",
        "ecs:*",
        "ecr:*",
        "cloudwatch:*",
        "synthetics:*",
        "servicequotas:*",
        "apigateway:*",
        "rds:*",
        "secretsmanager:*",
        "route53:*",
        "acm:*",
        "resource-groups:*",
        "servicediscovery:*",
        "application-autoscaling:*",
        "ec2messages:*",
        "trustedadvisor:*",
        "cloud9:*",
        "codeartifact:*",
        "codebuild:*",
        "codecommit:*",
        "codedeploy:*",
        "codepipeline:*",
        "codestar:*",
        "codestar-connections:*",
        "codestar-notifications:*",
        "cognito-identity:*",
        "cognito-idp:*",
        "cognito-sync:*",
        "dynamodb:*",
        "eks:*",
        "emr-containers:*",
        "elasticache:*",
        "elasticbeanstalk:*",
        "elasticfilesystem:*",
        "firehose:*",
        "kafka:*",
        "kinesis:*",
        "kinesisanalytics:*",
        "serverlessrepo:*",
        "sqs:*",
        "xray:*",
        "workspaces:*",
        "wam:*",
        "appsync:*",
        "athena:*",
        "batch:*",
        "states:*",
        "backup:*",
        "backup-storage:*",
        "es:*",
        "glue:*",
        "databrew:*",
        "lightsail:*",
        "timestream:*",
        "schemas:*",
        "ec2:*",
        "sts:AssumeRole",
        "sts:TagSession",
        "cloudformation:*",
        "lambda:*",
        "s3:*",
        "sns:*",
        "events:*",
        "kms:*",
        "logs:*",
        "cloudtrail:*",
        "iam:ListAccountAliases"
      ],
      "Resource": "*"
    },
    {
      "Sid": "AllowServiceLinkedRole",
      "Effect": "Allow",
      "Action": [
        "iam:CreateServiceLinkedRole",
        "iam:DeleteServiceLinkedRole",
        "iam:GetServiceLinkedRoleDeletionStatus",
        "iam:UpdateRole"
      ],
      "Resource": [
        "arn:aws:iam::*:role/aws-service-role/*"
      ]
    },
    {
      "Sid": "AllowPolicy",
      "Effect": "Allow",
      "Action": [
        "iam:GetPolicy",
        "iam:DeletePolicy",
        "iam:CreatePolicy",
        "iam:GetPolicyVersion",
        "iam:CreatePolicyVersion",
        "iam:DeletePolicyVersion",
        "iam:ListPolicyVersions"
      ],
      "Resource": [
        "arn:aws:iam::*:policy/*"
      ]
    },
    {
      "Sid": "AllowReadRole",
      "Effect": "Allow",
      "Action": [
        "iam:GetRole",
        "iam:DeleteRole",
        "iam:TagRole",
        "iam:UpdateRoleDescription",
        "iam:ListInstanceProfilesForRole",
        "iam:ListAttachedRolePolicies",
        "iam:ListRolePolicies",
        "iam:UpdateAssumeRolePolicy",
        "iam:PassRole",
        "iam:GetRolePolicy"
      ],
      "Resource": [
        "arn:aws:iam::*:role/*"
      ]
    },
    {
      "Sid": "AllowWriteRole",
      "Effect": "Allow",
      "Action": [
        "iam:CreateRole",
        "iam:DeleteRolePolicy",
        "iam:AttachRolePolicy",
        "iam:DetachRolePolicy",
        "iam:PutRolePermissionsBoundary",
        "iam:PutRolePolicy",
        "iam:UpdateRole",
        "iam:PassRole"
      ],
      "Resource": "*",
      "Condition": {
        "StringEquals": {
          "iam:PermissionsBoundary": "arn:aws:iam::835718480179:policy/CuriPipelineAdministratorAccessPermBoundaries"
        }
      }
    },
    {
      "Sid": "AllowWriteInstanceProfile",
      "Effect": "Allow",
      "Action": [
        "iam:AddRoleToInstanceProfile",
        "iam:CreateInstanceProfile",
        "iam:DeleteInstanceProfile",
        "iam:GetInstanceProfile",
        "iam:ListInstanceProfiles",
        "iam:RemoveRoleFromInstanceProfile"
      ],
      "Resource": [
        "arn:aws:iam::*:instance-profile/*"
      ]
    },
    {
      "Sid": "DenyIamActions",
      "Effect": "Deny",
      "Action": [
        "iam:*OpenIDConnect*",
        "iam:*SAMLProvider*",
        "iam:*User*",
        "iam:*Group*",
        "iam:*AccessKey*",
        "iam:*Password*",
        "iam:CreateAccountAliases",
        "iam:DeleteAccountAliases",
        "iam:*LoginProfile*",
        "iam:*ServiceSpecificCredential*",
        "iam:*MFADevice*",
        "iam:*CredentialReport*",
        "iam:*OrganizationsAccessReport*",
        "iam:*SecurityTokenServicePreferences*",
        "iam:GetAccountAuthorizationDetails",
        "iam:GetAccountSummary"
      ],
      "Resource": "*"
    },
    {
      "Sid": "NoBoundaryPolicyEdit",
      "Effect": "Deny",
      "Action": [
        "iam:CreatePolicyVersion",
        "iam:DeletePolicy",
        "iam:DeletePolicyVersion",
        "iam:SetDefaultPolicyVersion"
      ],
      "Resource": [
        "arn:aws:iam::835718480179:policy/CuriPipelineAdministratorAccessPermBoundaries"
      ]
    },
    {
      "Sid": "NoSelfRoleEdit",
      "Effect": "Deny",
      "Action": [
        "iam:Add*",
        "iam:Attach*",
        "iam:Change*",
        "iam:Create*",
        "iam:Delete*",
        "iam:Deactivate*",
        "iam:Detach*",
        "iam:Enable*",
        "iam:Update*",
        "iam:Put*",
        "iam:Remove*",
        "iam:Reset*",
        "iam:Tag*",
        "iam:Untag*"
      ],
      "Resource": [
        "arn:aws:iam::835718480179:role/CuriPipelineAdministratorAccess"
      ]
    }
  ]
}
The whole problem had nothing to do with AWS. The server I am running (WebLogic) failed to start because I was trying to mount EFS at /, which cannot be done: it would overlay many critical startup and credential files. If the whole filesystem had already been on EFS (it was not; I used a blank filesystem), this likely would have been fine. I mounted it successfully to a lower subdirectory, and the container spun up and is running.
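For reference, a minimal sketch of the corrected "mountPoints" block; the exact containerPath is whatever subdirectory your application expects ("/mnt/efs" here is only an illustration, not from the original post):

"mountPoints": [
  {
    "containerPath": "/mnt/efs",
    "sourceVolume": "${var.fargate_container_name}"
  }
]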
I have created log group resources for a list of given job names:
resource "aws_cloudwatch_log_group" "logGroups" {
count = length(var.jobnames)
name = format("/aws/lambda/%s", format(local.function_name_format, var.jobnames[count.index]))
retention_in_days = 7
}
Now, for each log group resource, I am creating an IAM policy:
resource "aws_iam_policy" "base_iam_policy" {
count = length(var.jobnames)
name = format(local.base_iam_policy_name_format, var.jobnames[count.index])
path = "/"
description = "Base IAM policy for creating a lambda"
policy = <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"logs:CreateLogStream",
"logs:PutLogEvents"
],
"Resource": [
"${element(aws_cloudwatch_log_group.logGroups.*.arn, count.index)}*"
]
},
{
"Action": [
"cloudwatch:PutMetricData",
"cloudwatch:GetMetricData",
"cloudwatch:GetMetricStatistics",
"cloudwatch:ListMetrics"
],
"Resource": "*",
"Effect": "Allow"
},
{
"Effect": "Allow",
"Action": [
"ec2:CreateNetworkInterface",
"ec2:DescribeNetworkInterfaces",
"ec2:DeleteNetworkInterface"
],
"Resource": "*",
"Condition": {
"StringEquals": {
"aws:RequestedRegion": "${var.region}"
}
}
}
]
}
EOF
}
The issue is that the CreateLogStream resource is the same for every base_iam_policy. It looks like count.index is not being incremented in "${element(aws_cloudwatch_log_group.logGroups.*.arn, count.index)}*"?
Honestly, this seems like a bug in Terraform. In the meantime, I'd recommend indexing the elements directly, like the following:
"${aws_cloudwatch_log_group.logGroups[count.index].arn}*"
This question already has answers here: MalformedPolicyDocument error when creating policy via terraform (2 answers). Closed 3 years ago.
I am trying to create a Lambda role and attach policies to it so it can start and stop EC2 instances. I will be triggering the Lambda using CloudWatch.
I am getting this error:
"Error: Error creating IAM Role lambdaRole: MalformedPolicyDocument: JSON strings must not have leading spaces
status code: 400, request id: d6a86c41-6601-43af-9040-81f6e6a76ec8
on iam.tf line 11, in resource "aws_iam_role" "lambdaRole":
11: resource "aws_iam_role" "lambdaRole" {"
terraform {
  backend "s3" {
    region         = "us-west-2"
    bucket         = "gitlegionbucket"
    key            = "ec2/terraform.tfstate"
    dynamodb_table = "tf-state-lock"
  }
}
resource "aws_iam_role" "lambdaRole" {
name = "lambdaRole"
assume_role_policy = <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Action": "sts:AssumeRole",
"Principal": {
"Service": "ec2.amazonaws.com"
},
"Effect": "Allow",
"Sid": ""
}
]
}
EOF
}
resource "aws_iam_policy" "policy" {
name = "test-policy"
policy = <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"logs:CreateLogGroup",
"logs:CreateLogStream",
"logs:PutLogEvents"
],
"Resource": "arn:aws:logs:*:*:*"
},
{
"Effect": "Allow",
"Action": [
"ec2:Start*",
"ec2:Stop*"
],
"Resource": "*"
}
]
}
EOF
}
resource "aws_iam_role_policy_attachment" "attach-policies" {
role = "${aws_iam_role.lambdaRole.name}"
policy_arn = "${aws_iam_policy.policy.arn}"
}
I was also facing the same error, having directly copied the code from the question. The fix for me was to start the opening { of the policy immediately after the EOF line, without any leading spaces:
resource "aws_iam_role" "lambdaRole" {
name = "lambdaRole"
assume_role_policy = <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Action": "sts:AssumeRole",
"Principal": {
"Service": "ec2.amazonaws.com"
},
"Effect": "Allow",
"Sid": ""
}
]
}
EOF
}
resource "aws_iam_policy" "policy" {
name = "test-policy"
policy = <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"logs:CreateLogGroup",
"logs:CreateLogStream",
"logs:PutLogEvents"
],
"Resource": "arn:aws:logs:*:*:*"
},
{
"Effect": "Allow",
"Action": [
"ec2:Start*",
"ec2:Stop*"
],
"Resource": "*"
}
]
}
EOF
}
resource "aws_iam_role_policy_attachment" "attach-policies" {
role = "${aws_iam_role.lambdaRole.name}"
policy_arn = "${aws_iam_policy.policy.arn}"
}
terraform output:
aws_iam_role.lambdaRole: Creating...
aws_iam_role.lambdaRole: Creation complete after 2s [id=lambdaRole]
aws_iam_role_policy_attachment.attach-policies: Creating...
aws_iam_role_policy_attachment.attach-policies: Creation complete after 1s [id=lambdaRole-20191107141649610400000001]
Apply complete! Resources: 2 added, 0 changed, 0 destroyed.
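As a side note (my addition, not part of the original answer): on Terraform 0.12 or later you can avoid heredoc whitespace pitfalls entirely by building the policy with jsonencode, which always produces a string with no leading whitespace:

resource "aws_iam_role" "lambdaRole" {
  name = "lambdaRole"

  # jsonencode serializes an HCL object to a compact JSON string,
  # so the "leading spaces" error cannot occur.
  assume_role_policy = jsonencode({
    Version = "2012-10-17"
    Statement = [
      {
        Action    = "sts:AssumeRole"
        Principal = { Service = "ec2.amazonaws.com" }
        Effect    = "Allow"
        Sid       = ""
      }
    ]
  })
}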
Alright, I found a solution to this problem. I moved the JSON documents into separate files and referred to those files instead, like this:
policy = "${file("lambda-policy.json")}"
And this is what I have in "lambda-policy.json":
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Sid": "VisualEditor0",
      "Effect": "Allow",
      "Action": [
        "ec2:DescribeInstances",
        "ec2:StartInstances",
        "logs:*",
        "ec2:StopInstances"
      ],
      "Resource": "*"
    }
  ]
}
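One caveat to add (not from the original answer): a bare relative path in file() is resolved against the current working directory, so when the policy file lives alongside a module it is safer to anchor the path on path.module:

policy = "${file("${path.module}/lambda-policy.json")}"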
I have a simple module that's something like this:
module "EncryptionKeys" {
source = "../../../../Modules/KeyGenerator"
item_list = ["${module.static_variables.item_list}"]
account_id = "${module.static_variables.account_id}"
key_alias_suffix = "a-suffix"
key_administrator_role = "${data.aws_iam_role.admins.name}"
key_user_suffix = "some-other-suffix"
}
Here is the key resource within the module:
resource "aws_kms_key" "key" {
count = "${length(var.item_list)}"
description = "${var.description}"
policy = "${data.aws_iam_policy_document.key_document.json}"
enable_key_rotation = "${var.enable_key_rotation}"
}
The module itself builds an AWS IAM role/policy document with the following statement:
statement {
  sid    = "Allow use of the key for users"
  effect = "Allow"

  principals {
    type = "AWS"
    identifiers = [
      "arn:aws:iam::${var.account_id}:role/${var.key_administrator_role}",
      "${element(split(".", var.item_list[count.index]), 0)}-${var.key_user_suffix}",
    ]
  }

  actions = [
    "kms:Encrypt",
    "kms:Decrypt",
    "kms:ReEncrypt*",
    "kms:GenerateDataKey*",
    "kms:DescribeKey"
  ]

  resources = ["*"]
}
The problem? When I view the successful terraform plan, item_list is parsed correctly according to element(), but it only ever yields the same value. That is, if I have item_list defined as:
item_list = ["a.blah", "b.foo", "c.bar", "d.foobar"]
there will be four instances of the relevant resources, and the split on "." occurs correctly, but all of them are named for "a". Here is the rendered key policy from the plan:
"{
"Version": "2012-10-17",
"Id": "key=consolepolicy-3",
"Statement": [
{
"Sid": "Enable IAM User Permissions",
"Effect": "Allow",
"Action": "kms:*",
"Resource": "*",
"Principal": {
"AWS": "arn:aws:iam::123456789:role/Admins"
}
},
{
"Sid": "Allow attachment of persistent resources for admin",
"Effect": "Allow",
"Action": [
"kms:RevokeGrant",
"kms:ListGrants",
"kms:CreateGrant"
],
"Resource": "*",
"Principal": {
"AWS": "arn:aws:iam::123456789:role/Admins"
},
"Condition": {
"Bool": {
"kms:GrantIsForAWSResource": "true"
}
}
},
{
"Sid": "Allow use of the key for users",
"Effect": "Allow",
"Action": [
"kms:ReEncrypt*",
"kms:GenerateDataKey*",
"kms:Encrypt",
"kms:DescribeKey",
"kms:Decrypt"
],
"Resource": "*",
"Principal": {
"AWS": [
"a-stg-role",
"arn:aws:iam::123456789:role/Admins"
]
}
},
{
"Sid": "Allow attachment of persistent resources for users",
"Effect": "Allow",
"Action": [
"kms:RevokeGrant",
"kms:ListGrants",
"kms:CreateGrant"
],
"Resource": "*",
"Principal": {
"AWS": [
"a-stg-role",
"arn:aws:iam::123456789:role/Admins"
]
},
"Condition": {
"Bool": {
"kms:GrantIsForAWSResource": "true"
}
}
}
]
}"
Am I doing something fundamentally wrong with count/count.index here? Why won't it iterate over item_list, and why does it always seem to get the same value?
You specify count on the aws_kms_key resource, but that does not make count.index available inside your aws_iam_policy_document data source. Try including a count in the aws_iam_policy_document as well, e.g.:
data "aws_iam_policy_document" "key_document" {
count = "${length(var.item_list)}"
# rest of template ....
}
Then reference the indexed policy document from the key resource, again using count.index:
policy = "${element(data.aws_iam_policy_document.key_document.*.json, count.index)}"
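Putting the pieces together, a sketch of how the module might look; variable names are taken from the question, the interpolation style matches the original Terraform 0.11-era code, and this is untested:

data "aws_iam_policy_document" "key_document" {
  count = "${length(var.item_list)}"

  statement {
    sid    = "Allow use of the key for users"
    effect = "Allow"

    principals {
      type = "AWS"
      identifiers = [
        "arn:aws:iam::${var.account_id}:role/${var.key_administrator_role}",
        "${element(split(".", var.item_list[count.index]), 0)}-${var.key_user_suffix}",
      ]
    }

    actions   = ["kms:Encrypt", "kms:Decrypt", "kms:ReEncrypt*", "kms:GenerateDataKey*", "kms:DescribeKey"]
    resources = ["*"]
  }
}

resource "aws_kms_key" "key" {
  count               = "${length(var.item_list)}"
  description         = "${var.description}"
  policy              = "${element(data.aws_iam_policy_document.key_document.*.json, count.index)}"
  enable_key_rotation = "${var.enable_key_rotation}"
}

Each key now renders its own policy document, so the principal derived from item_list varies with count.index instead of always resolving to the first element.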