Attaching an EFS volume to Fargate? - terraform

I know this question has been asked before, and I've seen several of the SO responses and read the AWS docs on the subject... I have a terraform module that, in part, builds out an ECS service, cluster, task, and Fargate container:
###############################################################################
#### EFS for added storage
#### TODO: remove in favor of larger ephemeral storage when terraform supports it
###############################################################################
resource "aws_efs_file_system" "test" {
creation_token = var.fargate_container_name
tags = {
Name = "test"
}
}
resource "aws_efs_access_point" "test" {
file_system_id = aws_efs_file_system.test.id
root_directory {
path = "/"
}
}
resource "aws_efs_mount_target" "test" {
count = 3
file_system_id = aws_efs_file_system.test.id
subnet_id = local.directory_subnet_ids[count.index]
security_groups = [aws_security_group.test_ecs.id]
}
###############################################################################
#### ECS Task and Service
###############################################################################
resource "aws_ecs_task_definition" "test" {
family = "test"
requires_compatibilities = ["FARGATE"]
cpu = var.test_cpu_limit
memory = var.test_memory_limit
container_definitions = <<JSON
[
{
"name": "test",
"image": "${var.test_image_registry_repo_and_image_name}",
"memory": ${var.test_memory_limit},
"cpu": ${var.test_cpu_limit},
"essential": true,
"portMappings": [
{
"containerPort": 7001,
"hostPort": 7001,
"protocol": "tcp"
},
{
"containerPort": 7002,
"hostPort": 7002,
"protocol": "tcp"
},
{
"containerPort": 9001,
"hostPort": 9001,
"protocol": "tcp"
},
{
"containerPort": 9002,
"hostPort": 9002,
"protocol": "tcp"
}
],
"logConfiguration": {
"logDriver": "awslogs",
"options": {
"awslogs-group": "${aws_cloudwatch_log_group.test_ecs.name}",
"awslogs-region": "${data.aws_region.main.name}",
"awslogs-stream-prefix": "ecs"
}
},
"linuxParameters": {
"initProcessEnabled": true
},
"mountPoints": [
{
"containerPath": "/",
"sourceVolume": "${var.fargate_container_name}"
}
]
}
]
JSON
volume {
name = var.fargate_container_name
efs_volume_configuration {
file_system_id = aws_efs_file_system.test.id
transit_encryption = "ENABLED"
transit_encryption_port = 2049
authorization_config {
access_point_id = aws_efs_access_point.test.id
iam = "ENABLED"
}
}
}
network_mode = "awsvpc"
# The role used by ECS to pull images and the like.
execution_role_arn = aws_iam_role.test_ecs_execution.arn
task_role_arn = aws_iam_role.test_task_ecs.arn
tags = merge(
local.tags, {
"Name" = "test"
}
)
}
resource "aws_ecs_service" "test" {
name = "test"
cluster = aws_ecs_cluster.test.id
task_definition = aws_ecs_task_definition.test.arn
desired_count = var.test_desired_count
enable_execute_command = true
platform_version = "1.4.0"
# service_registries {
# registry_arn = aws_service_discovery_service.test.arn
# container_name = "test"
# }
capacity_provider_strategy {
base = var.fargate_capacity_provider_base_value
capacity_provider = "FARGATE"
weight = var.fargate_capacity_provider_weight_value
}
capacity_provider_strategy {
base = var.fargate_spot_capacity_provider_base_value
capacity_provider = "FARGATE_SPOT"
weight = var.fargate_spot_capacity_provider_weight_value
}
network_configuration {
security_groups = [aws_security_group.test_ecs.id]
subnets = local.directory_subnet_ids
}
tags = merge(
local.tags, {
"Name" = "test"
}
)
}
resource "aws_security_group" "test_ecs" {
name_prefix = "test-ecs"
description = "Allow strict inbound access to ECS Tasks"
vpc_id = data.aws_vpc.primary.id
ingress {
from_port = 2049
to_port = 2049
protocol = "tcp"
cidr_blocks = [data.aws_vpc.primary.cidr_block]
}
ingress {
from_port = 7001
to_port = 7002
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
ingress {
from_port = 9001
to_port = 9002
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
egress {
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
}
tags = merge(
local.tags, {
"Name" = "test-ecs"
}
)
}
resource "aws_iam_role" "test_task_ecs" {
name = "EST"
description = "Test."
permissions_boundary = data.aws_iam_policy.role_permissions_boundary.arn
assume_role_policy = <<POLICY
{
"Version": "2008-10-17",
"Statement": [
{
"Sid": "",
"Effect": "Allow",
"Principal": {
"Service": "ecs-tasks.amazonaws.com"
},
"Action": "sts:AssumeRole"
}
]
}
POLICY
}
I've explicitly set the Fargate platform version in the service, and I saw another SO answer stating that the VPC needs DNS hostnames and DNS resolution enabled -- they are. I'm still getting the error:
container_linux.go:370: starting container process caused: process_linux.go:459: container init caused: rootfs_linux.go:71: creating device nodes caused: errno 524
It seems to be connected to the "mountPoints" block in the container definition: removing it will at least start the container, but then the EFS volume is not mounted.
EDIT: Added the ECS task role.
EDIT 2: Added the role permissions boundary:
{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "UseServices",
"Effect": "Allow",
"Action": [
"organizations:DescribeOrganization",
"cloudshell:*",
"compute-optimizer:*",
"amplify:*",
"appmesh:*",
"appmesh-preview:*",
"appconfig:*",
"appflow:*",
"clouddirectory:*",
"datapipeline:*",
"dms:*",
"dbqms:*",
"devicefarm:*",
"devops-guru:*",
"ds:*",
"autoscaling:*",
"imagebuilder:*",
"ec2-instance-connect:*",
"ecr-public:*",
"forecast:*",
"honeycode:*",
"proton:*",
"rds-db:*",
"rds-data:*",
"access-analyzer:*",
"ce:*",
"cur:*",
"health:*",
"pricing:*",
"ram:Get*",
"ram:List*",
"servicequotas:*",
"ssm:*",
"ssmmessages:*",
"support:*",
"tag:*",
"cloudfront:*",
"elasticloadbalancing:*",
"ecs:*",
"ecr:*",
"cloudwatch:*",
"synthetics:*",
"servicequotas:*",
"apigateway:*",
"rds:*",
"secretsmanager:*",
"route53:*",
"acm:*",
"resource-groups:*",
"servicediscovery:*",
"application-autoscaling:*",
"ec2messages:*",
"trustedadvisor:*",
"cloud9:*",
"codeartifact:*",
"codebuild:*",
"codecommit:*",
"codedeploy:*",
"codepipeline:*",
"codestar:*",
"codestar-connections:*",
"codestar-notifications:*",
"cognito-identity:*",
"cognito-idp:*",
"cognito-sync:*",
"dynamodb:*",
"eks:*",
"emr-containers:*",
"elasticache:*",
"elasticbeanstalk:*",
"elasticfilesystem:*",
"firehose:*",
"kafka:*",
"kinesis:*",
"kinesisanalytics:*",
"serverlessrepo:*",
"sqs:*",
"xray:*",
"workspaces:*",
"wam:*",
"appsync:*",
"athena:*",
"batch:*",
"states:*",
"backup:*",
"backup-storage:*",
"es:*",
"glue:*",
"databrew:*",
"lightsail:*",
"timestream:*",
"schemas:*",
"ec2:*",
"sts:AssumeRole",
"sts:TagSession",
"cloudformation:*",
"lambda:*",
"s3:*",
"sns:*",
"events:*",
"kms:*",
"logs:*",
"cloudtrail:*",
"iam:ListAccountAliases"
],
"Resource": "*"
},
{
"Sid": "AllowServiceLinkedRole",
"Effect": "Allow",
"Action": [
"iam:CreateServiceLinkedRole",
"iam:DeleteServiceLinkedRole",
"iam:GetServiceLinkedRoleDeletionStatus",
"iam:UpdateRole"
],
"Resource": [
"arn:aws:iam::*:role/aws-service-role/*"
]
},
{
"Sid": "AllowPolicy",
"Effect": "Allow",
"Action": [
"iam:GetPolicy",
"iam:DeletePolicy",
"iam:CreatePolicy",
"iam:GetPolicyVersion",
"iam:CreatePolicyVersion",
"iam:DeletePolicyVersion",
"iam:ListPolicyVersions"
],
"Resource": [
"arn:aws:iam::*:policy/*"
]
},
{
"Sid": "AllowReadRole",
"Effect": "Allow",
"Action": [
"iam:GetRole",
"iam:DeleteRole",
"iam:TagRole",
"iam:UpdateRoleDescription",
"iam:ListInstanceProfilesForRole",
"iam:ListAttachedRolePolicies",
"iam:ListRolePolicies",
"iam:UpdateAssumeRolePolicy",
"iam:PassRole",
"iam:GetRolePolicy"
],
"Resource": [
"arn:aws:iam::*:role/*"
]
},
{
"Sid": "AllowWriteRole",
"Effect": "Allow",
"Action": [
"iam:CreateRole",
"iam:DeleteRolePolicy",
"iam:AttachRolePolicy",
"iam:DetachRolePolicy",
"iam:PutRolePermissionsBoundary",
"iam:PutRolePolicy",
"iam:UpdateRole",
"iam:PassRole"
],
"Resource": "*",
"Condition": {
"StringEquals": {
"iam:PermissionsBoundary": "arn:aws:iam::835718480179:policy/CuriPipelineAdministratorAccessPermBoundaries"
}
}
},
{
"Sid": "AllowWriteInstanceProfile",
"Effect": "Allow",
"Action": [
"iam:AddRoleToInstanceProfile",
"iam:CreateInstanceProfile",
"iam:DeleteInstanceProfile",
"iam:GetInstanceProfile",
"iam:ListInstanceProfiles",
"iam:RemoveRoleFromInstanceProfile"
],
"Resource": [
"arn:aws:iam::*:instance-profile/*"
]
},
{
"Sid": "DenyIamActions",
"Effect": "Deny",
"Action": [
"iam:*OpenIDConnect*",
"iam:*SAMLProvider*",
"iam:*User*",
"iam:*Group*",
"iam:*AccessKey*",
"iam:*Password*",
"iam:CreateAccountAliases",
"iam:DeleteAccountAliases",
"iam:*LoginProfile*",
"iam:*ServiceSpecificCredential*",
"iam:*MFADevice*",
"iam:*CredentialReport*",
"iam:*OrganizationsAccessReport*",
"iam:*SecurityTokenServicePreferences*",
"iam:GetAccountAuthorizationDetails",
"iam:GetAccountSummary"
],
"Resource": "*"
},
{
"Sid": "NoBoundaryPolicyEdit",
"Effect": "Deny",
"Action": [
"iam:CreatePolicyVersion",
"iam:DeletePolicy",
"iam:DeletePolicyVersion",
"iam:SetDefaultPolicyVersion"
],
"Resource": [
"arn:aws:iam::835718480179:policy/CuriPipelineAdministratorAccessPermBoundaries"
]
},
{
"Sid": "NoSelfRoleEdit",
"Effect": "Deny",
"Action": [
"iam:Add*",
"iam:Attach*",
"iam:Change*",
"iam:Create*",
"iam:Delete*",
"iam:Deactivate*",
"iam:Detach*",
"iam:Enable*",
"iam:Update*",
"iam:Put*",
"iam:Remove*",
"iam:Reset*",
"iam:Tag*",
"iam:Untag*"
],
"Resource": [
"arn:aws:iam::835718480179:role/CuriPipelineAdministratorAccess"
]
}
]
}

The whole problem had nothing to do with AWS: the server I am running (WebLogic) failed to start because I was trying to mount EFS at /, which cannot work, as it would overlay many critical startup and credential files. If the whole filesystem had already been on EFS (it was not; I used a blank filesystem), this likely would have been fine. I mounted it to a lower subdirectory instead, and the container spun up and is running.
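For anyone hitting the same error, the only change needed was the containerPath in the "mountPoints" block; a minimal sketch (the /mnt/efs path is only illustrative, any non-root directory works):
"mountPoints": [
  {
    "containerPath": "/mnt/efs",
    "sourceVolume": "${var.fargate_container_name}"
  }
]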

Related

Can an instance profile resource in Terraform have multiple inline policies?

I'm trying to convert a CloudFormation template to Terraform. One of the issues I'm running into is with this part of the template.
CloudFormation:
"Resources": {
"*******InstanceProfile": {
"Condition": "IsInstanceProfile",
"Type": "AWS::IAM::Role",
"Properties": {
"AssumeRolePolicyDocument": {
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": {
"Service": "ec2.amazonaws.com"
},
"Action": "sts:AssumeRole"
}
]
},
"Policies": [
{
"PolicyName": "********-*******-instance",
"PolicyDocument": {
"Version": "2012-10-17",
"Statement": [
{
"Action": "sts:AssumeRole",
"Effect": "Allow",
"Resource": [
{
"Fn::Sub": "arn:aws:iam::*:role/${ReadOnlyRole}"
},
{
"Fn::Sub": "arn:aws:iam::${AWSOrganizationsAccountID}:role/${OrganizationsRole}"
}
]
}
]
}
}
],
"RoleName": {
"Ref": "AccessName"
}
}
}
I am ASSUMING I can use an inline_policy within a resource such as aws_iam_role. Here is the Terraform snippet I've produced, but I get errors:
resource "aws_iam_role" "*****_instance_profile" {
assume_role_policy = jsonencode({
Version = "2012-10-17"
Statement = [
{
Action = "sts:AssumeRole"
Effect = "Allow"
Sid = ""
Principal = {
Service = "ec2.amazonaws.com"
}
},
]
})
policy = jsonencode({
Version = "2012-10-17"
Statement = [
Action = ["sts:AssumeRole"]
Effect = "Allow"
Resource = "arn:aws:iam::*:role/${ReadOnlyRole"
]
})
}
I guess I'm stuck with adding the resource within the policy. How would anyone handle this conversion? Do you folks think I'm going down the correct path with aws_iam_role? And, if so, how would you handle multiple resources in an inline_policy?
I would just follow the official documentation's approach when adding multiple resources to a policy:
data "aws_iam_policy_document" "example" {
statement {
actions = [
"s3:ListAllMyBuckets",
"s3:GetBucketLocation",
]
resources = [
"arn:aws:s3:::*",
]
}
statement {
actions = [
"s3:*",
]
resources = [
"arn:aws:s3:::${var.s3_bucket_name}/home/&{aws:username}",
"arn:aws:s3:::${var.s3_bucket_name}/home/&{aws:username}/*",
]
}
}
resource "aws_iam_user" "user" {
name = "test-user"
}
resource "aws_iam_policy" "policy" {
name = "test-policy"
description = "A test policy"
policy = data.aws_iam_policy_document.example.json
}
resource "aws_iam_user_policy_attachment" "test-attach" {
user = aws_iam_user.user.name
policy_arn = aws_iam_policy.policy.arn
}

Terraform Add IP Range to Bucket Policy

I'm creating an s3 bucket using terraform and need to add a bucket policy to it, to whitelist a bunch of IP addresses.
resource "aws_s3_bucket" "foo-bucket" {
bucket = "foobar-bucket"
}
resource "aws_s3_bucket_policy" "foo-policy" {
bucket = "foobar-bucket"
policy = <<POLICY
{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "HTTP",
"Effect": "Deny",
"Principal": "*",
"Action": "*",
"Resource": [
"arn:aws:s3:::foobar-bucket/*",
"arn:aws:s3:::foobar-bucket"
],
"Condition": {
"Bool": {
"aws:SecureTransport": "false"
}
}
},
{
"Sid": "IPAllow",
"Effect": "Deny",
"Principal": "*",
"Action": "s3:*",
"Resource": [
"arn:aws:s3:::foobar-bucket/*",
"arn:aws:s3:::foobar-bucket"
],
"Condition": {
"NotIpAddress": {
"aws:SourceIp": [
"${join("\", \"", concat(var.foo_ip,
var.bar_ip,
var.foobar_ip))}"
]
},
"Bool": {
"aws:ViaAWSService": "true"
}
}
}
]
}
POLICY
}
The issue I have here is that while var.foo_ip and var.bar_ip are single IP addresses, var.foobar_ip is an IP address range: 10.0.0.0 - 10.0.0.200.
I don't want to have to write the IP addresses out 200 times, like
variable "foobar_ip" {
type = list(string)
default = [
"10.0.0.0",
"10.0.0.1",
"10.0.0.2"
]
}
All the way to 200
Is there a way to pass in this IP range so that the required range is populated?
You can create a locals block.
locals {
cidr = "10.0.0.0/24"
ip_addresses = [ for host_number in range(0, 201) : cidrhost(local.cidr, host_number) ]
}
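cidrhost(prefix, hostnum) returns the address of the given host number within the prefix, and range(0, 201) yields host numbers 0 through 200, so the list covers exactly 10.0.0.0 - 10.0.0.200. For example, in terraform console:
> cidrhost("10.0.0.0/24", 0)
"10.0.0.0"
> cidrhost("10.0.0.0/24", 200)
"10.0.0.200"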
And then your policy becomes
"Sid": "IPAllow",
"Effect": "Deny",
"Principal": "*",
"Action": "s3:*",
"Resource": [
"arn:aws:s3:::foobar-bucket/*",
"arn:aws:s3:::foobar-bucket"
],
"Condition": {
"NotIpAddress": {
"aws:SourceIp": [
"${join("\", \"", concat(var.foo_ip,
var.bar_ip,
local.ip_addresses))}"

add cloud_watch_logs_role_arn to aws_cloudtrail resource terraform

Hi, I am following the guide at https://d1.awsstatic.com/whitepapers/compliance/AWS_CIS_Foundations_Benchmark.pdf and would like to complete the infrastructure changes for section 3. To start, I need to create an aws_cloudtrail resource with SSE-KMS encryption enabled; I'm currently reviewing the documentation for this resource. How do I automate specifying the role for the CloudWatch Logs endpoint to assume in order to write to a user's log group?
resource aws_cloudtrail "cisbenchmark" {
name = "cis-benchmark"
enable_logging = true
s3_bucket_name = aws_s3_bucket.cisbenchmark.bucket
enable_log_file_validation = true
is_multi_region_trail = true
include_global_service_events = true
cloud_watch_logs_role_arn = ????
cloud_watch_logs_group_arn = aws_cloudwatch_log_group.cisbenchmark.arn
kms_key_id = var.kms_key_arn
is_organization_trail = true
}
I've added ???? to mark where I'm missing the attribute value.
If anyone is still looking for the answer:
resource aws_cloudtrail "cisbenchmark" {
name = "cis-benchmark"
enable_logging = true
s3_bucket_name = aws_s3_bucket.cisbenchmark.bucket
enable_log_file_validation = true
is_multi_region_trail = true
include_global_service_events = true
cloud_watch_logs_role_arn = aws_iam_role.cloud_trail.arn
cloud_watch_logs_group_arn = aws_cloudwatch_log_group.cisbenchmark.arn
kms_key_id = var.kms_key_arn
is_organization_trail = true
}
resource "aws_iam_role" "cloud_trail" {
name = "cloudTrail-cloudWatch-role"
assume_role_policy = <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": {
"Service": "cloudtrail.amazonaws.com"
},
"Action": "sts:AssumeRole"
}
]
}
EOF
}
resource "aws_iam_role_policy" "aws_iam_role_policy_cloudTrail_cloudWatch" {
name = "cloudTrail-cloudWatch-policy"
role = aws_iam_role.cloud_trail.id
policy = <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "AWSCloudTrailCreateLogStream2014110",
"Effect": "Allow",
"Action": [
"logs:CreateLogStream"
],
"Resource": [
"${aws_cloudwatch_log_group.cisbenchmark.arn}:*"
]
},
{
"Sid": "AWSCloudTrailPutLogEvents20141101",
"Effect": "Allow",
"Action": [
"logs:PutLogEvents"
],
"Resource": [
"${aws_cloudwatch_log_group.cisbenchmark.arn}:*"
]
}
]
}
EOF
}

Count Not Working Correctly in Terraform Resource aws_iam_policy

I have created log group resources for a list of given job names:
resource "aws_cloudwatch_log_group" "logGroups" {
count = length(var.jobnames)
name = format("/aws/lambda/%s", format(local.function_name_format, var.jobnames[count.index]))
retention_in_days = 7
}
and now, for each log group resource, I am creating an IAM policy:
resource "aws_iam_policy" "base_iam_policy" {
count = length(var.jobnames)
name = format(local.base_iam_policy_name_format, var.jobnames[count.index])
path = "/"
description = "Base IAM policy for creating a lambda"
policy = <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"logs:CreateLogStream",
"logs:PutLogEvents"
],
"Resource": [
"${element(aws_cloudwatch_log_group.logGroups.*.arn, count.index)}*"
]
},
{
"Action": [
"cloudwatch:PutMetricData",
"cloudwatch:GetMetricData",
"cloudwatch:GetMetricStatistics",
"cloudwatch:ListMetrics"
],
"Resource": "*",
"Effect": "Allow"
},
{
"Effect": "Allow",
"Action": [
"ec2:CreateNetworkInterface",
"ec2:DescribeNetworkInterfaces",
"ec2:DeleteNetworkInterface"
],
"Resource": "*",
"Condition": {
"StringEquals": {
"aws:RequestedRegion": "${var.region}"
}
}
}
]
}
EOF
}
The issue is that every base_iam_policy ends up with the same resource for CreateLogStream. It looks like count.index is not being incremented in "${element(aws_cloudwatch_log_group.logGroups.*.arn, count.index)}*"?
Honestly, this seems like a bug in Terraform. In the meantime, I'd recommend indexing the elements directly, like the following:
"${aws_cloudwatch_log_group.logGroups[count.index].arn}*"

Terraform: Setting up logging from AWS LoadBalancer to S3 bucket

I have an aws_lb that I want to log to an S3 bucket.
What I have unsuccessfully tried to do:
data "aws_elb_service_account" "main" {}
data "aws_iam_policy_document" "bucket_policy" {
statement {
sid = ""
actions = ["s3:PutObject"]
resources = ["arn:aws:s3:::my-bucket/*"]
principals {
type = "AWS"
identifiers = ["${data.aws_elb_service_account.main.arn}"]
}
}
}
I also tried this:
resource "aws_iam_role" "lb-logs-role" {
name = "lb-logs-role"
assume_role_policy = <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Action": "sts:AssumeRole",
"Principal": {
"Service": "elasticloadbalancing.amazonaws.com"
},
"Effect": "Allow",
"Sid": ""
}
]
}
EOF
tags = {
Name = "lb-logs-role"
Environment = terraform.workspace
Management = "Managed by Terraform"
}
}
resource "aws_iam_role_policy" "s3-logs-access" {
name = "s3-logs-access"
role = aws_iam_role.lb-logs-role.id
policy = <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Action": [
"s3:PutObject"
],
"Effect": "Allow",
"Resource": "arn:aws:s3:::my-bucket/*"
}
]
}
EOF
}
This is the error I am seeing:
Error: Failure configuring LB attributes: InvalidConfigurationRequest: Access Denied for bucket: my-bucket. Please check S3bucket permission
status code: 400, request id: 5b629210-9738-11e9-bcc6-6f3b4f22bf28
on modules/tableau-linux/lb.tf line 1, in resource "aws_lb" "main":
1: resource "aws_lb" "main" {
Any ideas?
It looks like the API requests the ACL of the bucket to check permissions and to populate the initial folder structure, so even though the aws_elb_service_account has permission to PutObject in the bucket, the API call will fail. This policy is what the AWS web console creates when it creates the S3 bucket for you, and it solved it for me.
data "aws_region" "current" {}
data "aws_caller_identity" "current" {}
data "aws_elb_service_account" "main" {}
resource "aws_s3_bucket_policy" "lb-bucket-policy" {
bucket = aws_s3_bucket.lb-log-storage-s3.id
policy = <<POLICY
{
"Id": "Policy",
"Version": "2012-10-17",
"Statement": [{
"Effect": "Allow",
"Principal": {
"AWS": [
"${data.aws_elb_service_account.main.arn}"
]
},
"Action": [
"s3:PutObject"
],
"Resource": "${aws_s3_bucket.lb-log-storage-s3.arn}/AWSLogs/${data.aws_caller_identity.current.account_id}/*"
},
{
"Effect": "Allow",
"Principal": {
"Service": "delivery.logs.amazonaws.com"
},
"Action": [
"s3:PutObject"
],
"Resource": "${aws_s3_bucket.lb-log-storage-s3.arn}/AWSLogs/${data.aws_caller_identity.current.account_id}/*",
"Condition": {
"StringEquals": {
"s3:x-amz-acl": "bucket-owner-full-control"
}
}
},
{
"Effect": "Allow",
"Principal": {
"Service": "delivery.logs.amazonaws.com"
},
"Action": [
"s3:GetBucketAcl"
],
"Resource": "${aws_s3_bucket.lb-log-storage-s3.arn}"
}
]
}
POLICY
}
It seems the issue is with your policy, but you can try my code using aws_lb. Here is the complete configuration to launch an LB in the default VPC: it creates a bucket named test-bucket-1-for-lb-logs, a bucket policy, and an LB named test-http-lb, along with a security group and a commented-out Route53 entry.
# Creating Load Balancer
resource "aws_lb" "httplb" {
name = "test-http-lb"
internal = false
load_balancer_type = "application"
security_groups = ["${aws_security_group.lbsg.id}"]
subnets = ["subnet-99fdf8e0", "subnet-902b0ddb"]
enable_deletion_protection = false
access_logs {
bucket = "${aws_s3_bucket.bucket.bucket}"
prefix = "http-lb"
enabled = true
}
tags = {
Environment = "test-http"
}
}
# Creating Security Groups for Load Balancer
resource "aws_security_group" "lbsg" {
name = "test-loadbalancer-sg"
description = "test-Allow LB traffic"
tags = {
Name = "test-SG-Balancer"
}
ingress {
from_port = 80
to_port = 80
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
description = "HTTP"
}
egress {
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
}
}
#uncomment this if you want to add route53 record
# resource "aws_route53_record" "web" {
# zone_id = "${data.aws_route53_zone.primary.zone_id}"
# name = "${var.env_prefix_name}.ironman.co
# type = "A"
# alias {
# name = "${aws_lb.httplb.dns_name}"
# zone_id = "${aws_lb.httplb.zone_id}"
# evaluate_target_health = true
# }
# }
data "aws_elb_service_account" "main" {}
# Creating policy on S3, for lb to write
resource "aws_s3_bucket_policy" "lb-bucket-policy" {
bucket = "${aws_s3_bucket.bucket.id}"
policy = <<POLICY
{
"Id": "testPolicy1561031527701",
"Version": "2012-10-17",
"Statement": [
{
"Sid": "testStmt1561031516716",
"Action": [
"s3:PutObject"
],
"Effect": "Allow",
"Resource": "arn:aws:s3:::test-bucket-1-for-lb-logs/http-lb/*",
"Principal": {
"AWS": [
"${data.aws_elb_service_account.main.arn}"
]
}
}
]
}
POLICY
}
resource "aws_s3_bucket" "bucket" {
bucket = "test-bucket-1-for-lb-logs"
acl = "private"
region = "us-west-2"
versioning {
enabled = false
}
force_destroy = true
}
Then go to your S3 bucket and verify the ELBAccessLogTestFile that Elastic Load Balancing writes when access logging is enabled.
This one stumped me too, but I got it working with this:
{
Effect : "Allow",
Principal : {
"AWS" : "arn:aws:iam::127311923021:root"
},
Action : [
"s3:PutObject"
],
Resource : "${aws_s3_bucket.logging_bucket.arn}/AWSLogs/${data.aws_caller_identity.current.account_id}/*",
}
Where does 127311923021 come from, you ask? Believe me, I did too! This AWS document says it is the ID of the AWS account for Elastic Load Balancing for your Region (us-east-1 in this case). It has a large table of these magic numbers!
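If you'd rather not hard-code that account ID, the aws_elb_service_account data source used in the earlier answers resolves it for the current region. A rough equivalent of the statement above, assuming aws_s3_bucket.logging_bucket is the same bucket resource referenced in that snippet:
data "aws_caller_identity" "current" {}
data "aws_elb_service_account" "main" {}
# data.aws_elb_service_account.main.arn resolves to
# arn:aws:iam::127311923021:root in us-east-1.
{
  Effect : "Allow",
  Principal : {
    "AWS" : data.aws_elb_service_account.main.arn
  },
  Action : [
    "s3:PutObject"
  ],
  Resource : "${aws_s3_bucket.logging_bucket.arn}/AWSLogs/${data.aws_caller_identity.current.account_id}/*"
}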
