I'm a newbie to the Terraform world and am having a tough time passing variables from a .tf file into a .json file.
My sample tf lambda function is as follows
# Renders ./iam/grange_rest_dynlambda_policy.json in memory, substituting the
# template's ${resource} placeholder with "<stage>_grange_dynamodb". The result
# is exposed as data.template_file.task.rendered; the file on disk is unchanged.
data "template_file" "task" {
template = file("./iam/grange_rest_dynlambda_policy.json")
vars = {
resource="${var.stage}_grange_dynamodb"
}
}
# Lambda function packaged from ../dist/lambda.zip, named per deployment stage.
resource "aws_lambda_function" "grange_rest_dynlambda" {
  function_name = "${var.stage}_grange_rest_dynlambda"
  handler       = "lambda/src/index.handler"
  # Use bare numbers rather than quoted strings so Terraform type-checks them.
  memory_size   = 256
  timeout       = 10
  # -1 = no reserved concurrency (function draws from the unreserved pool).
  reserved_concurrent_executions = -1
  filename = "${path.module}/../dist/lambda.zip"
  role     = aws_iam_role.grange_rest_dynlambda_iam_role.arn
  runtime  = "nodejs14.x"
  # publish = true creates a numbered version on each change; the alias
  # resource pins to that version via .version.
  publish  = true
}
# Stage-named alias pointing at the latest published version of the function.
resource "aws_lambda_alias" "grange_rest_dynlambda_alias" {
  # TF 0.12+ syntax: depends_on takes bare resource references, not quoted
  # strings (quoted references are an error from TF 0.13 on). It is also
  # redundant here — the function_name/function_version references below
  # already imply the dependency — but kept for explicitness.
  depends_on       = [aws_lambda_function.grange_rest_dynlambda]
  name             = var.stage
  description      = var.stage
  function_name    = aws_lambda_function.grange_rest_dynlambda.arn
  function_version = aws_lambda_function.grange_rest_dynlambda.version
}
// Enable cloudwatch for lambda
# Pre-create the log group Lambda would otherwise auto-create, so Terraform
# controls its retention (14 days). The name must match Lambda's convention:
# /aws/lambda/<function_name>.
resource "aws_cloudwatch_log_group" "example" {
name = "/aws/lambda/${var.stage}_grange_rest_dynlambda"
retention_in_days = 14
}
# See also the following AWS managed policy: AWSLambdaBasicExecutionRole
# Standalone logging policy, loaded verbatim from a JSON file (no variable
# substitution happens with file()).
# NOTE(review): the name is not stage-scoped, so two stages in one account
# would collide on "lambda_logging" — confirm whether that is intended.
resource "aws_iam_policy" "lambda_logging" {
name = "lambda_logging"
path = "/"
description = "IAM policy for logging from a lambda"
policy = file("./iam/grange_rest_dynlambda_logging_policy.json")
}
// Lambda + DynamoDB
// Lambda + DynamoDB
# Execution role for the function; the trust policy is read verbatim from disk.
resource "aws_iam_role" "grange_rest_dynlambda_iam_role" {
name = "grange_rest_dynlambda_iam_role"
assume_role_policy = file("./iam/grange_rest_dynlambda_assume_policy.json")
}
# NOTE: file() returns the raw file contents — the ${resource} placeholder in
# the JSON is NOT substituted here. To get substitution, use the rendered
# template output (data.template_file.task.rendered) instead of file().
resource "aws_iam_role_policy" "grange_rest_dynlambda_iam_policy" {
policy = file("./iam/grange_rest_dynlambda_policy.json")
role = aws_iam_role.grange_rest_dynlambda_iam_role.id
}
# Attach the standalone logging policy to the Lambda execution role.
resource "aws_iam_role_policy_attachment" "lambda_logs" {
role = aws_iam_role.grange_rest_dynlambda_iam_role.name
policy_arn = aws_iam_policy.lambda_logging.arn
}
// API Gateway + Lambda
// API Gateway + Lambda
# Path segment /grange_rest_dynlambda_api under the REST API root.
# (aws_api_gateway_rest_api.grange_rest_api_gateway is defined elsewhere.)
resource "aws_api_gateway_resource" "grange_rest_dynlambda_api" {
parent_id = aws_api_gateway_rest_api.grange_rest_api_gateway.root_resource_id
path_part = "grange_rest_dynlambda_api"
rest_api_id = aws_api_gateway_rest_api.grange_rest_api_gateway.id
}
# Unauthenticated GET on the resource above.
resource "aws_api_gateway_method" "grange_rest_dynlambda_api_get" {
authorization = "NONE"
http_method = "GET"
resource_id = aws_api_gateway_resource.grange_rest_dynlambda_api.id
rest_api_id = aws_api_gateway_rest_api.grange_rest_api_gateway.id
}
# Unauthenticated POST on the same resource.
resource "aws_api_gateway_method" "grange_rest_dynlambda_api_post" {
authorization = "NONE"
http_method = "POST"
resource_id = aws_api_gateway_resource.grange_rest_dynlambda_api.id
rest_api_id = aws_api_gateway_rest_api.grange_rest_api_gateway.id
}
# Allow any stage/method/path of this REST API (/*/*) to invoke the function.
resource "aws_lambda_permission" "apigw" {
action = "lambda:InvokeFunction"
statement_id = "AllowExecutionFromAPIGateway"
function_name = aws_lambda_function.grange_rest_dynlambda.function_name
principal = "apigateway.amazonaws.com"
source_arn = "${aws_api_gateway_rest_api.grange_rest_api_gateway.execution_arn}/*/*"
}
# Invoke URL of the deployment (aws_api_gateway_deployment.apigwdeployment is
# defined elsewhere in the configuration).
output "base_url" {
value = aws_api_gateway_deployment.apigwdeployment.invoke_url
}
I inject the policy from a JSON file and expect the "resource" variable to be passed into the JSON. But that's not how it works:
{
"Version": "2012-10-17",
"Statement":[{
"Effect": "Allow",
"Action": [
"dynamodb:BatchGetItem",
"dynamodb:GetItem",
"dynamodb:Query",
"dynamodb:Scan",
"dynamodb:BatchWriteItem",
"dynamodb:PutItem",
"dynamodb:UpdateItem"
],
"Resource": "arn:aws:dynamodb:us-east-2:741573820784:table/${resource}"
}
]
}
What am I missing?
The template_file data source does not replace the variables in the actual file. It just reads the file and provides the "rendered" output directly to your Terraform.
Therefore, you need to change your Terraform where you want to consume the "rendered" output:
Before:
resource "aws_iam_role_policy" "grange_rest_dynlambda_iam_policy" {
# file() returns the raw, un-substituted JSON.
policy = file("./iam/grange_rest_dynlambda_policy.json")
role = aws_iam_role.grange_rest_dynlambda_iam_role.id
}
After:
resource "aws_iam_role_policy" "grange_rest_dynlambda_iam_policy" {
# .rendered is the template output with vars substituted.
policy = data.template_file.task.rendered
role = aws_iam_role.grange_rest_dynlambda_iam_role.id
}
You need to access the rendered property of the template_file data source:
data.template_file.task.rendered
This will replace ${resource} with the value of "${var.stage}_grange_dynamodb".
Please note, that the documentation recommends to use the templatefile function instead of this data source.
Related
I am trying to create an SQS queue and attach an access policy to it. The policy is of type "data" — no actual resource is created; it is just attached to the newly created SQS queue.
╷
│ Error: Cycle: data.aws_iam_policy_document.sqs_vote_policy, aws_sqs_queue.sqs_vote
│
the tf code:
# NOTE(review): this is the configuration that produces the cycle — the
# inline policy argument reads data.aws_iam_policy_document.sqs_vote_policy,
# while that document's statements read aws_sqs_queue.sqs_vote.arn.
resource "aws_sqs_queue" "sqs_vote" {
name = "sqs-erjan"
delay_seconds = 0
message_retention_seconds = 86400
receive_wait_time_seconds = 0
policy = data.aws_iam_policy_document.sqs_vote_policy.json
}
# Access policy for the queue. The resources entries below reference the
# queue's ARN — the other half of the reported cycle.
data "aws_iam_policy_document" "sqs_vote_policy" {
policy_id = "__default_policy_ID"
statement {
sid = "__console_sub_0"
actions = ["SQS:SendMessage"]
resources = [aws_sqs_queue.sqs_vote.arn]
principals {
type = "AWS"
identifiers = ["*"]
}
effect = "Allow"
# Only allow SendMessage when the source is the SNS topic.
condition {
test = "ArnLike"
variable = "AWS:SourceArn"
values = [
aws_sns_topic.vote_sns.arn
]
}
}
statement {
sid = "__owner_statement"
actions = ["SQS:*"]
resources = [aws_sqs_queue.sqs_vote.arn]
principals {
# NOTE(review): "type" holds an account ARN here; principals.type should be
# "AWS" with the ARN placed in identifiers — verify against the provider docs.
type = "arn:aws:iam::025416187662:root"
identifiers = ["*"]
}
effect = "Allow"
}
# depends_on was added to force ordering, but it cannot break a reference
# cycle — it only adds edges to the dependency graph.
depends_on = [
aws_sqs_queue.sqs_vote,aws_sns_topic.vote_sns
]
}
How can I fix it?
Change aws_sqs_queue to:
# Queue without the inline policy argument — this removes one direction of
# the cycle.
resource "aws_sqs_queue" "sqs_vote" {
name = "sqs-erjan"
delay_seconds = 0
message_retention_seconds = 86400
receive_wait_time_seconds = 0
}
and use aws_sqs_queue_policy to attach the policy to the queue:
# A separate policy-attachment resource can depend on both the queue and the
# policy document, so the graph stays acyclic.
resource "aws_sqs_queue_policy" "test" {
queue_url = aws_sqs_queue.sqs_vote.id
policy = data.aws_iam_policy_document.sqs_vote_policy.json
}
How can I iterate over the JSON rendered data.aws_iam_policy_document documents within an aws_iam_policy?
# Read-only CloudFront listing permissions, rendered as JSON via .json.
data "aws_iam_policy_document" "role_1" {
statement {
sid = "CloudFront1"
actions = [
"cloudfront:ListDistributions",
"cloudfront:ListStreamingDistributions"
]
resources = ["*"]
}
}
# CloudFront invalidation/inspection permissions, rendered as JSON via .json.
data "aws_iam_policy_document" "role_2" {
statement {
sid = "CloudFront2"
actions = [
"cloudfront:CreateInvalidation",
"cloudfront:GetDistribution",
"cloudfront:GetInvalidation",
"cloudfront:ListInvalidations"
]
resources = ["*"]
}
}
# NOTE(review): these defaults are literal strings, not references — variable
# defaults cannot contain expressions, so "data.aws_iam_policy_document..."
# is passed through verbatim (this is the source of the invalid-JSON error).
variable "role_policy_docs" {
type = list(string)
description = "Policies associated with Role"
default = [
"data.aws_iam_policy_document.role_1.json",
"data.aws_iam_policy_document.role_2.json",
]
}
# Convert the list into a map keyed by list position, for use with for_each.
locals {
role_policy_docs = { for s in var.role_policy_docs: index(var.role_policy_docs, s) => s}
}
# One managed policy per document, named RolePolicy-00, RolePolicy-01, ...
resource "aws_iam_policy" "role" {
for_each = local.role_policy_docs
name = format("RolePolicy-%02d", each.key)
description = "Custom Policies for Role"
policy = each.value
}
# Attach every generated policy to the role (aws_iam_role.role is defined
# elsewhere), keying the for_each map by policy name.
resource "aws_iam_role_policy_attachment" "role" {
for_each = { for p in aws_iam_policy.role : p.name => p.arn }
role = aws_iam_role.role.name
policy_arn = each.value
}
This example has been reduced down to the very basics. The policy documents are dynamically generated with the source_json and override_json conventions. I cannot simply combine the statements into a single policy document.
Terraform Error:
Error: "policy" contains an invalid JSON policy
on role.tf line 35, in resource "aws_iam_policy" "role":
35: policy = each.value
This:
variable "role_policy_docs" {
type = list(string)
description = "Policies associated with Role"
# These quoted values are plain strings, not references to the data sources.
default = [
"data.aws_iam_policy_document.role_1.json",
"data.aws_iam_policy_document.role_2.json",
]
}
Is literally defining those default values as strings, so what you're getting is this:
+ role_policy_docs = {
+ 0 = "data.aws_iam_policy_document.role_1.json"
+ 1 = "data.aws_iam_policy_document.role_2.json"
}
If you tried removing the quotations around the data blocks, it will not be valid because you cannot use variables in default definitions. Instead, assign your policy documents to a new local, and use that local in your for loop instead:
locals {
  # References are not allowed in variable defaults, so collect the rendered
  # policy JSON documents in a local instead.
  role_policies = [
    data.aws_iam_policy_document.role_1.json,
    data.aws_iam_policy_document.role_2.json,
  ]
  # Key each document by its list position. Taking the index directly from
  # the for expression (for i, s in ...) avoids the O(n^2) index() lookup and
  # stays correct even if two documents render to identical JSON, where
  # index() would return the first match for both and collide on the key.
  role_policy_docs = {
    for i, s in local.role_policies : i => s
  }
}
I have a Terraform resource that creates a backup of an EC2 instance in AWS Backup. I am trying to choose my instances based on tags. So by referring to Terraform docs online (Selecting Backups By Tag), I created a resource that looks as below:
# Select resources for the backup plan by tag Name=config_lin1.
# (This is the configuration that triggered the InvalidParameterValueException
# discussed below.)
resource "aws_backup_selection" "select_lin_config" {
iam_role_arn = "arn:aws:iam::abc"
name = "lin_config"
plan_id = aws_backup_plan.bkp_plan_ec2.id
selection_tag {
type = "STRINGEQUALS"
key = "Name"
value = "config_lin1"
}
}
When I do a terraform apply, I am getting below error:
Error: error creating Backup Selection: InvalidParameterValueException: Invalid selection conditions Condition(conditionType=STRINGEQUALS, conditionKey=Name, conditionValue=config_lin1)
{
RespMetadata: {
StatusCode: 400,
RequestID: "587a331c-e218-4341-9de1-a69a3ef7ec21"
},
Code_: "ERROR_3309",
Context: "Condition(conditionType=STRINGEQUALS, conditionKey=Name, conditionValue=config_lin1)",
Message_: "Invalid selection conditions Condition(conditionType=STRINGEQUALS, conditionKey=Name, conditionValue=config_lin1)"
}
I used the following example almost as-is from the Terraform documentation, and it worked. Copy and paste the following into your Terraform code and try it out.
Just to be sure, you might want to upgrade the AWS provider to the latest version using terraform init -upgrade. My AWS provider version is 3.26.0.
# Vault that stores the recovery points created by the plan below.
resource "aws_backup_vault" "example" {
name = "example_backup_vault"
}
# Daily (12:00 UTC) backup plan targeting the vault above; the advanced
# setting enables Windows VSS-consistent snapshots for EC2 resources.
resource "aws_backup_plan" "example" {
name = "tf_example_backup_plan"
rule {
rule_name = "tf_example_backup_rule"
target_vault_name = aws_backup_vault.example.name
schedule = "cron(0 12 * * ? *)"
}
advanced_backup_setting {
backup_options = {
WindowsVSS = "enabled"
}
resource_type = "EC2"
}
}
# Role that AWS Backup assumes to perform backups on your behalf.
resource "aws_iam_role" "example" {
  name = "example"
  # Trust policy for the backup.amazonaws.com service principal.
  # Note: IAM policy values are case-sensitive — Effect must be "Allow"
  # (capital A); lowercase "allow" is rejected as a malformed policy.
  assume_role_policy = <<POLICY
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Action": ["sts:AssumeRole"],
      "Effect": "Allow",
      "Principal": {
        "Service": ["backup.amazonaws.com"]
      }
    }
  ]
}
POLICY
}
# Grant the role the AWS-managed permissions needed to run backups.
resource "aws_iam_role_policy_attachment" "example" {
policy_arn = "arn:aws:iam::aws:policy/service-role/AWSBackupServiceRolePolicyForBackup"
role = aws_iam_role.example.name
}
# Select resources tagged foo=bar into the plan, using the role created above
# (a real role ARN, unlike the hand-written ARN in the failing configuration).
resource "aws_backup_selection" "example" {
iam_role_arn = aws_iam_role.example.arn
name = "tf_example_backup_selection"
plan_id = aws_backup_plan.example.id
selection_tag {
type = "STRINGEQUALS"
key = "foo"
value = "bar"
}
}
My terraform script for deploying an HTTP API looks like the following. I am getting the following error when I run this -
error creating API Gateway v2 stage: BadRequestException: Insufficient permissions to enable logging
Do I need to add something else to make it work?
# Destination log group for the HTTP API's access logs (7-day retention).
resource "aws_cloudwatch_log_group" "api_gateway_log_group" {
name = "/aws/apigateway/${var.location}-${var.custom_tags.Layer}-demo-publish-api"
retention_in_days = 7
tags = var.custom_tags
}
# HTTP API (API Gateway v2) that fronts the event-publishing endpoint.
resource "aws_apigatewayv2_api" "demo_publish_api" {
name = "${var.location}-${var.custom_tags.Layer}-demo-publish-api"
description = "API to publish event payloads"
protocol_type = "HTTP"
tags = var.custom_tags
}
# VPC link so the API can reach the private ALB listener.
resource "aws_apigatewayv2_vpc_link" "demo_vpc_link" {
name = "${var.location}-${var.custom_tags.Layer}-demo-vpc-link"
security_group_ids = local.security_group_id_list
subnet_ids = local.subnet_ids_list
tags = var.custom_tags
}
# Proxy POSTs through the VPC link to the ALB listener.
resource "aws_apigatewayv2_integration" "demo_apigateway_integration" {
api_id = aws_apigatewayv2_api.demo_publish_api.id
integration_type = "HTTP_PROXY"
connection_type = "VPC_LINK"
integration_uri = var.alb_listener_arn
connection_id = aws_apigatewayv2_vpc_link.demo_vpc_link.id
integration_method = "POST"
timeout_milliseconds = var.api_timeout_milliseconds
}
# Route POST /api/event to the integration above.
resource "aws_apigatewayv2_route" "demo_publish_api_route" {
api_id = aws_apigatewayv2_api.demo_publish_api.id
route_key = "POST /api/event"
target = "integrations/${aws_apigatewayv2_integration.demo_apigateway_integration.id}"
}
# $default stage with auto-deploy, per-route throttling, and access logging.
# The access_log_settings block is what requires CloudWatch Logs delivery
# permissions and triggered the "Insufficient permissions to enable logging"
# error discussed below.
resource "aws_apigatewayv2_stage" "demo_publish_api_default_stage" {
# Make sure the log group exists before the stage tries to log to it.
depends_on = [aws_cloudwatch_log_group.api_gateway_log_group]
api_id = aws_apigatewayv2_api.demo_publish_api.id
name = "$default"
auto_deploy = true
tags = var.custom_tags
route_settings {
route_key = aws_apigatewayv2_route.demo_publish_api_route.route_key
throttling_burst_limit = var.throttling_burst_limit
throttling_rate_limit = var.throttling_rate_limit
}
default_route_settings {
detailed_metrics_enabled = true
logging_level = "INFO"
}
access_log_settings {
destination_arn = aws_cloudwatch_log_group.api_gateway_log_group.arn
format = jsonencode({ "requestId":"$context.requestId", "ip": "$context.identity.sourceIp"})
}
}
I was stuck on this for a couple of days before reaching out to AWS support. If you have been deploying a lot of HTTP APIs, then you might have run into the same issue where an IAM policy gets very large.
Run this AWS CLI command to find the associated CloudWatch Logs resource policy:
aws logs describe-resource-policies
Look for AWSLogDeliveryWrite20150319. You'll notice this policy has a large number of associated LogGroup resources. You have three options:
Adjust this policy by removing some of the potentially unused entries.
Change the resource list to "*"
You can add another policy. Based on this policy, split the resource records between them.
Apply updates via this AWS CLI command:
aws logs put-resource-policy
Here's the command I ran to set resources. Use "*" for the policy:
aws logs put-resource-policy --policy-name AWSLogDeliveryWrite20150319 --policy-document "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Sid\":\"AWSLogDeliveryWrite\",\"Effect\":\"Allow\",\"Principal\":{\"Service\":\"delivery.logs.amazonaws.com\"},\"Action\":[\"logs:CreateLogStream\",\"logs:PutLogEvents\"],\"Resource\":[\"*\"]}]}"
#Marcin Your initial comment about the aws_api_gateway_account was correct. I added the following resources and now it is working fine -
# Account-level API Gateway setting: the role API Gateway uses to write to
# CloudWatch Logs in this region/account.
resource "aws_api_gateway_account" "demo" {
cloudwatch_role_arn = var.apigw_cloudwatch_role_arn
}
# Permissions the API Gateway logging role needs: read log groups/streams and
# manage log deliveries (the vended-logs mechanism used for access logging).
data "aws_iam_policy_document" "demo_apigw_allow_manage_resources" {
version = "2012-10-17"
statement {
actions = [
"logs:DescribeLogGroups",
"logs:DescribeLogStreams",
"logs:GetLogEvents",
"logs:FilterLogEvents"
]
resources = [
"*"
]
}
statement {
actions = [
"logs:CreateLogDelivery",
"logs:PutResourcePolicy",
"logs:UpdateLogDelivery",
"logs:DeleteLogDelivery",
"logs:CreateLogGroup",
"logs:DescribeResourcePolicies",
"logs:GetLogDelivery",
"logs:ListLogDeliveries"
]
resources = [
"*"
]
}
}
# Trust policy allowing API Gateway to assume the CloudWatch logging role.
data "aws_iam_policy_document" "demo_apigw_allow_assume_role" {
version = "2012-10-17"
statement {
effect = "Allow"
actions = [
"sts:AssumeRole"]
principals {
type = "Service"
identifiers = ["apigateway.amazonaws.com"]
}
}
}
# Inline policy attaching the log-management permissions to the logging role.
resource "aws_iam_role_policy" "demo_apigw_allow_manage_resources" {
policy = data.aws_iam_policy_document.demo_apigw_allow_manage_resources.json
role = aws_iam_role.demo_apigw_cloudwatch_role.id
name = var.demo-apigw-manage-resources_policy_name
}
# The CloudWatch logging role referenced by aws_api_gateway_account.
resource "aws_iam_role" "demo_apigw_cloudwatch_role" {
name = "demo_apigw_cloudwatch_role"
tags = var.custom_tags
assume_role_policy = data.aws_iam_policy_document.demo_apigw_allow_assume_role.json
}
You can route CW logs (aws_cloudwatch_log_group) to /aws/vendedlogs/* and it will resolve issue. Or create aws_api_gateway_account
I am trying to set up a simple demo endpoint via AWS API Gateway. Below is the Terraform manifest that describes it.
It is essentially a GET /demo/hello/world endpoint which accepts a query string parameter return_to.
The terraform correctly creates all resources in AWS.
However, when I then make a request to gateway at /demo/hello/world?return_to=bbb, the backend service receives this request:
/demo/hello/world%3Freturn_to=bbb?return_to=bbb
As you can see the ?return_to=bbb from API Gateway is being URL encoded as if it were part of the path and then another query string is appended at the end.
Anybody could help me out how to fix this? I have been going through all the settings for few hours and can't figure out what is the issue and how to fix it.
# REST API named after the environment's origin domain
# (domain-name comes from the remote state of the setup stack).
resource "aws_api_gateway_rest_api" "api" {
name = "origin-${var.name}.${data.terraform_remote_state.setup.outputs.domain-name}"
description = "Proxy to handle requests to our API test"
}
# Path segment /demo under the API root.
resource "aws_api_gateway_resource" "demo" {
  # TF 0.12+ style: bare references instead of quoted depends_on strings and
  # "${...}"-only interpolations (both are deprecated; quoted depends_on is an
  # error from TF 0.13). depends_on is redundant here — the rest_api_id
  # reference already implies it — but kept for parity with the original.
  depends_on  = [aws_api_gateway_rest_api.api]
  rest_api_id = aws_api_gateway_rest_api.api.id
  parent_id   = aws_api_gateway_rest_api.api.root_resource_id
  path_part   = "demo"
}
# Path segment /demo/hello.
resource "aws_api_gateway_resource" "hello" {
  # TF 0.12+ style references; depends_on is implied by the references below.
  depends_on  = [aws_api_gateway_rest_api.api, aws_api_gateway_resource.demo]
  rest_api_id = aws_api_gateway_rest_api.api.id
  parent_id   = aws_api_gateway_resource.demo.id
  path_part   = "hello"
}
# Path segment /demo/hello/world.
resource "aws_api_gateway_resource" "world" {
  # TF 0.12+ style references; depends_on is implied by the references below.
  depends_on  = [aws_api_gateway_rest_api.api, aws_api_gateway_resource.hello]
  rest_api_id = aws_api_gateway_rest_api.api.id
  parent_id   = aws_api_gateway_resource.hello.id
  path_part   = "world"
}
# GET /demo/hello/world, declaring return_to as a required query parameter.
resource "aws_api_gateway_method" "hello-world" {
  # TF 0.12+ style references; depends_on is implied by resource_id below.
  depends_on    = [aws_api_gateway_resource.world]
  rest_api_id   = aws_api_gateway_rest_api.api.id
  resource_id   = aws_api_gateway_resource.world.id
  http_method   = "GET"
  authorization = "NONE"
  request_parameters = {
    "method.request.querystring.return_to" = true
  }
}
# HTTP integration forwarding the method to the backend over a VPC link.
resource "aws_api_gateway_integration" "hello-world" {
  # TF 0.12+ style references; depends_on is implied by http_method below.
  depends_on              = [aws_api_gateway_method.hello-world]
  rest_api_id             = aws_api_gateway_rest_api.api.id
  resource_id             = aws_api_gateway_resource.world.id
  http_method             = aws_api_gateway_method.hello-world.http_method
  integration_http_method = "GET"
  type                    = "HTTP"
  uri                     = "http://${lookup(var.demo-map, var.environment)}/demo/hello/world"
  connection_type         = "VPC_LINK"
  connection_id           = data.aws_api_gateway_vpc_link.vpclink.id
  # NOTE(review): this explicit querystring mapping is what the reported
  # double-encoded request (/demo/hello/world%3Freturn_to=...) was traced to;
  # the accepted fix was to remove it and keep the parameter declaration only
  # on the aws_api_gateway_method.
  request_parameters = {
    "integration.request.querystring.return_to" = "method.request.querystring.return_to"
  }
}
I had the same error. Keeping the query parameters on the aws_api_gateway_method and removing them from the aws_api_gateway_integration solved my problem.