Below is the variable declaration for my Terraform module, which is used in our cloud; inputs for these variables are supplied by one of our automation solutions. I would like to reproduce an issue, and to do so I need to create a tfvars file from the variable definition below.
variables.tf:
variable "docker" {
type = object({
image_name = string
image_location = string
docker_ports = object({
internal = number
external = number
})
rmodelling = object({
lang = object({
version = number
enabled = bool
policy = object({
identification = string
})
})
impl = object({
version = number
enabled = bool
policy = object({
identification = string
})
})
})
})
}
I have tried something like this, but I am not sure how to write the next nested object. Can someone guide me or share some pointers?
terraform.tfvars:
docker = {
  image_name     = "Ubuntu 18.04"
  image_location = "https://registry.jd.com/ubuntu/<custom_location>"
  docker_ports = {
    internal = 80
    external = 443
  }
  rmodelling = { ??
    ???
An example of a valid value for your var.docker is:
docker = {
  image_name     = "Ubuntu 18.04"
  image_location = "https://registry.jd.com/ubuntu/<custom_location>"
  docker_ports = {
    internal = 80
    external = 443
  }
  rmodelling = {
    lang = {
      version = 3
      enabled = true
      policy = {
        identification = "test"
      }
    }
    impl = {
      version = 4
      enabled = false
      policy = {
        identification = "test2"
      }
    }
  }
}
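Note that with an object(...) type constraint every declared attribute is required (optional object attributes only became generally available much later, in Terraform 1.3), so the tfvars value has to populate all of them, mirroring the nesting of the type expression brace for brace.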
I'm stuck on a problem and need some Terraform expertise. I want to create VPSes in GCP with count, using a module. How do I correctly create and attach a google_compute_address and a google_compute_disk to each VPS with different names? Any help, please.
Module code:
resource "google_compute_instance" "vps" {
count = var.server_count
name = var.server_count > 1 ? "${var.server_name}-${count.index}" : var.server_name
description = var.server_description
machine_type = var.server_type
zone = var.server_datacenter
deletion_protection = var.server_delete_protection
labels = var.server_labels
metadata = var.server_metadata
tags = var.server_tags
boot_disk {
auto_delete = false
initialize_params {
size = var.boot_volume_size
type = var.boot_volume_type
image = var.boot_volume_image
labels = var.boot_volume_labels
}
}
dynamic "attached_disk" {
for_each = var.volumes
content {
source = attached_disk.value["volume_name"]
}
}
dynamic "network_interface" {
for_each = var.server_network
content {
subnetwork = network_interface.value["subnetwork_name"]
network_ip = network_interface.value["subnetwork_ip"]
dynamic "access_config" {
for_each = network_interface.value.nat_ip ? [1] : []
content {
nat_ip = google_compute_address.static_ip.address
}
}
}
}
}
resource "google_compute_disk" "volume" {
for_each = var.volumes
name = each.value["volume_name"]
type = each.value["volume_type"]
size = each.value["volume_size"]
zone = var.server_datacenter
labels = each.value["volume_labels"]
}
resource "google_compute_address" "static_ip" {
count = var.server_count
name = var.server_count > 1 ? "${var.server_name}-${count.index}" : var.server_name
region = var.server_region
}
Usage example:
module "vps-test" {
source = "../module"
credentials_file = "../../../../main/vault/prod/.tf/terraform-bb-prod-ground.json"
server_count = 2
server_name = "example-vps"
server_description = "simple vps for module testing"
server_type = "e2-small"
server_region = "europe-west4"
server_datacenter = "europe-west4-c"
server_labels = {
project = "terraform"
environment = "test"
}
server_metadata = {
groups = "parent_group.child_group"
}
boot_volume_image = "debian-cloud/debian-11"
boot_volume_size = 30
boot_volume_labels = {
environment = "production"
project = "v3"
type = "system"
}
server_tags = ["postgres", "production", "disable-gce-firewall"]
server_delete_protection = true
server_network = {
common_network = {
subnetwork_name = "${data.terraform_remote_state.network.outputs.subnetwork_vpc_production_common_name}"
subnetwork_ip = ""
nat_ip = true
} # },
# custom_network = {
# subnetwork_name = (data.terraform_remote_state.network.outputs.subnetwork_vpc_production_k8s_name)
# subnetwork_ip = ""
# nat_ip = false
# }
}
volumes = {
volume_data1 = {
volume_name = "v3-postgres-saga-import-test-storage"
volume_size = "40"
volume_type = "pd-ssd"
volume_labels = {
environment = "production"
project = "v3"
type = "storage"
}
},
volume_data2 = {
volume_name = "volume-vpstest2"
volume_size = "20"
volume_type = "pd-ssd"
volume_labels = {
environment = "production"
project = "v2"
type = "storage"
}
}
}
}
The error now is: Because google_compute_address.static_ip has "count" set, its attributes must be accessed on specific instances. And I know an error about duplicate disk names will come next.
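For what it's worth, a minimal sketch of the usual fix for the count error, assuming one static IP per VPS (both resources use the same count): with count set, google_compute_address.static_ip is a list of instances, so it has to be indexed, e.g. with count.index from the enclosing instance resource:

# Inside resource "google_compute_instance" "vps" (which also uses count),
# pick this server's address instead of referencing the whole list:
dynamic "access_config" {
  for_each = network_interface.value.nat_ip ? [1] : []
  content {
    nat_ip = google_compute_address.static_ip[count.index].address
  }
}

The duplicate disk names would need a similar treatment: if every VPS is meant to get its own copy of each volume, the for_each on google_compute_disk.volume has to be flattened over server index and volume key so each disk receives a unique name.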
I'm trying to deploy an Azure application gateway with Terraform; in particular, I need to create nested dynamic blocks.
I have tried to implement this (this part of the code is in a file called application_gateway.tf):
dynamic "url_path_map" {
for_each = var.path_maps
content {
name = outer_block.value["name"]
default_backend_address_pool_name = outer_block.value["backend"]
default_backend_http_settings_name = outer_block.value["backend_set"]
dynamic "url_path_rule" {
for_each = url_path_map.value["upm"]
content{
name = url_path_rule.value["name_rule"]
paths = url_path_rule.value["path"]
backend_address_pool_name = url_path_rule.value["backend"]
backend_http_settings_name = url_path_rule.value["backend_set"]
}
}
}
}
The corresponding variables.tf file is:
variable "path_maps" {
default = []
type = list(object({
name = string
backend = string
backend_set = string
upm = list(object({
name_rule = string
path = string
backend = string
backend_set = string
}))
}))
}
With the following module call (this part of the script is in another file called main.tf):
module "application_gateway" {
source = "../modules/resources-hub/application_gateway"
resource_group_name = module.resource_group.name
resource_group_location = module.resource_group.location
subnet_id = module.agw_subnet.id
public_ip_address_id = module.app_gw_pip.id
firewall_policy_id = module.agw_web_application_firewall.id
log_analytics_workspace_id = module.log_analytics_workspace.id
path_maps = [{name = "dev_url_path_name", backend = "devBackend", backend_set = "devHttpSetting", name_rule = "dev_path_rule_name_app", path = "/app/*"},
{name = "tst_url_path_name", backend = "tstBackend", backend_set = "tstHttpSetting", name_rule = "dev_path_rule_name_edp", path = "/edp/*"},
{name = "uat_url_path_name", backend = "uatBackend", backend_set = "uatHttpSetting", name_rule = "dev_path_rule_name_internal", path = "/internal/*"}]
}
In the end, this is what I would like to obtain, but using nested dynamic blocks:
url_path_map {
  name                               = "dev_url_path_name"
  default_backend_address_pool_name  = "devBackend"
  default_backend_http_settings_name = "devHttpSetting"
  path_rule {
    name                       = "dev_path_rule_name_app_edp"
    paths                      = ["/app/*"]
    backend_address_pool_name  = "devBackend"
    backend_http_settings_name = "devHttpSetting"
  }
  path_rule {
    name                       = "dev_path_rule_name_internal"
    paths                      = ["/edp/*"]
    backend_address_pool_name  = "devBackend"
    backend_http_settings_name = "devHttpSetting"
  }
  path_rule {
    name                       = "dev_path_rule_name_internal"
    paths                      = ["/internal/*"]
    backend_address_pool_name  = "sinkPool"
    backend_http_settings_name = "devHttpSetting"
  }
}
This is the error I get when I run "terraform validate" (it was posted as a screenshot and is not reproduced here). Thank you in advance!
The first problem is the definition of the variable "path_maps": it differs from the format of the path_maps value you are passing to the module.
You can either reshape path_maps into the declared format before passing it to the module, or change the variable's type to fit the format you are passing.
That is why you are getting the error that "upm" is required.
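For illustration, a minimal sketch of both fixes, using the names from the question. First, the value reshaped to match the declared type, with the path rules nested under upm:

path_maps = [{
  name        = "dev_url_path_name"
  backend     = "devBackend"
  backend_set = "devHttpSetting"
  upm = [{
    name_rule   = "dev_path_rule_name_app"
    path        = "/app/*"
    backend     = "devBackend"
    backend_set = "devHttpSetting"
  }]
}]

Second, the dynamic blocks themselves: the outer iterator is url_path_map (no outer_block iterator is declared anywhere), the nested block in azurerm_application_gateway is path_rule (as in the desired output above), and paths expects a list:

dynamic "url_path_map" {
  for_each = var.path_maps
  content {
    name                               = url_path_map.value["name"]
    default_backend_address_pool_name  = url_path_map.value["backend"]
    default_backend_http_settings_name = url_path_map.value["backend_set"]
    dynamic "path_rule" {
      for_each = url_path_map.value["upm"]
      content {
        name                       = path_rule.value["name_rule"]
        paths                      = [path_rule.value["path"]]
        backend_address_pool_name  = path_rule.value["backend"]
        backend_http_settings_name = path_rule.value["backend_set"]
      }
    }
  }
}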
I'm trying to understand what exactly I am missing in my Terraform module (Cloudflare).
I'm currently developing it, but I've hit a wall with this one:
Code:
## Variables
variable "filters" {
  description = "required filters for the firewall rules"
  type = map(object({
    description = string
    expression  = string
  }))
  default = {
    "filters1" = {
      description = "test-rule"
      expression  = "test-exps"
    }
  }
}

variable "firewall-rules" {
  description = "A set of Firewall rules"
  type = map(object({
    description = string
    action      = string
  }))
  default = {
    "rules1" = {
      description = "value"
      action      = "value"
    }
  }
}

## Resources
resource "cloudflare_filter" "firewall-filters" {
  zone_id     = var.zone_id
  for_each    = var.filters
  description = each.value["description"]
  expression  = each.value["expression"]
}

resource "cloudflare_firewall_rule" "firewall-rules" {
  zone_id     = var.zone_id
  for_each    = var.firewall-rules
  filter_id   = cloudflare_filter.firewall-filters
  description = each.value["description"]
  action      = each.value["action"]
}
Ignore "value" it's for testing purpose.
Technically shouldn't the for_each work fine, while filter_id points to
"cloudflare_filter.firewall-filters", which inherits the values from the variable "filters".
Thank you in advance!
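For reference, a minimal sketch of the usual fix, assuming each firewall rule shares its map key with the filter it should use: a resource with for_each is a map of instances, so it must be indexed with a key before an attribute such as id can be read:

resource "cloudflare_firewall_rule" "firewall-rules" {
  zone_id  = var.zone_id
  for_each = var.firewall-rules
  # Index the filter resource by key and take its id; the bare address
  # cloudflare_filter.firewall-filters is a map of objects, not one filter.
  filter_id   = cloudflare_filter.firewall-filters[each.key].id
  description = each.value["description"]
  action      = each.value["action"]
}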
I am trying to create an AWS CodePipeline using resources in Terraform. Here is the resources section of my configuration:
resource "aws_codepipeline" "codepipeline" {
name = var.name
role_arn = var.role_arn
artifact_store {
location = var.location
type = var.type
}
stage {
name = var.stage1_name
action {
name = var.action1_name
category = var.source_category
owner = var.source_owner
provider = var.source_provider
version = var.source_version
output_artifacts = var.source_output_artifacts
configuration = {
ConnectionArn = var.connection_arn
FullRepositoryId = var.full_repository_id
BranchName = var.branch_name
OutputArtifactFormat = var.output_artifact_format
}
}
}
stage {
name = var.stage2_name
action {
name = var.action2_name
category = var.build_category
owner = var.build_owner
provider = var.build_provider
input_artifacts = var.input_artifacts
output_artifacts = var.build_output_artifacts
version = var.build_version
configuration = {
ProjectName = var.project_name
EnvironmentVariables = var.environment_variables /*jsonencode(
[
{
name = var.environment_name
type = var.environment_type
value = var.environment_value
}
]
) */
}
}
}
}
In my modules section, I create the pipeline by calling the resource given above. My module code is:
module "codepipeline_notification" {
source = "../../modules/codepipeline"
name = var.codepipeline_lambda_notification_name
role_arn = aws_iam_role.cp_lambda_deploy_role.arn #var.codepipeline_lambda_notification_role_arn
location = module.s3_codepipeline_artifact.s3_bucket_account_id #var.codepipeline_lambda_notification_location
type = var.codepipeline_lambda_notification_type
stage1_name = var.codepipeline_lambda_notification_stage1_name
action1_name = var.codepipeline_lambda_notification_action1_name
source_category = var.codepipeline_lambda_notification_source_category
source_owner = var.codepipeline_lambda_notification_source_owner
source_provider = var.codepipeline_lambda_notification_source_provider
source_version = var.codepipeline_lambda_notification_source_version
source_output_artifacts = var.codepipeline_lambda_notification_source_output_artifacts
full_repository_id = var.codepipeline_lambda_notification_full_repository_id
branch_name = var.codepipeline_lambda_notification_branch_name
output_artifact_format = var.codepipeline_lambda_notification_output_artifact_format
environment_variables = jsonencode(
[
{
name = var.codepipeline_lambda_notification_environment_name
type = var.codepipeline_lambda_notification_environment_type
value = var.codepipeline_lambda_notification_environment_value
}
]
)
build_output_artifacts = var.codepipeline_lambda_notification_build_output_artifacts
connection_arn = module.codestarconnections.arn
stage2_name = var.codepipeline_lambda_notification_stage2_name
action2_name = var.codepipeline_lambda_notification_action2_name
build_category = var.codepipeline_lambda_notification_build_category
build_owner = var.codepipeline_lambda_notification_build_owner
build_provider = var.codepipeline_lambda_notification_build_provider
build_version = var.codepipeline_lambda_notification_build_version
input_artifacts = var.codepipeline_lambda_notification_input_artifacts
project_name = module.codebuild_notification.name
}
With this approach, I am trying to create four pipelines, where one pipeline has only two stages and the other pipelines have three. If I define three stages in the resource, Terraform forces the module to create three stages in every pipeline, even where I need only two. Is there any way in Terraform to define the stages in the resource and have the module include them conditionally?
Not sure if you ever got an answer to your question, but yes, there is a way: a dynamic pipeline. I have a repository that walks you through its usage. In short, you treat the resource as a dynamic resource, iterating with for_each and passing the configuration in as a map.
The module looks like this:
resource "aws_codepipeline" "codepipeline" {
for_each = var.code_pipeline
name = "${local.name_prefix}-${var.AppName}"
role_arn = each.value["code_pipeline_role_arn"]
tags = {
Pipeline_Key = each.key
}
artifact_store {
type = lookup(each.value, "artifact_store", null) == null ? "" : lookup(each.value.artifact_store, "type", "S3")
location = lookup(each.value, "artifact_store", null) == null ? null : lookup(each.value.artifact_store, "artifact_bucket", null)
}
dynamic "stage" {
for_each = lookup(each.value, "stages", {})
iterator = stage
content {
name = lookup(stage.value, "name")
dynamic "action" {
for_each = lookup(stage.value, "actions", {}) //[stage.key]
iterator = action
content {
name = action.value["name"]
category = action.value["category"]
owner = action.value["owner"]
provider = action.value["provider"]
version = action.value["version"]
run_order = action.value["run_order"]
input_artifacts = lookup(action.value, "input_artifacts", null)
output_artifacts = lookup(action.value, "output_artifacts", null)
configuration = action.value["configuration"]
namespace = lookup(action.value, "namespace", null)
}
}
}
}
}
Executing the module:
module "code_pipeline" {
source = "../module-aws-codepipeline" #using module locally
#source = "your-github-repository/aws-codepipeline" #using github repository
AppName = "My_new_pipeline"
code_pipeline = local.code_pipeline
}
Sample locals.tf with the pipeline variable:
locals {
  /*
  DECLARE environment variables. Note each action does not require environment variables.
  */
  action_second_stage_variables = [
    {
      name  = "PIPELINE_EXECUTION_ID"
      type  = "PLAINTEXT"
      value = "#{codepipeline.PipelineExecutionId}"
    },
    {
      name  = "NamespaceVariable"
      type  = "PLAINTEXT"
      value = "some_value"
    },
  ]
  action_third_stage_variables = [
    {
      name  = "PL_VARIABLE_1"
      type  = "PLAINTEXT"
      value = "VALUE1"
    },
    {
      name  = "PL_VARIABLE_2"
      type  = "PLAINTEXT"
      value = "VALUE2"
    },
    {
      name  = "PL_VARIABLE_3"
      type  = "PLAINTEXT"
      value = "VALUE3"
    },
    {
      name  = "PL_VARIABLE_4"
      type  = "PLAINTEXT"
      value = "#{BLD.NamespaceVariable}"
    },
  ]
  /*
  BUILD YOUR STAGES
  */
  code_pipeline = {
    codepipeline-configs = {
      code_pipeline_role_arn = "arn:aws:iam::aws_account_name:role/role_name"
      artifact_store = {
        type            = "S3"
        artifact_bucket = "your-aws-bucket-name"
      }
      stages = {
        stage_1 = {
          name = "Download"
          actions = {
            action_1 = {
              run_order        = 1
              category         = "Source"
              name             = "First_Stage"
              owner            = "AWS"
              provider         = "CodeCommit"
              version          = "1"
              output_artifacts = ["download_ouput"]
              configuration = {
                RepositoryName       = "Codecommit_target_repo"
                BranchName           = "main"
                PollForSourceChanges = true
                OutputArtifactFormat = "CODE_ZIP"
              }
            }
          }
        }
        stage_2 = {
          name = "Build"
          actions = {
            action_1 = {
              run_order        = 2
              category         = "Build"
              name             = "Second_Stage"
              owner            = "AWS"
              provider         = "CodeBuild"
              version          = "1"
              namespace        = "BLD"
              input_artifacts  = ["download_ouput"]
              output_artifacts = ["build_outputs"]
              configuration = {
                ProjectName          = "codebuild_project_name_for_second_stage"
                EnvironmentVariables = jsonencode(local.action_second_stage_variables)
              }
            }
          }
        }
        stage_3 = {
          name = "Validation"
          actions = {
            action_1 = {
              run_order        = 1
              name             = "Third_Stage"
              category         = "Build"
              owner            = "AWS"
              provider         = "CodeBuild"
              version          = "1"
              input_artifacts  = ["build_outputs"]
              output_artifacts = ["validation_outputs"]
              configuration = {
                ProjectName          = "codebuild_project_name_for_third_stage"
                EnvironmentVariables = jsonencode(local.action_third_stage_variables)
              }
            }
          }
        }
      }
    }
  }
}
The full usage of the module can be found in this GitHub repository. In your case, you could pass in multiple resources to create various pipelines in one module, each with unique, custom stages and actions. I hope this helps.
I have the following in variables.tf
variable "envoy_config" {
description = "Envoy Docker Image Version"
type = object({
routes = list(object({
cluster_name = string
host_rewrite_url = string
prefix = string
headers = object({
name = string
exact_match = string
})
}))
clusters = list(object({
name = string
address = string
}))
})
default = {
routes = []
clusters = []
}
validation {
condition = <not quite sure what to add here>
error_message = "cluster <name> does not exist"
}
}
and then in my variables.tfvars, I have the following:
envoy_config = {
  routes = [{
    host_rewrite_url = "myurl"
    prefix           = "myprefix"
    cluster_name     = "mycluster"
    headers = {
      name        = ":method"
      exact_match = "GET"
    }
  }]
  clusters = [{
    name    = "mycluster"
    address = "myurl"
  }]
}
I want to make sure that every envoy_config.routes[*].cluster_name and envoy_config.routes[*].host_rewrite_url exists in envoy_config.clusters[*].name and envoy_config.clusters[*].address, respectively.
What condition should I add in the validation step?
The examples I found all deal with regex.
I am using Terraform v0.12.28.
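Not a definitive answer, but a sketch of a condition that should express this, assuming the variable_validation experiment is enabled (custom validation only became stable in Terraform 0.13, and alltrue() only arrived in 0.14, so this relies on setsubtract() over splat expressions, which to my knowledge is available in late 0.12 releases):

validation {
  # Every cluster_name referenced by a route must appear among the cluster
  # names, and every host_rewrite_url among the cluster addresses.
  condition = (
    length(setsubtract(var.envoy_config.routes[*].cluster_name, var.envoy_config.clusters[*].name)) == 0 &&
    length(setsubtract(var.envoy_config.routes[*].host_rewrite_url, var.envoy_config.clusters[*].address)) == 0
  )
  error_message = "Each routes entry must reference an existing clusters name and address."
}

Note that these Terraform versions also require error_message to be a full sentence starting with a capital letter and ending with a period.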