I am fairly new to Terraform so I'm still learning the more advanced elements of the language.
I have created a template that creates 4 azure virtual desktop host pools. I have also created within the same template scaling plans and was trying to create a for_each loop, on the scaling plan, however when it comes to the hostpool_id requirement on the scaling plan I am not sure how to get it to cycle through the 4 hostpool ids that will be created.
MAIN
#############
# HOSTPOOLS #
#############
resource "azurerm_virtual_desktop_host_pool" "developer_dev" {
name = var.developer_dev_hostpool.name
resource_group_name = var.rg_avd_name
location = var.rg_avd_location
friendly_name = var.developer_dev_hostpool.friendly_name
description = var.developer_dev_hostpool.description
type = var.developer_dev_hostpool.type
maximum_sessions_allowed = var.developer_dev_hostpool.max_sessions
load_balancer_type = var.developer_dev_hostpool.lb_type
scheduled_agent_updates {
enabled = true
timezone = "GMT Standard Time"
schedule {
day_of_week = "Saturday"
hour_of_day = "02"
}
}
}
resource "azurerm_virtual_desktop_host_pool" "developer_prod" {
name = var.developer_prod_hostpool.name
resource_group_name = var.rg_avd_name
location = var.rg_avd_location
friendly_name = var.developer_prod_hostpool.friendly_name
description = var.developer_prod_hostpool.description
type = var.developer_prod_hostpool.type
maximum_sessions_allowed = var.developer_prod_hostpool.max_sessions
load_balancer_type = var.developer_prod_hostpool.lb_type
scheduled_agent_updates {
enabled = true
timezone = "GMT Standard Time"
schedule {
day_of_week = "Saturday"
hour_of_day = "02"
}
}
}
resource "azurerm_virtual_desktop_host_pool" "front_office" {
name = var.front_office_hostpool.name
resource_group_name = var.rg_avd_name
location = var.rg_avd_location
friendly_name = var.front_office_hostpool.friendly_name
description = var.front_office_hostpool.description
type = var.front_office_hostpool.type
maximum_sessions_allowed = var.front_office_hostpool.max_sessions
load_balancer_type = var.front_office_hostpool.lb_type
scheduled_agent_updates {
enabled = true
timezone = "GMT Standard Time"
schedule {
day_of_week = "Saturday"
hour_of_day = "02"
}
}
}
resource "azurerm_virtual_desktop_host_pool" "back_office" {
name = var.back_office_hostpool.name
resource_group_name = var.rg_avd_name
location = var.rg_avd_location
friendly_name = var.back_office_hostpool.friendly_name
description = var.back_office_hostpool.description
type = var.back_office_hostpool.type
maximum_sessions_allowed = var.back_office_hostpool.max_sessions
load_balancer_type = var.back_office_hostpool.lb_type
scheduled_agent_updates {
enabled = true
timezone = "GMT Standard Time"
schedule {
day_of_week = "Saturday"
hour_of_day = "02"
}
}
}
#################
# SCALING PLANS #
#################
resource "azurerm_virtual_desktop_scaling_plan" "sp_weekdays_developer" {
for_each = var.sp_weekdays
name = each.value.sp_name
resource_group_name = var.rg_avd_name
location = var.rg_avd_location
friendly_name = each.value.sp_friendly_name
description = each.value.sp_description
time_zone = "GMT Standard Time"
schedule {
name = each.value.sch_name
days_of_week = ["Monday","Tuesday","Wednesday","Thursday","Friday"]
ramp_up_start_time = "07:30"
ramp_up_load_balancing_algorithm = each.value.lb_alg
ramp_up_minimum_hosts_percent = each.value.ramp_up_min_hosts
ramp_up_capacity_threshold_percent = each.value.ramp_up_cap_threshold
peak_start_time = "08:00"
peak_load_balancing_algorithm = each.value.lb_alg
ramp_down_start_time = "18:00"
ramp_down_load_balancing_algorithm = each.value.lb_alg
ramp_down_minimum_hosts_percent = "80"
ramp_down_force_logoff_users = true
ramp_down_wait_time_minutes = "60"
ramp_down_notification_message = "Your session will be logged off shortly, please save your work."
ramp_down_capacity_threshold_percent = "20"
ramp_down_stop_hosts_when = "ZeroActiveSessions"
off_peak_start_time = "20:00"
off_peak_load_balancing_algorithm = each.value.lb_alg
}
host_pool {
hostpool_id = azurerm_virtual_desktop_host_pool.developer_dev.id
scaling_plan_enabled = true
}
}
variables
######################
# HOSTPOOL VARIABLES #
######################
variable "developer_dev_hostpool" {
default = {
name = "avdhp-developer-dev-uks-001"
friendly_name = "Developer – Development"
description = "Hostpool for developers and contractors working in the development environment"
type = "Pooled"
max_sessions = "8"
lb_type = "DepthFirst"
}
}
variable "developer_prod_hostpool" {
default = {
name = "avdhp-developer-prod-uks-001"
friendly_name = "Developer – Production"
description = "Hostpool for developers and contractors working in the production environment"
type = "Pooled"
max_sessions = "8"
lb_type = "DepthFirst"
}
}
variable "front_office_hostpool" {
default = {
name = "avdhp-front-office-uks-001"
friendly_name = "Front Office"
description = "Hostpool for front office staff"
type = "Pooled"
max_sessions = "16"
lb_type = "BreadthFirst"
}
}
variable "back_office_hostpool" {
default = {
name = "avdhp-back-office-uks-001"
friendly_name = "Back Office "
description = "Hostpool for back office staff"
type = "Pooled"
max_sessions = "12"
lb_type = "DepthFirst"
}
}
##########################
# SCALING PLAN VARIABLES #
##########################
variable "sp_weekdays" {
description = "scaling plan for weekdays"
default = {
developer_dev = {
sp_name = "avdsp1"
sp_friendly_name = "Developer Weekday Scaling Plan"
sp_description = "This scaling plan is for the developers during the week."
sch_name = "Weekdays"
lb_alg = "DepthFirst"
ramp_up_min_hosts = "50"
ramp_up_cap_threshold = "60"
}
developer_prod = {
sp_name = "avdsp3"
sp_friendly_name = "Developer Weekday Scaling Plan"
sp_description = "This scaling plan is for the developers during the week."
sch_name = "Weekdays"
lb_alg = "DepthFirst"
ramp_up_min_hosts = "50"
ramp_up_cap_threshold = "60"
}
back_office = {
sp_name = "avdsp4"
sp_friendly_name = "Back Office Weekday Scaling Plan"
sp_description = "This scaling plan is for the back office during the week."
sch_name = "Weekdays"
lb_alg = "DepthFirst"
ramp_up_min_hosts = "50"
ramp_up_cap_threshold = "60"
}
front_office = {
sp_name = "avdsp5"
sp_friendly_name = "Front Office Weekday Scaling Plan"
sp_description = "This scaling plan is for the front office during the week."
sch_name = "Weekdays"
lg_alg = "BreadthFirst"
ramp_up_min_hosts = "30"
ramp_up_cap_threshold = "75"
}
}
}
Related
getting stuck with problem.
Need a terraform expert help. I want to create VPS in GCP with count using module. How to correct create and attach google_compute_address and google_compute_disk to each VPS with different names
Any help, please
Module code:
resource "google_compute_instance" "vps" {
count = var.server_count
name = var.server_count > 1 ? "${var.server_name}-${count.index}" : var.server_name
description = var.server_description
machine_type = var.server_type
zone = var.server_datacenter
deletion_protection = var.server_delete_protection
labels = var.server_labels
metadata = var.server_metadata
tags = var.server_tags
boot_disk {
auto_delete = false
initialize_params {
size = var.boot_volume_size
type = var.boot_volume_type
image = var.boot_volume_image
labels = var.boot_volume_labels
}
}
dynamic "attached_disk" {
for_each = var.volumes
content {
source = attached_disk.value["volume_name"]
}
}
dynamic "network_interface" {
for_each = var.server_network
content {
subnetwork = network_interface.value["subnetwork_name"]
network_ip = network_interface.value["subnetwork_ip"]
dynamic "access_config" {
for_each = network_interface.value.nat_ip ? [1] : []
content {
nat_ip = google_compute_address.static_ip.address
}
}
}
}
}
resource "google_compute_disk" "volume" {
for_each = var.volumes
name = each.value["volume_name"]
type = each.value["volume_type"]
size = each.value["volume_size"]
zone = var.server_datacenter
labels = each.value["volume_labels"]
}
resource "google_compute_address" "static_ip" {
count = var.server_count
name = var.server_count > 1 ? "${var.server_name}-${count.index}" : var.server_name
region = var.server_region
}
Using example:
module "vps-test" {
source = "../module"
credentials_file = "../../../../main/vault/prod/.tf/terraform-bb-prod-ground.json"
server_count = 2
server_name = "example-vps"
server_description = "simple vps for module testing"
server_type = "e2-small"
server_region = "europe-west4"
server_datacenter = "europe-west4-c"
server_labels = {
project = "terraform"
environment = "test"
}
server_metadata = {
groups = "parent_group.child_group"
}
boot_volume_image = "debian-cloud/debian-11"
boot_volume_size = 30
boot_volume_labels = {
environment = "production"
project = "v3"
type = "system"
}
server_tags = ["postgres", "production", "disable-gce-firewall"]
server_delete_protection = true
server_network = {
common_network = {
subnetwork_name = "${data.terraform_remote_state.network.outputs.subnetwork_vpc_production_common_name}"
subnetwork_ip = ""
nat_ip = true
} # },
# custom_network = {
# subnetwork_name = (data.terraform_remote_state.network.outputs.subnetwork_vpc_production_k8s_name)
# subnetwork_ip = ""
# nat_ip = false
# }
}
volumes = {
volume_data1 = {
volume_name = "v3-postgres-saga-import-test-storage"
volume_size = "40"
volume_type = "pd-ssd"
volume_labels = {
environment = "production"
project = "v3"
type = "storage"
}
},
volume_data2 = {
volume_name = "volume-vpstest2"
volume_size = "20"
volume_type = "pd-ssd"
volume_labels = {
environment = "production"
project = "v2"
type = "storage"
}
}
}
}
Now error is: Because google_compute_address.static_ip has "count" set, its attributes must be accessed on specific instances And i know, error with same disk name will come
The code as follows gives me the output of "Provisioned range for reads" as 5 - 10 in the Additional settings, Read/write capacity. I would like to set it as 1 - 10. How to do it?
module "dynamodb_table" {
source = "terraform-aws-modules/dynamodb-table/aws"
version = "3.1.1"
name = var.dbname
hash_key = var.hash_key
billing_mode = "PROVISIONED"
read_capacity = 5
write_capacity = 1
autoscaling_enabled = true
autoscaling_read = {
scale_in_cooldown = 50
scale_out_cooldown = 40
target_value = 70
min_capacity = 1
max_capacity = 10
}
autoscaling_write = {
scale_in_cooldown = 50
scale_out_cooldown = 40
target_value = 70
min_capacity = 1
max_capacity = 10
}
attributes = [
{
name = "user_id"
type = "S"
}
]
tags = {
Terraform = "true"
Environment = var.environment
}
}
Code from module:
resource "aws_appautoscaling_target" "table_read" {
count = var.create_table && var.autoscaling_enabled && length(var.autoscaling_read) > 0 ? 1 : 0
max_capacity = var.autoscaling_read["max_capacity"]
min_capacity = var.read_capacity
resource_id = "table/${aws_dynamodb_table.autoscaled[0].name}"
scalable_dimension = "dynamodb:table:ReadCapacityUnits"
service_namespace = "dynamodb"
}
My bet is your read_capacity is your min used for autoscaling and your max is defined within your autoscaling module.
Try this:
module "dynamodb_table" {
source = "terraform-aws-modules/dynamodb-table/aws"
version = "3.1.1"
name = var.dbname
hash_key = var.hash_key
billing_mode = "PROVISIONED"
read_capacity = 1
write_capacity = 1
autoscaling_enabled = true
autoscaling_read = {
scale_in_cooldown = 50
scale_out_cooldown = 40
target_value = 70
max_capacity = 10
}
autoscaling_write = {
scale_in_cooldown = 50
scale_out_cooldown = 40
target_value = 70
max_capacity = 10
}
attributes = [
{
name = "user_id"
type = "S"
}
]
tags = {
Terraform = "true"
Environment = var.environment
}
}
If that fails try something like the following:
resource "aws_appautoscaling_target" "dynamodb-test-table_read_target" {
max_capacity = 10
min_capacity = 1
resource_id = "table/${aws_dynamodb_table.dynamodb-test-table.name}"
scalable_dimension = "dynamodb:table:ReadCapacityUnits"
service_namespace = "dynamodb"
}
I think what you really want is the code from this reference:
aws_appautoscaling_target
aws_appautoscaling_policy
Here is my implementation from main.tf:
resource "aws_dynamodb_table" "table" {
name = var.table_name
billing_mode = "PROVISIONED"
read_capacity = var.read_capacity_target
write_capacity = var.write_capacity_target
hash_key = var.partition_key_name
range_key = var.sort_key_name
ttl = var.ttl
point_in_time_recovery { enabled = true }
server_side_encryption { enabled = true }
lifecycle {ignore_changes = [read_capacity, write_capacity]}
attribute {
name = var.partition_key_name
type = var.partition_key_type
}
attribute {
name = var.sort_key_name
type = var.sort_key_type
}
tags = {
"Environment" = var.environment
}
}
resource "aws_appautoscaling_target" "dynamodb_table_read_target" {
max_capacity = var.read_capacity_maximum
min_capacity = var.read_capacity_minimum
resource_id = "table/${aws_dynamodb_table.table.name}"
scalable_dimension = "dynamodb:table:ReadCapacityUnits"
service_namespace = "dynamodb"
}
resource "aws_appautoscaling_policy" "dynamodb_table_read_policy" {
name = "DynamoDBReadCapacityUtilization:${aws_appautoscaling_target.dynamodb_table_read_target.resource_id}"
policy_type = "TargetTrackingScaling"
resource_id = aws_appautoscaling_target.dynamodb_table_read_target.resource_id
scalable_dimension = aws_appautoscaling_target.dynamodb_table_read_target.scalable_dimension
service_namespace = aws_appautoscaling_target.dynamodb_table_read_target.service_namespace
target_tracking_scaling_policy_configuration {
predefined_metric_specification {
predefined_metric_type = "DynamoDBReadCapacityUtilization"
}
target_value = 70.0
}
}
And here is the variables.tf code:
variable "vpc_id" {
description = "Required. The Virtual Private Network containing all services."
type = string
}
variable "table_name" {
description = "Required. The name of the DDB table. No spaces."
type = string
}
variable "partition_key_name" {
description = "Required. Name of the partition key. No spaces."
type = string
}
variable "partition_key_type" {
description = "Required. Partition key type: 'S' or 'N'"
type = string
}
variable "sort_key_name" {
description = "Required. The name of the Sort key. No spaces."
type = string
}
variable "sort_key_type" {
description = "Required. Sort key type: 'S' or 'N'"
type = string
}
variable "ttl" {
description = "Required. True if TTL or time-to-live is enabled"
type = bool
}
variable "read_capacity_maximum" {
description = "Required. Maximum allowed autoscale range."
type = number
default = 10
}
variable "read_capacity_minimum" {
description = "Required. Minimum allowed autoscale range."
type = number
default = 2
}
variable "read_capacity_target" {
description = "Required. Target within autoscale range."
type = number
default = 5
}
variable "write_capacity_maximum" {
description = "Required. Maximum allowed autoscale range."
type = number
default = 10
}
variable "write_capacity_minimum" {
description = "Required. Minimum allowed autoscale range."
type = string
default = 2
}
variable "write_capacity_target" {
description = "Required. Target within autoscale range."
type = number
default = 5
}
variable "environment" {
description = "The staging type: development, production."
default = "development"
type = string
}
I'm trying to create cloudwatch alarms for some specific load balancers.
What if I have to create 100 cloudwatch alarms, do I need to populate the tfvars the way, I'm updating it currently, or is there any other way which is more optimized.
Following is my code.
main.tf
resource "aws_cloudwatch_metric_alarm" "UnHealthyHosts" {
for_each = var.cloudwatch_alarms_map
alarm_name = each.key
comparison_operator = var.cloudwatch_alarm_operator
evaluation_periods = var.cloudwatch_alarm_evaluation_periods
metric_name = var.cloudwatch_alarm_metric
namespace = each.value["alarm_namespace"]
period = var.cloudwatch_alarm_period
statistic = var.cloudwatch_alarm_statistic
threshold = var.cloudwatch_alarm_threshold
alarm_description = var.cloudwatch_alarm_description
actions_enabled = var.cloudwatch_alarm_actions_enabled
alarm_actions = [aws_sns_topic.sns.arn]
dimensions = {
TargetGroup = each.value["target_group_arn"]
LoadBalancer = each.value["load_balancer_arn"]
}
}
variables.tf
variable "cloudwatch_alarms_map" {
type = map(object({
alarm_namespace = string # eg: AWS/ApplicationELB
target_group_arn = string
load_balancer_arn = string
}))
default = {}
}
terraform.tfvars
cloudwatch_alarms_map = {
app1-unhealthy-alarm = {
target_group_arn_suffix = "targetgroup/sample-app1-tg/12de123e123123aa"
load_balancer_arn_suffix = "app/sample-alb-app1-lb/12c5732bd012e47a"
alarm_namespace = "AWS/ApplicationELB"
}
app2-unhealthy-alarm = {
target_group_arn_suffix = "targetgroup/sample-app2-tg/313e7f1ad4a2e373"
load_balancer_arn_suffix = "app/sample-alb-app2-lb/f2c5132bd012e47a"
alarm_namespace = "AWS/ApplicationELB"
}
}
I am trying to register two applications one is a Front End App and the other is a Backend App. I am having corresponding Web Apps for them as well.
I am using Terraform to deploy my apps and all the infrastructure.
But while running TF plan i get the cyclic error. Please find below my code.
Can anyone please help me rectify this
Code for Web App FE
resource "azurerm_app_service" "fe" {
location = module.resourcegroup.resource_group.location
resource_group_name = module.resourcegroup.resource_group.name
tags = module.resourcegroup.resource_group.tags
app_service_plan_id = azurerm_app_service_plan.default.id
name = module.names-web-app-fe.location.app_service.name_unique
identity { type = "SystemAssigned" }
auth_settings {
enabled = true
default_provider = "AzureActiveDirectory"
issuer = format("https://sts.windows.net/%s/", data.azurerm_client_config.default.tenant_id)
runtime_version = "~1"
token_store_enabled = true
unauthenticated_client_action = "RedirectToLoginPage"
additional_login_params = {
"response_type" = "code id_token",
"resource" = azuread_application.app-fe.application_id
}
active_directory {
client_id = azuread_application.app-fe.object_id
client_secret = azuread_application_password.fe-app-sp-secret.application_object_id
allowed_audiences = [format("https://%s.azurewebsites.net", module.names-web-app-fe.location.app_service.name_unique)]
}
}
site_config {
always_on = true
app_command_line = ""
default_documents = []
dotnet_framework_version = "v4.0"
ftps_state = "Disabled"
health_check_path = ""
http2_enabled = true
linux_fx_version = "STATICSITE|1.0"
local_mysql_enabled = false
managed_pipeline_mode = "Integrated"
min_tls_version = "1.2"
#pre_warmed_instance_count = 0
python_version = "3.4"
remote_debugging_enabled = false
remote_debugging_version = "VS2019"
use_32_bit_worker_process = false
websockets_enabled = false
windows_fx_version = ""
cors {
allowed_origins = []
support_credentials = false
}
}
app_settings = {
"WEBSITE_DNS_SERVER" = "168.63.129.16"
"WEBSITE_VNET_ROUTE_ALL" = "1"
}
}
Code for Web App BE
resource "azurerm_app_service" "be" {
location = module.resourcegroup.resource_group.location
resource_group_name = module.resourcegroup.resource_group.name
tags = module.resourcegroup.resource_group.tags
app_service_plan_id = azurerm_app_service_plan.default.id
name = module.names-web-app-be.location.app_service.name_unique
identity { type = "SystemAssigned" }
auth_settings {
enabled = true
default_provider = "AzureActiveDirectory"
issuer = format("https://sts.windows.net/%s/", data.azurerm_client_config.default.tenant_id)
runtime_version = "~1"
token_store_enabled = true
unauthenticated_client_action = "RedirectToLoginPage"
additional_login_params = {
"response_type" = "code id_token",
"resource" = azuread_application.app-be.application_id
}
active_directory {
client_id = azuread_application.app-be.object_id
client_secret = azuread_application_password.be-app-sp-secret.application_object_id
allowed_audiences = []
}
}
site_config {
always_on = true
app_command_line = ""
default_documents = []
dotnet_framework_version = "v4.0"
ftps_state = "AllAllowed"
health_check_path = ""
http2_enabled = true
linux_fx_version = "DOTNETCORE|3.1"
local_mysql_enabled = false
managed_pipeline_mode = "Integrated"
min_tls_version = "1.2"
python_version = "3.4"
remote_debugging_enabled = false
remote_debugging_version = "VS2019"
use_32_bit_worker_process = false
windows_fx_version = ""
websockets_enabled = true
cors {
allowed_origins = [format("https://%s", azurerm_app_service.fe.default_site_hostname)]
support_credentials = true
}
}
app_settings = {
"WEBSITE_DNS_SERVER" = "168.63.129.16"
"WEBSITE_VNET_ROUTE_ALL" = "1"
}
}
Code for UUID
resource "random_uuid" "qb2-sal" {}
Code for FE App Reg
resource "azuread_application" "app-fe" {
display_name = format("%s-fe", var.project.name)
api {
oauth2_permission_scope {
admin_consent_description = "Allows the app to read and write data"
admin_consent_display_name = local.oauth2_permissions.read-and-write.admin_consent_display_name
enabled = true
id = random_uuid.qb2-sal.results
type = "User"
value = "read-and-write"
}
}
app_role {
allowed_member_types = ["User", "Application"]
description = "Application administrators have the ability to administer the application."
display_name = local.app_roles.application-administrator.display_name
enabled = true
id = "02c4e591-d667-51db-5597-e2c446ec246b"
value = "application-administrator"
}
web {
logout_url = format("https://%s.azurewebsites.net/.auth/logout", module.names-web-app-fe.location.app_service.name_unique)
redirect_uris = [format("https://%s.azurewebsites.net/.auth/login/aad/callback", module.names-web-app-fe.location.app_service.name_unique)]
implicit_grant {
access_token_issuance_enabled = true
id_token_issuance_enabled = true
}
}
required_resource_access {
resource_app_id = azuread_application.app-be.application_id # Microsoft Graph
resource_access {
id = "02c4e591-d667-51db-5597-e2c446ec246b" # User.Read.All
type = "Role"
}
}
}
FE App SP Secret
resource "azuread_application_password" "fe-app-sp-secret" {
application_object_id = azuread_application.app-fe.object_id
}
BE App Reg
resource "azuread_application" "app-be" {
display_name = format("%s-be", var.project.name)
api {
oauth2_permission_scope {
admin_consent_description = "Allows the app to read and write data"
admin_consent_display_name = local.oauth2_permissions.read-and-write.admin_consent_display_name
enabled = true
id = random_uuid.qb2-sal.result
type = "User"
value = "read-and-write"
}
}
app_role {
allowed_member_types = ["User", "Application"]
description = "Application administrators have the ability to administer the application."
display_name = local.app_roles.application-administrator.display_name
enabled = true
id = "02c4e591-d667-51db-5597-e2c446ec246b"
value = "application-administrator"
}
web {
logout_url = format("https://%s.azurewebsites.net/.auth/logout", module.names-web-app-be.location.app_service.name_unique)
redirect_uris = [format("https://%s.azurewebsites.net/.auth/login/aad/callback", module.names-web-app-be.location.app_service.name_unique)]
implicit_grant {
access_token_issuance_enabled = true
id_token_issuance_enabled = true
}
}
}
BE App SP Secret
resource "azuread_application_password" "be-app-sp-secret" {
application_object_id = azuread_application.app-be.object_id
}
Error while doing TF plan
Error: .group_membership_claims: missing expected [
You have resource dependency issues with your code. You probably need to use the terraform graph command terraform graph -draw-cycles or draw a dependency graph manually (although it's painful) to understand what's really happening. Check this answer for more information.
Looking at your code, it seems that you have:
azurerm_app_service.fe depends on azuread_application.app-fe
azuread_application.app-fe depends on azurerm_app_service.be
azurerm_app_service.be depends on azurerm_app_service.fe
This seems to be where your cycle is. Your FE AppService depends on the BE AppService to be created but the BE AppService has a dependency on the hostname of the FE AppService.
I believe as a quick fix you could try to remove this line to fix it (although I have not tried it). Or else, re-reference otherwise or even try to add it post deployment.
allowed_origins = [format("https://%s", azurerm_app_service.fe.default_site_hostname)]
There's now some help in the TF documentation:
https://registry.terraform.io/providers/hashicorp/azuread/latest/docs/guides/microsoft-graph
Search for "group_membership_claims"
I would like to setup GCP CloudSQL by terraform. im reading this doc. (https://www.terraform.io/docs/providers/google/r/sql_database_instance.html)
and I execute "terraform plan" with such code.
resource "google_sql_database_instance" "master" {
name = "terraform-master"
region = "asia-northeasteast1"
database_version = "MYSQL_5_6"
project = "test-141901"
settings {
tier = "db-f1-micro"
replication_type = "SYNCHRONOUS"
backup_configuration {
enabled = true
start_time = "17:00"
}
ip_configuration {
ipv4_enabled = true
}
database_flags {
name = "slow_query_log"
value = "on"
name = "character_set_server"
value = "utf8mb4"
}
}
}
but I can not setup MULTIPLE database_flags
settings.0.database_flags.#: "1"
settings.0.database_flags.0.name: "character_set_server"
settings.0.database_flags.0.value: "utf8mb4"
How can I setup CloudSQL with multiple database_flags? I can not understand "sublist support" at that document.
You should be able to do this by using multiple database_flags blocks:
resource "google_sql_database_instance" "master" {
name = "terraform-master"
region = "us-central1"
database_version = "MYSQL_5_6"
project = "test-project"
settings {
tier = "db-f1-micro"
replication_type = "SYNCHRONOUS"
backup_configuration {
enabled = true
start_time = "17:00"
}
ip_configuration {
ipv4_enabled = true
}
database_flags {
name = "slow_query_log"
value = "on"
}
database_flags {
name = "character_set_server"
value = "utf8mb4"
}
}
}
Here is the output of terraform plan with the above tf:
+ google_sql_database_instance.master
database_version: "MYSQL_5_6"
ip_address.#: "<computed>"
name: "terraform-master"
project: "test-project"
region: "us-central1"
self_link: "<computed>"
settings.#: "1"
settings.0.backup_configuration.#: "1"
settings.0.backup_configuration.0.enabled: "true"
settings.0.backup_configuration.0.start_time: "17:00"
settings.0.database_flags.#: "2"
settings.0.database_flags.0.name: "slow_query_log"
settings.0.database_flags.0.value: "on"
settings.0.database_flags.1.name: "character_set_server"
settings.0.database_flags.1.value: "utf8mb4"
settings.0.ip_configuration.#: "1"
settings.0.ip_configuration.0.ipv4_enabled: "true"
settings.0.replication_type: "SYNCHRONOUS"
settings.0.tier: "db-f1-micro"
settings.0.version: "<computed>"
I want to elaborate on this answer as I needed to generate the database_flags-block based on input. Suppose you have a variable
variable "database-flags" {
type = "map"
default = {
character_set_server = "utf8mb4"
slow_query_log = "on"
}
}
using terraform v0.12.X this can be written as
resource "google_sql_database_instance" "master" {
name = "terraform-master"
region = "us-central1"
database_version = "MYSQL_5_6"
project = "test-project"
settings {
tier = "db-f1-micro"
replication_type = "SYNCHRONOUS"
backup_configuration {
enabled = true
start_time = "17:00"
}
ip_configuration {
ipv4_enabled = true
}
dynamic "database_flags" {
iterator = flag
for_each = var.database-flags
content {
name = flag.key
value = flag.value
}
}
}
}
Using the above pattern, you can use the database-creating-part of the code as a module and let the consumer decide what flags should be set