I am trying to register two applications, a front-end app and a back-end app, with a corresponding web app for each.
I am using Terraform to deploy my apps and all of the infrastructure.
But while running terraform plan I get a cycle error. Please find my code below.
Can anyone please help me rectify this?
Code for Web App FE
resource "azurerm_app_service" "fe" {
location = module.resourcegroup.resource_group.location
resource_group_name = module.resourcegroup.resource_group.name
tags = module.resourcegroup.resource_group.tags
app_service_plan_id = azurerm_app_service_plan.default.id
name = module.names-web-app-fe.location.app_service.name_unique
identity { type = "SystemAssigned" }
auth_settings {
enabled = true
default_provider = "AzureActiveDirectory"
issuer = format("https://sts.windows.net/%s/", data.azurerm_client_config.default.tenant_id)
runtime_version = "~1"
token_store_enabled = true
unauthenticated_client_action = "RedirectToLoginPage"
additional_login_params = {
"response_type" = "code id_token",
"resource" = azuread_application.app-fe.application_id
}
active_directory {
client_id = azuread_application.app-fe.application_id
client_secret = azuread_application_password.fe-app-sp-secret.value
allowed_audiences = [format("https://%s.azurewebsites.net", module.names-web-app-fe.location.app_service.name_unique)]
}
}
site_config {
always_on = true
app_command_line = ""
default_documents = []
dotnet_framework_version = "v4.0"
ftps_state = "Disabled"
health_check_path = ""
http2_enabled = true
linux_fx_version = "STATICSITE|1.0"
local_mysql_enabled = false
managed_pipeline_mode = "Integrated"
min_tls_version = "1.2"
#pre_warmed_instance_count = 0
python_version = "3.4"
remote_debugging_enabled = false
remote_debugging_version = "VS2019"
use_32_bit_worker_process = false
websockets_enabled = false
windows_fx_version = ""
cors {
allowed_origins = []
support_credentials = false
}
}
app_settings = {
"WEBSITE_DNS_SERVER" = "168.63.129.16"
"WEBSITE_VNET_ROUTE_ALL" = "1"
}
}
Code for Web App BE
resource "azurerm_app_service" "be" {
location = module.resourcegroup.resource_group.location
resource_group_name = module.resourcegroup.resource_group.name
tags = module.resourcegroup.resource_group.tags
app_service_plan_id = azurerm_app_service_plan.default.id
name = module.names-web-app-be.location.app_service.name_unique
identity { type = "SystemAssigned" }
auth_settings {
enabled = true
default_provider = "AzureActiveDirectory"
issuer = format("https://sts.windows.net/%s/", data.azurerm_client_config.default.tenant_id)
runtime_version = "~1"
token_store_enabled = true
unauthenticated_client_action = "RedirectToLoginPage"
additional_login_params = {
"response_type" = "code id_token",
"resource" = azuread_application.app-be.application_id
}
active_directory {
client_id = azuread_application.app-be.application_id
client_secret = azuread_application_password.be-app-sp-secret.value
allowed_audiences = []
}
}
site_config {
always_on = true
app_command_line = ""
default_documents = []
dotnet_framework_version = "v4.0"
ftps_state = "AllAllowed"
health_check_path = ""
http2_enabled = true
linux_fx_version = "DOTNETCORE|3.1"
local_mysql_enabled = false
managed_pipeline_mode = "Integrated"
min_tls_version = "1.2"
python_version = "3.4"
remote_debugging_enabled = false
remote_debugging_version = "VS2019"
use_32_bit_worker_process = false
windows_fx_version = ""
websockets_enabled = true
cors {
allowed_origins = [format("https://%s", azurerm_app_service.fe.default_site_hostname)]
support_credentials = true
}
}
app_settings = {
"WEBSITE_DNS_SERVER" = "168.63.129.16"
"WEBSITE_VNET_ROUTE_ALL" = "1"
}
}
Code for UUID
resource "random_uuid" "qb2-sal" {}
Code for FE App Reg
resource "azuread_application" "app-fe" {
display_name = format("%s-fe", var.project.name)
api {
oauth2_permission_scope {
admin_consent_description = "Allows the app to read and write data"
admin_consent_display_name = local.oauth2_permissions.read-and-write.admin_consent_display_name
enabled = true
id = random_uuid.qb2-sal.result
type = "User"
value = "read-and-write"
}
}
app_role {
allowed_member_types = ["User", "Application"]
description = "Application administrators have the ability to administer the application."
display_name = local.app_roles.application-administrator.display_name
enabled = true
id = "02c4e591-d667-51db-5597-e2c446ec246b"
value = "application-administrator"
}
web {
logout_url = format("https://%s.azurewebsites.net/.auth/logout", module.names-web-app-fe.location.app_service.name_unique)
redirect_uris = [format("https://%s.azurewebsites.net/.auth/login/aad/callback", module.names-web-app-fe.location.app_service.name_unique)]
implicit_grant {
access_token_issuance_enabled = true
id_token_issuance_enabled = true
}
}
required_resource_access {
resource_app_id = azuread_application.app-be.application_id # Microsoft Graph
resource_access {
id = "02c4e591-d667-51db-5597-e2c446ec246b" # User.Read.All
type = "Role"
}
}
}
FE App SP Secret
resource "azuread_application_password" "fe-app-sp-secret" {
application_object_id = azuread_application.app-fe.object_id
}
BE App Reg
resource "azuread_application" "app-be" {
display_name = format("%s-be", var.project.name)
api {
oauth2_permission_scope {
admin_consent_description = "Allows the app to read and write data"
admin_consent_display_name = local.oauth2_permissions.read-and-write.admin_consent_display_name
enabled = true
id = random_uuid.qb2-sal.result
type = "User"
value = "read-and-write"
}
}
app_role {
allowed_member_types = ["User", "Application"]
description = "Application administrators have the ability to administer the application."
display_name = local.app_roles.application-administrator.display_name
enabled = true
id = "02c4e591-d667-51db-5597-e2c446ec246b"
value = "application-administrator"
}
web {
logout_url = format("https://%s.azurewebsites.net/.auth/logout", module.names-web-app-be.location.app_service.name_unique)
redirect_uris = [format("https://%s.azurewebsites.net/.auth/login/aad/callback", module.names-web-app-be.location.app_service.name_unique)]
implicit_grant {
access_token_issuance_enabled = true
id_token_issuance_enabled = true
}
}
}
BE App SP Secret
resource "azuread_application_password" "be-app-sp-secret" {
application_object_id = azuread_application.app-be.object_id
}
Error while running terraform plan:
Error: .group_membership_claims: missing expected [
You have resource dependency issues in your code. You probably need to use the terraform graph command (e.g. terraform graph -draw-cycles | dot -Tsvg > graph.svg), or draw a dependency graph manually (although that's painful), to understand what's really happening.
Looking at your code, it seems that you have:
azurerm_app_service.fe depends on azuread_application.app-fe
azuread_application.app-fe depends on azurerm_app_service.be
azurerm_app_service.be depends on azurerm_app_service.fe
This seems to be where your cycle is: your FE App Service depends on the BE App Service being created, but the BE App Service has a dependency on the hostname of the FE App Service.
As a quick fix, I believe you could try removing this line (although I have not tried it); alternatively, reference the hostname another way, or add it after deployment:
allowed_origins = [format("https://%s", azurerm_app_service.fe.default_site_hostname)]
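For example, here is a sketch of the BE cors block that builds the FE origin from the naming module instead of from azurerm_app_service.fe (assuming the default *.azurewebsites.net hostname), which removes the resource-to-resource dependency:

cors {
  # Derive the FE origin from the naming module rather than from
  # azurerm_app_service.fe, so the BE no longer depends on the FE resource.
  allowed_origins     = [format("https://%s.azurewebsites.net", module.names-web-app-fe.location.app_service.name_unique)]
  support_credentials = true
}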
There's now some help in the Terraform documentation:
https://registry.terraform.io/providers/hashicorp/azuread/latest/docs/guides/microsoft-graph
Search for "group_membership_claims".
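The "missing expected [" part of the error suggests that newer azuread provider versions expect a list there. As a hedged sketch (assuming azuread 2.x, where group_membership_claims changed from a single string to a set of strings), a configuration that previously passed a bare string would become:

resource "azuread_application" "app-fe" {
  display_name = format("%s-fe", var.project.name)

  # azuread 2.x expects a set of strings here, not a bare string:
  group_membership_claims = ["All"]

  # ... rest of the application block as above ...
}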
I'm stuck on a problem and need some Terraform expert help. I want to create VPSes in GCP with count using a module. How do I correctly create and attach a google_compute_address and a google_compute_disk to each VPS with different names? Any help, please.
Module code:
resource "google_compute_instance" "vps" {
count = var.server_count
name = var.server_count > 1 ? "${var.server_name}-${count.index}" : var.server_name
description = var.server_description
machine_type = var.server_type
zone = var.server_datacenter
deletion_protection = var.server_delete_protection
labels = var.server_labels
metadata = var.server_metadata
tags = var.server_tags
boot_disk {
auto_delete = false
initialize_params {
size = var.boot_volume_size
type = var.boot_volume_type
image = var.boot_volume_image
labels = var.boot_volume_labels
}
}
dynamic "attached_disk" {
for_each = var.volumes
content {
source = attached_disk.value["volume_name"]
}
}
dynamic "network_interface" {
for_each = var.server_network
content {
subnetwork = network_interface.value["subnetwork_name"]
network_ip = network_interface.value["subnetwork_ip"]
dynamic "access_config" {
for_each = network_interface.value.nat_ip ? [1] : []
content {
nat_ip = google_compute_address.static_ip.address
}
}
}
}
}
resource "google_compute_disk" "volume" {
for_each = var.volumes
name = each.value["volume_name"]
type = each.value["volume_type"]
size = each.value["volume_size"]
zone = var.server_datacenter
labels = each.value["volume_labels"]
}
resource "google_compute_address" "static_ip" {
count = var.server_count
name = var.server_count > 1 ? "${var.server_name}-${count.index}" : var.server_name
region = var.server_region
}
Using example:
module "vps-test" {
source = "../module"
credentials_file = "../../../../main/vault/prod/.tf/terraform-bb-prod-ground.json"
server_count = 2
server_name = "example-vps"
server_description = "simple vps for module testing"
server_type = "e2-small"
server_region = "europe-west4"
server_datacenter = "europe-west4-c"
server_labels = {
project = "terraform"
environment = "test"
}
server_metadata = {
groups = "parent_group.child_group"
}
boot_volume_image = "debian-cloud/debian-11"
boot_volume_size = 30
boot_volume_labels = {
environment = "production"
project = "v3"
type = "system"
}
server_tags = ["postgres", "production", "disable-gce-firewall"]
server_delete_protection = true
server_network = {
common_network = {
subnetwork_name = "${data.terraform_remote_state.network.outputs.subnetwork_vpc_production_common_name}"
subnetwork_ip = ""
nat_ip = true
} # },
# custom_network = {
# subnetwork_name = (data.terraform_remote_state.network.outputs.subnetwork_vpc_production_k8s_name)
# subnetwork_ip = ""
# nat_ip = false
# }
}
volumes = {
volume_data1 = {
volume_name = "v3-postgres-saga-import-test-storage"
volume_size = "40"
volume_type = "pd-ssd"
volume_labels = {
environment = "production"
project = "v3"
type = "storage"
}
},
volume_data2 = {
volume_name = "volume-vpstest2"
volume_size = "20"
volume_type = "pd-ssd"
volume_labels = {
environment = "production"
project = "v2"
type = "storage"
}
}
}
}
The error now is: "Because google_compute_address.static_ip has "count" set, its attributes must be accessed on specific instances". And I know an error about the duplicate disk name will come next.
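For reference, a minimal sketch of what that error points at: since google_compute_address.static_ip has count set, it must be indexed per instance, e.g. with count.index inside google_compute_instance.vps:

dynamic "access_config" {
  for_each = network_interface.value.nat_ip ? [1] : []
  content {
    # Index the counted address resource so each VPS gets its own static IP.
    nat_ip = google_compute_address.static_ip[count.index].address
  }
}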
I am using an Azure Linux Web App (terraform below) to host a docker container which is the base image of Keycloak, our authentication and authorization provider. We have a requirement to import a theme which can be done by mounting a folder in a specific location within the docker container.
I did see where it is possible to mount blob storage inside of the linux web app, but I'm not sure how to get that mounted inside of the docker container which is defined in the application stack.
Question
How can I set where the mount point goes, rather than /keycloak/custom-themes? I would like it to go under /opt/keycloak/themes.
locals {
storage_account_name = "themestoragean001"
blob_container_name = "themes"
storage_account_kind = "Storage"
}
resource "azurerm_storage_account" "access_service_storage_account" {
name = local.storage_account_name
resource_group_name = data.azurerm_resource_group.resource_group.name
location = data.azurerm_resource_group.resource_group.location
account_tier = "Standard"
account_replication_type = "GRS"
account_kind = local.storage_account_kind
tags = merge(
local.resource_tags,
{
Purpose = "Storage Account for Keycloak themes for ${local.environment}"
StorageKind = local.storage_account_kind
}
)
}
resource "azurerm_storage_container" "container" {
name = local.blob_container_name
storage_account_name = azurerm_storage_account.access_service_storage_account.name
container_access_type = "private"
}
resource "azurerm_linux_web_app" "keycloak_web_app" {
name = local.app_service_name
location = local.default_region
resource_group_name = data.azurerm_resource_group.resource_group.name
service_plan_id = azurerm_service_plan.access_service_plan.id
https_only = true
app_settings = {
DOCKER_REGISTRY_SERVER_URL = "https://${local.keycloak_registry_server}"
PROXY_ADDRESS_FORWARDING = true
KC_HOSTNAME = var.keycloak_hostname
KC_HTTP_ENABLED = true
KC_METRICS_ENABLED = true
KC_DB_URL_HOST = azurerm_postgresql_flexible_server.keycloak_postgresql_server.fqdn
KC_DB_URL_PORT = 5432
KC_DB_SCHEMA = "public"
KC_DB_USERNAME = local.postgres_admin_username
KC_DB_PASSWORD = local.postgres_admin_password
KEYCLOAK_ADMIN = local.keycloak_admin_username
KEYCLOAK_ADMIN_PASSWORD = local.keycloak_admin_password
WEBSITES_PORT = 8080
KC_PROXY = "edge"
KC_HOSTNAME_STRICT = false
}
identity {
type = "SystemAssigned"
}
site_config {
always_on = true
http2_enabled = true
minimum_tls_version = "1.2"
app_command_line = "start --optimized --proxy=edge --hostname-strict-https=false"
health_check_path = "/"
remote_debugging_enabled = false
application_stack {
docker_image = "${local.keycloak_registry_server}/${local.keycloak_image}"
docker_image_tag = "19.0.3"
}
}
storage_account {
access_key = azurerm_storage_account.access_service_storage_account.primary_access_key
account_name = local.storage_account_name
name = "LinuxThemeMount"
share_name = "themes"
type = "AzureBlob"
mount_path = "/keycloak/custom-themes"
}
tags = local.resource_tags
}
Azure Linux Web App Configuration:
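For what it's worth, the mount_path argument of the storage_account block is what controls where the share appears inside the container, so a sketch of the change (assuming the platform allows mounting over that path in your image) would be:

storage_account {
  access_key   = azurerm_storage_account.access_service_storage_account.primary_access_key
  account_name = local.storage_account_name
  name         = "LinuxThemeMount"
  share_name   = "themes"
  type         = "AzureBlob"
  # Point the mount at Keycloak's theme directory instead:
  mount_path   = "/opt/keycloak/themes"
}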
I have Terraform code that creates a Kubernetes cluster resource in Oracle Cloud.
I want to omit the endpoint_config block when the cluster is public and include it when the cluster is private. How can I achieve that?
resource "oci_containerengine_cluster" "cluster" {
count = var.deploy_oke_cluster ? 1 : 0
compartment_id = var.compartment_id
kubernetes_version = var.cluster_kubernetes_version
name = "oke-${var.environment}"
vcn_id = oci_core_virtual_network.base_vcn.id
endpoint_config {
is_public_ip_enabled = false
subnet_id = oci_core_subnet.snet-apiserver.id
}
options {
add_ons {
is_kubernetes_dashboard_enabled = true
is_tiller_enabled = false
}
kubernetes_network_config {
pods_cidr = var.pods_cidr
services_cidr = var.services_cidr
}
service_lb_subnet_ids = [oci_core_subnet.snet-pub-lb.id]
}
}
You can do this with dynamic blocks:
resource "oci_containerengine_cluster" "cluster" {
count = var.deploy_oke_cluster ? 1 : 0
compartment_id = var.compartment_id
kubernetes_version = var.cluster_kubernetes_version
name = "oke-${var.environment}"
vcn_id = oci_core_virtual_network.base_vcn.id
dynamic "endpoint_config" {
for_each = var.is_public ? [] : [1]
content {
is_public_ip_enabled = false
subnet_id = oci_core_subnet.snet-apiserver.id
}
}
options {
add_ons {
is_kubernetes_dashboard_enabled = true
is_tiller_enabled = false
}
kubernetes_network_config {
pods_cidr = var.pods_cidr
services_cidr = var.services_cidr
}
service_lb_subnet_ids = [oci_core_subnet.snet-pub-lb.id]
}
}
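Here var.is_public is assumed to be a boolean variable you declare yourself; when it is true the for_each list is empty, so no endpoint_config block is rendered, and when it is false the block is rendered exactly once:

variable "is_public" {
  description = "When true, the cluster API endpoint is public and endpoint_config is omitted."
  type        = bool
  default     = false
}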
I need some assistance to understand the various forms of logging in to Databricks. I am using Terraform to provision Azure Databricks.
I would like to know the difference between the two code blocks below.
When I use option 1, I get the error shown.
Option 1:
terraform {
required_providers {
azuread = "~> 1.0"
azurerm = "~> 2.0"
azuredevops = { source = "registry.terraform.io/microsoft/azuredevops", version = "~> 0.0" }
databricks = { source = "registry.terraform.io/databrickslabs/databricks", version = "~> 0.0" }
}
}
provider "random" {}
provider "azuread" {
tenant_id = var.project.arm.tenant.id
client_id = var.project.arm.client.id
client_secret = var.secret.arm.client.secret
}
provider "databricks" {
host = azurerm_databricks_workspace.db-workspace.workspace_url
azure_use_msi = true
}
resource "azurerm_databricks_workspace" "db-workspace" {
name = module.names-db-workspace.environment.databricks_workspace.name_unique
resource_group_name = module.resourcegroup.resource_group.name
location = module.resourcegroup.resource_group.location
sku = "premium"
public_network_access_enabled = true
custom_parameters {
no_public_ip = true
virtual_network_id = module.virtualnetwork["centralus"].virtual_network.self.id
public_subnet_name = module.virtualnetwork["centralus"].virtual_network.subnets["db-sub-1-public"].name
private_subnet_name = module.virtualnetwork["centralus"].virtual_network.subnets["db-sub-2-private"].name
public_subnet_network_security_group_association_id = module.virtualnetwork["centralus"].virtual_network.nsgs.associations.subnets["databricks-public-nsg-db-sub-1-public"].id
private_subnet_network_security_group_association_id = module.virtualnetwork["centralus"].virtual_network.nsgs.associations.subnets["databricks-private-nsg-db-sub-2-private"].id
}
tags = local.tags
}
Databricks Cluster Creation
resource "databricks_cluster" "dbcselfservice" {
cluster_name = format("adb-cluster-%s-%s", var.project.name, var.project.environment.name)
spark_version = var.spark_version
node_type_id = var.node_type_id
autotermination_minutes = 20
autoscale {
min_workers = 1
max_workers = 7
}
azure_attributes {
availability = "SPOT_AZURE"
first_on_demand = 1
spot_bid_max_price = 100
}
depends_on = [
azurerm_databricks_workspace.db-workspace
]
}
Databricks Workspace RBAC Permission
resource "databricks_group" "db-group" {
display_name = format("adb-users-%s", var.project.name)
allow_cluster_create = true
allow_instance_pool_create = true
depends_on = [
resource.azurerm_databricks_workspace.db-workspace
]
}
resource "databricks_user" "dbuser" {
count = length(local.display_name)
display_name = local.display_name[count.index]
user_name = local.user_name[count.index]
workspace_access = true
depends_on = [
resource.azurerm_databricks_workspace.db-workspace
]
}
Adding Members to Databricks Admin Group
resource "databricks_group_member" "i-am-admin" {
for_each = toset(local.email_address)
group_id = data.databricks_group.admins.id
member_id = databricks_user.dbuser[index(local.email_address, each.key)].id
depends_on = [
resource.azurerm_databricks_workspace.db-workspace
]
}
data "databricks_group" "admins" {
display_name = "admins"
depends_on = [
# resource.databricks_cluster.dbcselfservice,
resource.azurerm_databricks_workspace.db-workspace
]
}
The error that I get during terraform apply is below:
Error: User not authorized
with databricks_user.dbuser[1],
on resources.adb.tf line 80, in resource "databricks_user" "dbuser":
80: resource "databricks_user" "dbuser"{
Error: User not authorized
with databricks_user.dbuser[0],
on resources.adb.tf line 80, in resource "databricks_user" "dbuser":
80: resource "databricks_user" "dbuser"{
Error: cannot refresh AAD token: adal:Refresh request failed. Status Code = '500'. Response body: {"error":"server_error", "error_description":"Internal server error"} Endpoint http://169.254.169.254/metadata/identity/oauth2/token?api-version=2018-02-01&resource=https%3A%2F%2Fmanagement.core.windows.net%2F
with databricks_group.db-group,
on resources.adb.tf line 80, in resource "databricks_group" "db-group":
71: resource "databricks_group" "db-group"{
Is the error coming from the block below?
provider "databricks" {
host = azurerm_databricks_workspace.db-workspace.workspace_url
azure_use_msi = true
}
I just need to log in automatically when I click the URL from the portal. So what should I use for that? And why do we need to declare the databricks provider twice, once under required_providers and again in provider "databricks"?
I have seen that if I don't provide the second block, I get the error:
"authentication is not configured for provider"
As mentioned in the comments, if you are using Azure CLI authentication (i.e. az login with your username and password), then you can use the code below:
terraform {
required_providers {
databricks = {
source = "databrickslabs/databricks"
version = "0.3.11"
}
}
}
provider "azurerm" {
features {}
}
provider "databricks" {
host = azurerm_databricks_workspace.example.workspace_url
}
resource "azurerm_databricks_workspace" "example" {
name = "DBW-ansuman"
resource_group_name = azurerm_resource_group.example.name
location = azurerm_resource_group.example.location
sku = "premium"
managed_resource_group_name = "ansuman-DBW-managed-without-lb"
public_network_access_enabled = true
custom_parameters {
no_public_ip = true
public_subnet_name = azurerm_subnet.public.name
private_subnet_name = azurerm_subnet.private.name
virtual_network_id = azurerm_virtual_network.example.id
public_subnet_network_security_group_association_id = azurerm_subnet_network_security_group_association.public.id
private_subnet_network_security_group_association_id = azurerm_subnet_network_security_group_association.private.id
}
tags = {
Environment = "Production"
Pricing = "Standard"
}
}
data "databricks_node_type" "smallest" {
local_disk = true
depends_on = [
azurerm_databricks_workspace.example
]
}
data "databricks_spark_version" "latest_lts" {
long_term_support = true
depends_on = [
azurerm_databricks_workspace.example
]
}
resource "databricks_cluster" "dbcselfservice" {
cluster_name = "Shared Autoscaling"
spark_version = data.databricks_spark_version.latest_lts.id
node_type_id = data.databricks_node_type.smallest.id
autotermination_minutes = 20
autoscale {
min_workers = 1
max_workers = 7
}
azure_attributes {
availability = "SPOT_AZURE"
first_on_demand = 1
spot_bid_max_price = 100
}
depends_on = [
azurerm_databricks_workspace.example
]
}
resource "databricks_group" "db-group" {
display_name = "adb-users-admin"
allow_cluster_create = true
allow_instance_pool_create = true
depends_on = [
resource.azurerm_databricks_workspace.example
]
}
resource "databricks_user" "dbuser" {
display_name = "Rahul Sharma"
user_name = "example@contoso.com"
workspace_access = true
depends_on = [
resource.azurerm_databricks_workspace.example
]
}
resource "databricks_group_member" "i-am-admin" {
group_id = databricks_group.db-group.id
member_id = databricks_user.dbuser.id
depends_on = [
resource.azurerm_databricks_workspace.example
]
}
Output:
If you are using a service principal for authentication, then you can use something like the below:
terraform {
required_providers {
databricks = {
source = "databrickslabs/databricks"
version = "0.3.11"
}
}
}
provider "azurerm" {
subscription_id = "948d4068-xxxx-xxxx-xxxx-e00a844e059b"
tenant_id = "72f988bf-xxxx-xxxx-xxxx-2d7cd011db47"
client_id = "f6a2f33d-xxxx-xxxx-xxxx-d713a1bb37c0"
client_secret = "inl7Q~Gvdxxxx-xxxx-xxxxyaGPF3uSoL"
features {}
}
provider "databricks" {
host = azurerm_databricks_workspace.example.workspace_url
azure_client_id = "f6a2f33d-xxxx-xxxx-xxxx-d713a1bb37c0"
azure_client_secret = "inl7Q~xxxx-xxxx-xxxxg6ntiyaGPF3uSoL"
azure_tenant_id = "72f988bf-xxxx-xxxx-xxxx-2d7cd011db47"
}
resource "azurerm_databricks_workspace" "example" {
name = "DBW-ansuman"
resource_group_name = azurerm_resource_group.example.name
location = azurerm_resource_group.example.location
sku = "premium"
managed_resource_group_name = "ansuman-DBW-managed-without-lb"
public_network_access_enabled = true
custom_parameters {
no_public_ip = true
public_subnet_name = azurerm_subnet.public.name
private_subnet_name = azurerm_subnet.private.name
virtual_network_id = azurerm_virtual_network.example.id
public_subnet_network_security_group_association_id = azurerm_subnet_network_security_group_association.public.id
private_subnet_network_security_group_association_id = azurerm_subnet_network_security_group_association.private.id
}
tags = {
Environment = "Production"
Pricing = "Standard"
}
}
data "databricks_node_type" "smallest" {
local_disk = true
depends_on = [
azurerm_databricks_workspace.example
]
}
data "databricks_spark_version" "latest_lts" {
long_term_support = true
depends_on = [
azurerm_databricks_workspace.example
]
}
resource "databricks_cluster" "dbcselfservice" {
cluster_name = "Shared Autoscaling"
spark_version = data.databricks_spark_version.latest_lts.id
node_type_id = data.databricks_node_type.smallest.id
autotermination_minutes = 20
autoscale {
min_workers = 1
max_workers = 7
}
azure_attributes {
availability = "SPOT_AZURE"
first_on_demand = 1
spot_bid_max_price = 100
}
depends_on = [
azurerm_databricks_workspace.example
]
}
resource "databricks_group" "db-group" {
display_name = "adb-users-admin"
allow_cluster_create = true
allow_instance_pool_create = true
depends_on = [
resource.azurerm_databricks_workspace.example
]
}
resource "databricks_user" "dbuser" {
display_name = "Rahul Sharma"
user_name = "example@contoso.com"
workspace_access = true
depends_on = [
resource.azurerm_databricks_workspace.example
]
}
resource "databricks_group_member" "i-am-admin" {
group_id = databricks_group.db-group.id
member_id = databricks_user.dbuser.id
depends_on = [
resource.azurerm_databricks_workspace.example
]
}
And why do we need to declare the databricks provider twice, once under required_providers and again in provider "databricks"?
The required_providers block is used to download and initialize the required provider from its source, i.e. the Terraform Registry. The provider block is used for further configuration of that downloaded provider, such as the client_id or the features block, which can be used for authentication or other configuration.
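In other words, a minimal sketch of the two roles, using names from the code above:

terraform {
  required_providers {
    # Declares WHICH plugin to download and which versions are acceptable.
    databricks = {
      source  = "databrickslabs/databricks"
      version = "0.3.11"
    }
  }
}

# Configures HOW the downloaded plugin connects and authenticates.
provider "databricks" {
  host = azurerm_databricks_workspace.example.workspace_url
}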
The azure_use_msi option is primarily intended for use from CI/CD pipelines that are executed on machines with a managed identity assigned to them. All possible authentication options are described in the documentation, but the simplest way is to authenticate via the Azure CLI, in which case you only need the host parameter in the provider block. If you don't have the Azure CLI on that machine, you can use a combination of host + personal access token instead.
If you're running that code from a machine with an assigned managed identity, then you need to make sure that this identity is either added into the workspace or has Contributor access to it; see the Azure Databricks documentation for more details.
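As a sketch of the host + personal access token combination mentioned above (the databricks_pat variable is hypothetical; inject it securely rather than hard-coding it):

variable "databricks_pat" {
  type      = string
  sensitive = true
}

provider "databricks" {
  host  = azurerm_databricks_workspace.example.workspace_url
  token = var.databricks_pat # a workspace personal access token
}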
I would like to set up GCP Cloud SQL with Terraform. I'm reading this doc (https://www.terraform.io/docs/providers/google/r/sql_database_instance.html)
and I execute terraform plan with the following code.
resource "google_sql_database_instance" "master" {
name = "terraform-master"
region = "asia-northeast1"
database_version = "MYSQL_5_6"
project = "test-141901"
settings {
tier = "db-f1-micro"
replication_type = "SYNCHRONOUS"
backup_configuration {
enabled = true
start_time = "17:00"
}
ip_configuration {
ipv4_enabled = true
}
database_flags {
name = "slow_query_log"
value = "on"
name = "character_set_server"
value = "utf8mb4"
}
}
}
but I cannot set up MULTIPLE database_flags; the plan only shows one:
settings.0.database_flags.#: "1"
settings.0.database_flags.0.name: "character_set_server"
settings.0.database_flags.0.value: "utf8mb4"
How can I set up Cloud SQL with multiple database_flags? I do not understand the "sublist support" mentioned in that document.
You should be able to do this by using multiple database_flags blocks:
resource "google_sql_database_instance" "master" {
name = "terraform-master"
region = "us-central1"
database_version = "MYSQL_5_6"
project = "test-project"
settings {
tier = "db-f1-micro"
replication_type = "SYNCHRONOUS"
backup_configuration {
enabled = true
start_time = "17:00"
}
ip_configuration {
ipv4_enabled = true
}
database_flags {
name = "slow_query_log"
value = "on"
}
database_flags {
name = "character_set_server"
value = "utf8mb4"
}
}
}
Here is the output of terraform plan with the above tf:
+ google_sql_database_instance.master
database_version: "MYSQL_5_6"
ip_address.#: "<computed>"
name: "terraform-master"
project: "test-project"
region: "us-central1"
self_link: "<computed>"
settings.#: "1"
settings.0.backup_configuration.#: "1"
settings.0.backup_configuration.0.enabled: "true"
settings.0.backup_configuration.0.start_time: "17:00"
settings.0.database_flags.#: "2"
settings.0.database_flags.0.name: "slow_query_log"
settings.0.database_flags.0.value: "on"
settings.0.database_flags.1.name: "character_set_server"
settings.0.database_flags.1.value: "utf8mb4"
settings.0.ip_configuration.#: "1"
settings.0.ip_configuration.0.ipv4_enabled: "true"
settings.0.replication_type: "SYNCHRONOUS"
settings.0.tier: "db-f1-micro"
settings.0.version: "<computed>"
I want to elaborate on this answer, as I needed to generate the database_flags block based on input. Suppose you have a variable:
variable "database-flags" {
type = map(string)
default = {
character_set_server = "utf8mb4"
slow_query_log = "on"
}
}
Using Terraform v0.12.x, this can be written as:
resource "google_sql_database_instance" "master" {
name = "terraform-master"
region = "us-central1"
database_version = "MYSQL_5_6"
project = "test-project"
settings {
tier = "db-f1-micro"
replication_type = "SYNCHRONOUS"
backup_configuration {
enabled = true
start_time = "17:00"
}
ip_configuration {
ipv4_enabled = true
}
dynamic "database_flags" {
iterator = flag
for_each = var.database-flags
content {
name = flag.key
value = flag.value
}
}
}
}
Using the above pattern, you can use the database-creating part of the code as a module and let the consumer decide which flags should be set.
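For example, a hypothetical consumer of such a module (the source path is illustrative) can then pass its own flags:

module "cloudsql" {
  source = "./modules/cloudsql" # hypothetical path to the module above

  database-flags = {
    slow_query_log       = "on"
    character_set_server = "utf8mb4"
  }
}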