terraform {
required_providers {
azurerm = {
source = "hashicorp/azurerm"
version = "<= 3.30.0"
}
}
required_version = ">= 0.13"
}
provider "azurerm" {
features {}
}
data "azurerm_subscription" "current" {
subscription_id = var.subscription_id
}
data "azurerm_resource_group" "current" {
name = var.resource_group
}
#data "azurerm_storage_account" "default"{
# name=var.storage_account_name
# resource_group_name=var.storage_account_resource_group
#}
#resource "azurerm_storage_queue" "default" {
#name=var.storage_queue_name
#storage_account_name=data.azurerm_storage_account.default.name
#}
module "storage-account" {
source = "git#github.com:procter-gamble/terraform-azure-storage-account.git?ref=v4.0.0"
count = var.existing_storage_account ? 0 : 1
subscription_id = data.azurerm_subscription.current.subscription_id
resource_group = data.azurerm_resource_group.current.name
resource_name = var.storage_account_name
access_groups = []
resource_tags = var.resource_tags
account_tier = var.storage_account_tier
access_tier = var.storage_access_tier
replication_type = var.storage_replication_type
virtual_network_subnet_ids = var.storage_virtual_network_subnet_ids
create_private_dns = false
enable_private_endpoint = false
queues = var.queues
ip_rules = var.ip_rules
}
resource "azurerm_eventgrid_event_subscription" "default" {
for_each = { for event in var.event_grid : event.name => event }
name = each.value.name
event_delivery_schema = lookup(each.value, "event_delivery_schema", null)
advanced_filtering_on_arrays_enabled = lookup(each.value, "advanced_filtering_on_arrays_enabled", null)
scope = data.azurerm_resource_group.current.id
dynamic "storage_queue_endpoint" {
for_each = lookup(each.value, "storage_queue_endpoint", "false") == "true" ? [1] : []
content {
storage_account_id = join(",",[module.storage-account[0].id])
queue_name = "test"
queue_message_time_to_live_in_seconds = lookup(each.value, "queue_message_time_to_live_in_seconds", null)
}
}
depends_on = [module.storage-account]
}
I'm planning to create an Azure Event Grid subscription with a storage_queue_endpoint. I'm able to create the storage account and queue, but when the endpoint is accessed I'm hitting the error below:
╷
│ Error: waiting for Event Subscription: (Name "evengridtest01" / Scope "/subscriptions/**/resourceGroups/"): Code="Endpoint validation" Message="Destination endpoint not found. Resource details: resourceId: /subscriptions//resourceGroups//providers/Microsoft.Storage/storageAccounts/tfteststgacc009. Resource should pre-exist before attempting this operation. Activity id: 7a4d548e-53ba-4530-a93e-8c2d48aad183, timestamp: 2/17/2023 3:54:27 PM (UTC)."
│
│   with azurerm_eventgrid_event_subscription.default["evengridtest01"],
│   on main.tf line 50, in resource "azurerm_eventgrid_event_subscription" "default":
│   50: resource "azurerm_eventgrid_event_subscription" "default" {
╵
I tried code similar to yours:
Code:
resource "azurerm_storage_account" "storage-account" {
count = var.existing_storage_account ? 0 : 1
name = var.storage_account_name
resource_group_name = data.azurerm_resource_group.example.name
location = data.azurerm_resource_group.example.location
account_tier = "Standard"
account_replication_type = "LRS"
access_tier = var.storage_access_tier
}
resource "azurerm_eventgrid_event_subscription" "default" {
for_each = { for event in var.event_grid : event.name => event }
name = each.value.name
event_delivery_schema = lookup(each.value, "event_delivery_schema", null)
advanced_filtering_on_arrays_enabled = lookup(each.value, "advanced_filtering_on_arrays_enabled", null)
scope = data.azurerm_resource_group.example.id
dynamic "storage_queue_endpoint" {
for_each = lookup(each.value, "storage_queue_endpoint", "false") == "true" ? [1] : []
content {
storage_account_id = join(",", [azurerm_storage_account.storage-account[0].id])
queue_name = "test"
queue_message_time_to_live_in_seconds = lookup(each.value, "queue_message_time_to_live_in_seconds", null)
}
}
depends_on = [azurerm_storage_account.storage-account ]
}
│ Error: waiting for Event Subscription: (Name "subscription2" / Scope "/subscriptions/xxxe87ca71c/resourceGroups/xxx"): Code="Endpoint validation" Message="Destination endpoint not found. Resource details: resourceId: /subscriptions/xx/resourceGroups/xxx/providers/Microsoft.Storage/storageAccounts/mykastorgeaccount. Resource should pre-exist before attempting this operation.
For that to work, the storage account must already exist before the Event Grid subscription pointing at the destination endpoint is created.
az eventgrid topic create --name xxx --location xxx --resource-group xxx
az eventgrid event-subscription create --name <subscription name> --resource-group <resource group name> --topic-name <xxx> --endpoint <endpoint-url>
Create an Event Grid event subscription for each storage queue endpoint.
Then check that the storage account exists:
az storage account show --name <storage account name> --resource-group <resource group name>
Once the endpoint resolves successfully, the Event Grid subscription can be created.
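Expressed in Terraform, a pre-existing account can be looked up with a data source so that the endpoint resolves to a real resource. A sketch (the account and resource group names are placeholders, echoing the commented-out block in your question):
data "azurerm_storage_account" "existing" {
  name                = "tfteststgacc009"   # placeholder: the pre-existing storage account
  resource_group_name = "my-resource-group" # placeholder
}

# In the subscription's storage_queue_endpoint block you would then reference:
#   storage_account_id = data.azurerm_storage_account.existing.id
#   queue_name         = <name of an existing queue in that account>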
The following code worked for me:
Variables.tf
variable "existing_storage_account" {
description = "Flag indicating whether an existing storage account should be used or a new one created"
type = bool
default = true
}
variable "storage_account_name" {
description = "Name of the storage account to be created"
type = string
default = "mykastorgeaccount"
}
variable "storage_account_tier" {
description = "Tier of the storage account (Standard or Premium)"
type = string
default = "Standard"
}
variable "storage_access_tier" {
description = "Access tier of the storage account (Hot or Cool)"
type = string
default = "Hot"
}
variable "storage_replication_type" {
description = "Replication type of the storage account (LRS, GRS, RAGRS, or ZRS)"
type = string
default = "LRS"
}
Main.tf
resource "azurerm_storage_account" "storageaccount" {
count = var.existing_storage_account ? 1 : 0
name = var.storage_account_name
resource_group_name = data.azurerm_resource_group.example.name
location = data.azurerm_resource_group.example.location
account_tier = "Standard"
account_replication_type = "LRS"
access_tier = var.storage_access_tier
}
resource "azurerm_storage_queue" "example" {
name = "examplestq"
storage_account_name = azurerm_storage_account.storageaccount[0].name
}
resource "azurerm_eventgrid_event_subscription" "default" {
for_each = { for event in var.event_grid : event.name => event }
name = each.value.name
event_delivery_schema = lookup(each.value, "event_delivery_schema", null)
advanced_filtering_on_arrays_enabled = lookup(each.value, "advanced_filtering_on_arrays_enabled", null)
scope = azurerm_storage_account.storageaccount[0].id
dynamic "storage_queue_endpoint" {
for_each = lookup(each.value, "storage_queue_endpoint", "false") == "true" ? [1] : []
content {
storage_account_id = join(",", [azurerm_storage_account.storageaccount[0].id])
queue_name = azurerm_storage_queue.example.name
queue_message_time_to_live_in_seconds = lookup(each.value, "queue_message_time_to_live_in_seconds", null)
}
}
depends_on = [azurerm_storage_account.storageaccount ]
}
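For completeness, the event_grid variable that drives the for_each above isn't shown in this post; a minimal definition, assuming the object shape implied by the lookup() calls, could look like this:
variable "event_grid" {
  description = "Event Grid subscription definitions (shape assumed from the lookup() calls above)"
  type        = list(map(string))
  default = [
    {
      name                   = "evengridtest01"
      storage_queue_endpoint = "true"
    }
  ]
}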
Reference:
https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/eventgrid_event_subscription
Related
I am trying to create multiple Azure Storage Accounts, each with its own set of containers, but I couldn't achieve this because of the multiple resource blocks involved. The code I wrote below creates multiple containers in one storage account. How can I do the same to create multiple containers in different storage accounts?
resource "azurerm_resource_group" "rg" {
name = "test-rg"
location = "eastus"
}
variable "storageaccountname" {
type = string
default = "storageaccount1"
}
variable "containers_list" {
type = list
default = [{ name = "sa1container1", access_type = "private" }, {name = "sa1container2", access_type = "private" },{name = "sa1container3", access_type = "private" }]
}
resource "azurerm_storage_account" "storageaccount" {
name = var.storageaccountname
resource_group_name = azurerm_resource_group.rg.name
location = azurerm_resource_group.rg.location
account_tier = "Standard"
account_replication_type = "LRS"
}
resource "azurerm_storage_container" "container" {
count = length(var.containers_list)
name = var.containers_list[count.index].name
storage_account_name = azurerm_storage_account.storageaccount.name
container_access_type = var.containers_list[count.index].access_type
}
I tried to reproduce the scenario in my environment:
Create a list of names for the storage accounts and containers.
Variables.tf
variable "containers_list" {
type = list
default = [{ name = "sa1container1", access_type = "private" }, {name = "sa1container2", access_type = "private" },{name = "sa1container3", access_type = "private" }]
}
variable "Storage_list" {
type = list
default = ["sa1stor1", "sa1stor2","sa1stor3"]
}
Main.tf
Create a storage account for each name listed in the variable:
resource "azurerm_storage_account" "storage_account" {
// count = length(var.Storage_list)
//name = var.Storage_list[count.index].name
for_each = toset(var.Storage_list)
name=each.value
resource_group_name = data.azurerm_resource_group.example.name
location = data.azurerm_resource_group.example.location
account_tier = "Standard"
account_replication_type = "LRS"
}
Create a container in each storage account, according to the requirement:
resource "azurerm_storage_container" "container" {
for_each = {for idx, val in local.flat_list: idx => val}
name = each.value[1].name
container_access_type = each.value[1].access_type
storage_account_name = azurerm_storage_account.storage_account[each.value[0]].name
}
Define locals using setproduct to build the double loop:
locals {
flat_list = setproduct(var.Storage_list, var.containers_list)
}
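As a variation (not part of the original answer), you can key each container by a stable "storageaccount/container" string instead of the numeric index, so that reordering the variable lists doesn't force containers to be recreated:
resource "azurerm_storage_container" "container_keyed" {
  for_each = {
    for pair in local.flat_list :
    "${pair[0]}/${pair[1].name}" => pair
  }

  name                  = each.value[1].name
  container_access_type = each.value[1].access_type
  storage_account_name  = azurerm_storage_account.storage_account[each.value[0]].name
}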
Executed code:
Storage accounts are created according to the number of names in the variable.
Three containers, as defined in the containers variable, are created in each storage account.
Reference: terraform-nested-for-each-loop-in-azure-storage-account | SO
When I create a linked service in Azure Data Factory (ADF) for Databricks with Terraform (using azurerm_data_factory_linked_service_azure_databricks), the linked service shows up only in live mode.
How can I make the linked service available in Git mode, where all the other ADF pipeline configurations are stored?
terraform {
required_providers {
azurerm = {
source = "hashicorp/azurerm"
version = "=2.97.0"
}
databricks = {
source = "databrickslabs/databricks"
}
}
}
provider "azurerm" {
features {}
}
provider "databricks" {
host = azurerm_databricks_workspace.this.workspace_url
}
data "azurerm_client_config" "this" {
}
resource "azurerm_data_factory" "this" {
name = "myadf-9182371362"
resource_group_name = "testrg"
location = "East US"
identity {
type = "SystemAssigned"
}
vsts_configuration {
account_name = "mydevopsorg"
branch_name = "main"
project_name = "adftest"
repository_name = "adftest"
root_folder = "/adf/"
tenant_id = data.azurerm_client_config.this.tenant_id
}
}
resource "azurerm_databricks_workspace" "this" {
name = "mydbworkspace"
resource_group_name = "testrg"
location = "East US"
sku = "standard"
}
data "databricks_node_type" "smallest" {
local_disk = true
depends_on = [
azurerm_databricks_workspace.this
]
}
data "databricks_spark_version" "latest_lts" {
long_term_support = true
depends_on = [
azurerm_databricks_workspace.this
]
}
resource "databricks_cluster" "this" {
cluster_name = "Single Node"
spark_version = data.databricks_spark_version.latest_lts.id
node_type_id = data.databricks_node_type.smallest.id
autotermination_minutes = 20
spark_conf = {
"spark.databricks.cluster.profile" : "singleNode"
"spark.master" : "local[*]"
}
depends_on = [
azurerm_databricks_workspace.this
]
custom_tags = {
"ResourceClass" = "SingleNode"
}
}
data "azurerm_resource_group" "this" {
name = "testrg"
}
resource "azurerm_role_assignment" "example" {
scope = data.azurerm_resource_group.this.id
role_definition_name = "Contributor"
principal_id = azurerm_data_factory.this.identity[0].principal_id
}
resource "azurerm_data_factory_linked_service_azure_databricks" "msi_linked" {
name = "ADBLinkedServiceViaMSI"
data_factory_id = azurerm_data_factory.this.id
resource_group_name = "testrg"
description = "ADB Linked Service via MSI"
adb_domain = "https://${azurerm_databricks_workspace.this.workspace_url}"
existing_cluster_id = databricks_cluster.this.id
msi_work_space_resource_id = azurerm_databricks_workspace.this.id
}
Screenshots: result in Git mode, result in live mode.
I am executing terraform.exe apply on Windows and receive the error:
azurerm_subnet.subnet: Refreshing state... [id=<...>]
azurerm_app_service_plan.service_plan: Refreshing state... [id=<...>]
azurerm_app_service.app: Refreshing state... [id=<...>]
azurerm_app_service_virtual_network_swift_connection.test: Refreshing state... [id=<...>]
azurerm_app_service_slot.production: Refreshing state... [id=<...>]
azurerm_app_service_slot.staging: Refreshing state... [id=<...>]
Error: ID was missing the `slots` element
I am attempting to build an Azure Web App with different slots and Docker images using Terraform: it should deploy an Azure Web App whose slots are based on different Docker images.
The first run completes without an error. I receive the error when the resources are refreshed.
I am using azurerm provider version 2.1.0 and the azurerm backend.
See the following terraform file:
terraform {
backend "azurerm" {
resource_group_name = "..."
storage_account_name = "..."
container_name = "..."
key = "..."
subscription_id = "..."
tenant_id = "..."
sas_token = "...."
}
}
provider "azurerm" {
version = "~>2.1.0"
features {}
}
variable "environment" {
default = "production"
}
variable "resource_group" {}
variable "location" {
default = "West Europe"
}
variable "app_name" {}
variable "network" {}
variable "subnet_prefix" {}
resource "azurerm_app_service_plan" "service_plan" {
name = var.app_name
location = var.location
resource_group_name = var.resource_group
kind = "Linux"
reserved = true
sku {
tier = "Standard"
size = "S1"
}
tags = {
Environment = var.environment
Cost = "€0,081/Stunde"
}
}
resource "azurerm_app_service" "app" {
name = var.app_name
location = var.location
resource_group_name = var.resource_group
app_service_plan_id = azurerm_app_service_plan.service_plan.id
depends_on = [azurerm_app_service_plan.service_plan]
site_config {
linux_fx_version = "DOCKER|<...>.azurecr.io/<...>:0.0.1-95"
always_on = "true"
}
app_settings = {
...
}
storage_account {
access_key = "..."
account_name = "..."
name = "certs"
share_name = "certs"
type = "AzureBlob"
mount_path = "/var/certs"
}
tags = {
Environment = var.environment
}
}
resource "azurerm_app_service_slot" "production" {
name = var.app_name
app_service_name = azurerm_app_service.app.name
location = azurerm_app_service.app.location
resource_group_name = var.resource_group
app_service_plan_id = azurerm_app_service_plan.service_plan.id
depends_on = [azurerm_app_service.app]
site_config {
linux_fx_version = "DOCKER|<...>.azurecr.io/<...>:0.0.1-95"
always_on = "true"
}
app_settings = {
"SOME_KEY" = "some-value"
}
}
resource "azurerm_app_service_slot" "staging" {
name = "staging"
app_service_name = azurerm_app_service.app.name
location = azurerm_app_service.app.location
resource_group_name = var.resource_group
app_service_plan_id = azurerm_app_service_plan.service_plan.id
depends_on = [azurerm_app_service.app]
site_config {
linux_fx_version = "DOCKER|<...>.azurecr.io/<...>:latest"
always_on = "true"
}
}
resource "azurerm_subnet" "subnet" {
name = var.app_name
resource_group_name = var.resource_group
virtual_network_name = var.network
address_prefix = var.subnet_prefix
delegation {
name = var.app_name
service_delegation {
name = "Microsoft.Web/serverFarms"
actions = [
"Microsoft.Network/networkinterfaces/*",
"Microsoft.Network/virtualNetworks/subnets/action",
"Microsoft.Network/virtualNetworks/subnets/join/action",
"Microsoft.Network/virtualNetworks/subnets/prepareNetworkPolicies/action",
"Microsoft.Network/virtualNetworks/subnets/unprepareNetworkPolicies/action"
]
}
}
}
resource "azurerm_app_service_virtual_network_swift_connection" "test" {
app_service_id = azurerm_app_service.app.id
subnet_id = azurerm_subnet.subnet.id
depends_on = [
azurerm_app_service.app,
azurerm_subnet.subnet
]
}
What does a missing slots element mean in this context?
Terraform treats resource identifiers as case sensitive, but Azure doesn't.
Somewhere inside your state file you probably have an ID like /Slots/ instead of /slots/.
You can use terraform state pull and terraform state push to manually edit your state file, I think. It's usually not recommended, but Terraform's validation insists on enforcing case sensitivity while the Azure portal itself will show you resource IDs with inconsistent capitalization :/
Question and details
How can I allow a Kubernetes cluster in Azure to talk to an Azure Container Registry via terraform?
I want to load custom images from my Azure Container Registry. Unfortunately, I encounter a permissions error at the point where Kubernetes is supposed to download the image from the ACR.
What I have tried so far
My experiments without terraform (az cli)
It all works perfectly after I attach the ACR to the AKS cluster via the az CLI:
az aks update -n myAKSCluster -g myResourceGroup --attach-acr <acrName>
My experiments with terraform
This is my Terraform configuration; I have stripped some other stuff out. It works on its own.
terraform {
backend "azurerm" {
resource_group_name = "tf-state"
storage_account_name = "devopstfstate"
container_name = "tfstatetest"
key = "prod.terraform.tfstatetest"
}
}
provider "azurerm" {
}
provider "azuread" {
}
provider "random" {
}
# define the password
resource "random_string" "password" {
length = 32
special = true
}
# define the resource group
resource "azurerm_resource_group" "rg" {
name = "myrg"
location = "eastus2"
}
# define the app
resource "azuread_application" "tfapp" {
name = "mytfapp"
}
# define the service principal
resource "azuread_service_principal" "tfapp" {
application_id = azuread_application.tfapp.application_id
}
# define the service principal password
resource "azuread_service_principal_password" "tfapp" {
service_principal_id = azuread_service_principal.tfapp.id
end_date = "2020-12-31T09:00:00Z"
value = random_string.password.result
}
# define the container registry
resource "azurerm_container_registry" "acr" {
name = "mycontainerregistry2387987222"
resource_group_name = azurerm_resource_group.rg.name
location = azurerm_resource_group.rg.location
sku = "Basic"
admin_enabled = false
}
# define the kubernetes cluster
resource "azurerm_kubernetes_cluster" "mycluster" {
name = "myaks"
location = azurerm_resource_group.rg.location
resource_group_name = azurerm_resource_group.rg.name
dns_prefix = "mycluster"
network_profile {
network_plugin = "azure"
}
default_node_pool {
name = "default"
node_count = 1
vm_size = "Standard_B2s"
}
# Use the service principal created above
service_principal {
client_id = azuread_service_principal.tfapp.application_id
client_secret = azuread_service_principal_password.tfapp.value
}
tags = {
Environment = "demo"
}
windows_profile {
admin_username = "dingding"
admin_password = random_string.password.result
}
}
# define the windows node pool for kubernetes
resource "azurerm_kubernetes_cluster_node_pool" "winpool" {
name = "winp"
kubernetes_cluster_id = azurerm_kubernetes_cluster.mycluster.id
vm_size = "Standard_B2s"
node_count = 1
os_type = "Windows"
}
# define the kubernetes name space
resource "kubernetes_namespace" "namesp" {
metadata {
name = "namesp"
}
}
# Try to give permissions, to let the AKS cluster access the ACR
resource "azurerm_role_assignment" "acrpull_role" {
scope = azurerm_container_registry.acr.id
role_definition_name = "AcrPull"
principal_id = azuread_service_principal.tfapp.object_id
skip_service_principal_aad_check = true
}
This code is adapted from https://github.com/terraform-providers/terraform-provider-azuread/issues/104.
Unfortunately, when I launch a container inside the kubernetes cluster, I receive an error message:
Failed to pull image "mycontainerregistry.azurecr.io/myunittests": [rpc error: code = Unknown desc = Error response from daemon: manifest for mycontainerregistry.azurecr.io/myunittests:latest not found: manifest unknown: manifest unknown, rpc error: code = Unknown desc = Error response from daemon: Get https://mycontainerregistry.azurecr.io/v2/myunittests/manifests/latest: unauthorized: authentication required]
Update / note:
When I run terraform apply with the above code, the creation of resources is interrupted:
azurerm_container_registry.acr: Creation complete after 18s [id=/subscriptions/000/resourceGroups/myrg/providers/Microsoft.ContainerRegistry/registries/mycontainerregistry2387987222]
azurerm_role_assignment.acrpull_role: Creating...
azuread_service_principal_password.tfapp: Still creating... [10s elapsed]
azuread_service_principal_password.tfapp: Creation complete after 12s [id=000/000]
azurerm_kubernetes_cluster.mycluster: Creating...
azurerm_role_assignment.acrpull_role: Creation complete after 8s [id=/subscriptions/000/resourceGroups/myrg/providers/Microsoft.ContainerRegistry/registries/mycontainerregistry2387987222/providers/Microsoft.Authorization/roleAssignments/000]
azurerm_kubernetes_cluster.mycluster: Still creating... [10s elapsed]
Error: Error creating Managed Kubernetes Cluster "myaks" (Resource Group "myrg"): containerservice.ManagedClustersClient#CreateOrUpdate: Failure sending request: StatusCode=400 -- Original Error: Code="ServicePrincipalNotFound" Message="Service principal clientID: 000 not found in Active Directory tenant 000, Please see https://aka.ms/aks-sp-help for more details."
on test.tf line 56, in resource "azurerm_kubernetes_cluster" "mycluster":
56: resource "azurerm_kubernetes_cluster" "mycluster" {
I think, however, that this is just because it takes a few minutes for the service principal to be created. When I run terraform apply again a few minutes later, it goes beyond that point without issues.
(I upvoted the answer above.)
Just adding a simpler way where you don't need to create a service principal, for anyone else that might need it.
resource "azurerm_kubernetes_cluster" "kubweb" {
name = local.cluster_web
location = local.rgloc
resource_group_name = local.rgname
dns_prefix = "${local.cluster_web}-dns"
kubernetes_version = local.kubversion
# used to group all the internal objects of this cluster
node_resource_group = "${local.cluster_web}-rg-node"
# azure will assign the id automatically
identity {
type = "SystemAssigned"
}
default_node_pool {
name = "nodepool1"
node_count = 4
vm_size = local.vm_size
orchestrator_version = local.kubversion
}
role_based_access_control {
enabled = true
}
addon_profile {
kube_dashboard {
enabled = true
}
}
tags = {
environment = local.env
}
}
resource "azurerm_container_registry" "acr" {
name = "acr1"
resource_group_name = local.rgname
location = local.rgloc
sku = "Standard"
admin_enabled = true
tags = {
environment = local.env
}
}
# add the role to the identity the kubernetes cluster was assigned
resource "azurerm_role_assignment" "kubweb_to_acr" {
scope = azurerm_container_registry.acr.id
role_definition_name = "AcrPull"
principal_id = azurerm_kubernetes_cluster.kubweb.kubelet_identity[0].object_id
}
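The locals referenced in that snippet (cluster_web, rgname, rgloc, kubversion, vm_size, env) aren't shown in the answer; a hypothetical definition with placeholder values might look like:
locals {
  cluster_web = "kubweb-cluster"    # placeholder cluster name
  rgname      = "my-resource-group" # placeholder resource group name
  rgloc       = "eastus"            # placeholder location
  kubversion  = "1.24.9"            # placeholder Kubernetes version
  vm_size     = "Standard_D2s_v3"
  env         = "demo"
}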
This code worked for me.
resource "azuread_application" "aks_sp" {
name = "sp-aks-${local.cluster_name}"
}
resource "azuread_service_principal" "aks_sp" {
application_id = azuread_application.aks_sp.application_id
app_role_assignment_required = false
}
resource "azuread_service_principal_password" "aks_sp" {
service_principal_id = azuread_service_principal.aks_sp.id
value = random_string.aks_sp_password.result
end_date_relative = "8760h" # 1 year
lifecycle {
ignore_changes = [
value,
end_date_relative
]
}
}
resource "azuread_application_password" "aks_sp" {
application_object_id = azuread_application.aks_sp.id
value = random_string.aks_sp_secret.result
end_date_relative = "8760h" # 1 year
lifecycle {
ignore_changes = [
value,
end_date_relative
]
}
}
data "azurerm_container_registry" "pyp" {
name = var.container_registry_name
resource_group_name = var.container_registry_resource_group_name
}
resource "azurerm_role_assignment" "aks_sp_container_registry" {
scope = data.azurerm_container_registry.pyp.id
role_definition_name = "AcrPull"
principal_id = azuread_service_principal.aks_sp.object_id
}
# requires Azure Provider 1.37+
resource "azurerm_kubernetes_cluster" "pyp" {
name = local.cluster_name
location = azurerm_resource_group.pyp.location
resource_group_name = azurerm_resource_group.pyp.name
dns_prefix = local.env_name_nosymbols
kubernetes_version = local.kubernetes_version
default_node_pool {
name = "default"
node_count = 1
vm_size = "Standard_D2s_v3"
os_disk_size_gb = 80
}
windows_profile {
admin_username = "winadm"
admin_password = random_string.windows_profile_password.result
}
network_profile {
network_plugin = "azure"
dns_service_ip = cidrhost(local.service_cidr, 10)
docker_bridge_cidr = "172.17.0.1/16"
service_cidr = local.service_cidr
load_balancer_sku = "standard"
}
service_principal {
client_id = azuread_service_principal.aks_sp.application_id
client_secret = random_string.aks_sp_password.result
}
addon_profile {
oms_agent {
enabled = true
log_analytics_workspace_id = azurerm_log_analytics_workspace.pyp.id
}
}
tags = local.tags
}
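Note that the random_string resources referenced above (aks_sp_password, aks_sp_secret, windows_profile_password), as well as the remaining locals, the resource group, and the Log Analytics workspace, come from the rest of that configuration (see the source link below). A minimal sketch of the random strings, with lengths chosen arbitrarily, could be:
resource "random_string" "aks_sp_password" {
  length  = 32
  special = true
}

resource "random_string" "aks_sp_secret" {
  length  = 32
  special = true
}

resource "random_string" "windows_profile_password" {
  length  = 24
  special = true
}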
Source: https://github.com/giuliov/pipeline-your-pipelines/tree/master/src/kubernetes/terraform
Just want to go into more depth, as this was something I struggled with as well.
The recommended approach is to use managed identities instead of a service principal, due to less overhead.
Create a Container Registry:
resource "azurerm_container_registry" "acr" {
name = "acr"
resource_group_name = azurerm_resource_group.rg.name
location = azurerm_resource_group.rg.location
sku = "Standard"
admin_enabled = false
}
Create an AKS Cluster. The code below creates the AKS Cluster with 2 identities:
A System Assigned Identity, which is assigned to the Control Plane.
A User Assigned Managed Identity, which is also automatically created and assigned to the Kubelet; notice I have no specific code for that, as it happens automatically.
The Kubelet is the process which goes to the Container Registry to pull the image, thus we need to make sure this User Assigned Managed Identity has the AcrPull Role on the Container Registry.
resource "azurerm_kubernetes_cluster" "aks" {
name = "aks"
resource_group_name = azurerm_resource_group.rg.name
location = azurerm_resource_group.rg.location
dns_prefix = "aks"
node_resource_group = "aks-node"
default_node_pool {
name = "default"
node_count = 1
vm_size = "Standard_Ds2_v2"
enable_auto_scaling = false
type = "VirtualMachineScaleSets"
vnet_subnet_id = azurerm_subnet.aks_subnet.id
max_pods = 50
}
network_profile {
network_plugin = "azure"
load_balancer_sku = "Standard"
}
identity {
type = "SystemAssigned"
}
}
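The resource group and subnet referenced above (azurerm_resource_group.rg, azurerm_subnet.aks_subnet) aren't part of this snippet; hypothetical placeholders, with names and address ranges of my own choosing, could look like this:
resource "azurerm_resource_group" "rg" {
  name     = "aks-rg" # placeholder
  location = "eastus" # placeholder
}

resource "azurerm_virtual_network" "aks_vnet" {
  name                = "aks-vnet"
  resource_group_name = azurerm_resource_group.rg.name
  location            = azurerm_resource_group.rg.location
  address_space       = ["10.10.0.0/16"]
}

resource "azurerm_subnet" "aks_subnet" {
  name                 = "aks-subnet"
  resource_group_name  = azurerm_resource_group.rg.name
  virtual_network_name = azurerm_virtual_network.aks_vnet.name
  address_prefixes     = ["10.10.1.0/24"]
}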
Create the Role Assignment mentioned above to allow the User Assigned Managed Identity to pull from the Container Registry.
resource "azurerm_role_assignment" "ra" {
principal_id = azurerm_kubernetes_cluster.aks.kubelet_identity[0].object_id
role_definition_name = "AcrPull"
scope = azurerm_container_registry.acr.id
skip_service_principal_aad_check = true
}
Hope that clears things up for you, as I have seen some confusion on the internet about the two identities created.
Source: https://jimferrari.com/2022/02/09/attach-azure-container-registry-to-azure-kubernetes-service-terraform/
The Terraform documentation for the Azure Container Registry resource now keeps track of this, which should always be up to date.
https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/container_registry#example-usage-attaching-a-container-registry-to-a-kubernetes-cluster
resource "azurerm_resource_group" "example" {
name = "example-resources"
location = "West Europe"
}
resource "azurerm_container_registry" "example" {
name = "containerRegistry1"
resource_group_name = azurerm_resource_group.example.name
location = azurerm_resource_group.example.location
}
resource "azurerm_kubernetes_cluster" "example" {
name = "example-aks1"
location = azurerm_resource_group.example.location
resource_group_name = azurerm_resource_group.example.name
dns_prefix = "exampleaks1"
default_node_pool {
name = "default"
node_count = 1
vm_size = "Standard_D2_v2"
}
identity {
type = "SystemAssigned"
}
tags = {
Environment = "Production"
}
}
resource "azurerm_role_assignment" "example" {
principal_id = azurerm_kubernetes_cluster.example.kubelet_identity[0].object_id
role_definition_name = "AcrPull"
scope = azurerm_container_registry.example.id
skip_service_principal_aad_check = true
}
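As a usage sketch (not part of the documentation example): once the AcrPull role assignment is in place, workloads on the cluster can pull images from the registry without imagePullSecrets. The kubernetes provider wiring below and the deployment name/image tag are assumptions for illustration:
provider "kubernetes" {
  host                   = azurerm_kubernetes_cluster.example.kube_config[0].host
  client_certificate     = base64decode(azurerm_kubernetes_cluster.example.kube_config[0].client_certificate)
  client_key             = base64decode(azurerm_kubernetes_cluster.example.kube_config[0].client_key)
  cluster_ca_certificate = base64decode(azurerm_kubernetes_cluster.example.kube_config[0].cluster_ca_certificate)
}

resource "kubernetes_deployment" "app" {
  metadata {
    name = "myapp" # hypothetical workload name
  }
  spec {
    replicas = 1
    selector {
      match_labels = { app = "myapp" }
    }
    template {
      metadata {
        labels = { app = "myapp" }
      }
      spec {
        container {
          name  = "myapp"
          image = "${azurerm_container_registry.example.login_server}/myapp:latest" # hypothetical image
        }
      }
    }
  }
}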
I'm developing a Terraform template to automatically enroll a newly built Azure VM in an Azure Recovery Services vault. All the resources I can find in the azurerm provider are:
azurerm_recovery_services_protection_policy_vm
azurerm_recovery_services_protected_vm
azurerm_recovery_services_vault
It seems none of them can enroll a VM in a Recovery Services vault. Does Terraform have this feature?
You can see azurerm_recovery_services_protected_vm in Terraform, and its argument source_vm_id says:
Specifies the ID of the VM to backup
It refers to the VM that you want to back up in the Recovery Services vault. Create the policy with azurerm_recovery_services_protection_policy_vm and the Recovery Services vault with azurerm_recovery_services_vault.
Update
You can back up the VM to a Recovery Services vault with azurerm_recovery_services_protected_vm through Terraform. The code looks like this:
data "azurerm_virtual_machine" "azurevm" {
name = "vm_name"
resource_group_name = "group_name"
}
resource "azurerm_resource_group" "rg" {
name = "recovery_group_name"
location = "eastus"
}
resource "azurerm_recovery_services_vault" "vault" {
name = "azurerecoveryvaulti1"
location = "${azurerm_resource_group.rg.location}"
resource_group_name = "${azurerm_resource_group.rg.name}"
sku = "Standard"
}
resource "azurerm_recovery_services_protection_policy_vm" "test" {
name = "azurerecoveryvaultpolicy1"
resource_group_name = "${azurerm_resource_group.rg.name}"
recovery_vault_name = "${azurerm_recovery_services_vault.vault.name}"
backup = {
frequency = "Daily"
time = "23:00"
}
retention_daily = {
count = 1
}
}
resource "azurerm_recovery_services_protected_vm" "example" {
resource_group_name = "${azurerm_resource_group.rg.name}"
recovery_vault_name = "${azurerm_recovery_services_vault.vault.name}"
source_vm_id = "${data.azurerm_virtual_machine.azurevm.id}"
backup_policy_id = "${azurerm_recovery_services_protection_policy_vm.test.id}"
}
For the test, I created a new Recovery Services vault. You can use an existing one with the data source azurerm_recovery_services_vault. Create a new policy, then back up the VM with azurerm_recovery_services_protected_vm as above.
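For reference, looking up an existing vault with the data source could look like this (the names are placeholders); its name attribute would then be passed as recovery_vault_name:
data "azurerm_recovery_services_vault" "existing" {
  name                = "existing-vault-name" # placeholder
  resource_group_name = "existing-vault-rg"   # placeholder
}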
The resource type azurerm_recovery_services_protected_vm is no longer supported by the hashicorp/azurerm provider.
You can use azurerm_backup_protected_vm instead of azurerm_recovery_services_protected_vm:
resource "azurerm_backup_policy_vm" "vm_backup_policy" {
count = var.vm_backup_policy_name != "" ? 1 : 0
name = var.vm_backup_policy_name
resource_group_name = var.resource_group_name
recovery_vault_name = azurerm_recovery_services_vault.vault.name
timezone = var.vm_backup_policy_timezone
backup {
frequency = var.vm_backup_policy_frequency
time = var.vm_backup_policy_time
}
retention_daily {
count = var.vm_backup_daily_policy_retention
}
dynamic "retention_weekly" {
for_each = var.vm_backup_weekly != {} ? [var.vm_backup_weekly] : []
content {
count = lookup(retention_weekly.value, "count")
weekdays = lookup(retention_weekly.value, "weekdays")
}
}
dynamic "retention_monthly" {
for_each = var.vm_backup_monthly != {} ? [var.vm_backup_monthly] : []
content {
count = lookup(retention_monthly.value, "count")
weekdays = lookup(retention_monthly.value, "weekdays")
weeks = lookup(retention_monthly.value, "weeks")
}
}
dynamic "retention_yearly" {
for_each = var.vm_backup_yearly != {} ? [var.vm_backup_yearly] : []
content {
count = lookup(retention_yearly.value, "count")
weekdays = lookup(retention_yearly.value, "weekdays")
weeks = lookup(retention_yearly.value, "weeks")
months = lookup(retention_yearly.value, "months")
}
}
}
resource "azurerm_backup_protected_vm" "vm" {
resource_group_name = var.resource_group_name
recovery_vault_name = var.recovery_vault_custom_name
source_vm_id = var.source_vm_id
backup_policy_id = azurerm_backup_policy_vm.vm_backup_policy[0].id # index needed because the policy resource uses count
}
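The retention variables driving the dynamic blocks above aren't defined in the answer; assuming the shape implied by the lookup() calls, a sketch might be:
variable "vm_backup_weekly" {
  description = "Weekly retention settings; leave empty to skip the retention_weekly block"
  type        = map(any)
  default     = {}
  # example: { count = 4, weekdays = ["Sunday"] }
}

variable "vm_backup_monthly" {
  description = "Monthly retention settings (count, weekdays, weeks)"
  type        = map(any)
  default     = {}
}

variable "vm_backup_yearly" {
  description = "Yearly retention settings (count, weekdays, weeks, months)"
  type        = map(any)
  default     = {}
}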