I would like a guide on how to automatically deploy to a newly provisioned AKS cluster after provisioning it with Terraform. For context, I am building a one-click script that provisions the full infrastructure and deploys to it in a single run. Below is my structure for reference.
main.tf
resource "azurerm_kubernetes_cluster" "aks" {
name = var.cluster_name
kubernetes_version = var.kubernetes_version
location = var.location
resource_group_name = var.resource_group_name
dns_prefix = var.cluster_name
default_node_pool {
name = "system"
node_count = var.system_node_count
vm_size = "Standard_DS2_v2"
type = "VirtualMachineScaleSets"
availability_zones = [1, 2, 3]
enable_auto_scaling = false
}
identity {
type = "SystemAssigned"
}
network_profile {
load_balancer_sku = "Standard"
network_plugin = "kubenet"
}
role_based_access_control {
enabled = true
}
}
output.tf
resource "local_file" "kubeconfig" {
depends_on = [azurerm_kubernetes_cluster.aks]
filename = "kubeconfig"
content = azurerm_kubernetes_cluster.aks.kube_config_raw
}
deployment.tf
resource "kubernetes_deployment" "sdc" {
metadata {
name = "sdc"
labels = {
app = "serviceName"
#version = "v1.0"
}
namespace = "default"
}
spec {
replicas = 1
selector {
match_labels = {
app = "serviceName"
}
}
template {
metadata {
labels = {
app = "serviceName"
# version = "v1.0"
}
}
spec {
container {
image = "myImage"
name = "serviceName"
port {
container_port = 80
}
}
}
}
}
depends_on = [
azurerm_kubernetes_cluster.aks
]
}
Everything works perfectly: my kubeconfig file is created and downloaded. My major headache is how to make the terraform apply process use the newly created kubeconfig and also run the deployment, so that the script is fully automated. I basically want to provision and deploy into the newly provisioned cluster in one run.
Looking forward to your help.
Thanks guys
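A minimal sketch of the usual way to do this in one apply (based on the resources above; exact syntax depends on your provider versions): configure the kubernetes provider directly from the AKS resource's kube_config attributes instead of the generated kubeconfig file, since provider configuration is evaluated before local_file.kubeconfig exists on disk.
provider "kubernetes" {
  # credentials come straight from the cluster resource, so no kubeconfig file is needed
  host                   = azurerm_kubernetes_cluster.aks.kube_config[0].host
  client_certificate     = base64decode(azurerm_kubernetes_cluster.aks.kube_config[0].client_certificate)
  client_key             = base64decode(azurerm_kubernetes_cluster.aks.kube_config[0].client_key)
  cluster_ca_certificate = base64decode(azurerm_kubernetes_cluster.aks.kube_config[0].cluster_ca_certificate)
}
With this in place, kubernetes_deployment.sdc can be created in the same terraform apply, and the local_file kubeconfig is only needed for kubectl access after the run.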
I am new to Terraform and am using the template below to create an Azure App Service Plan, App Service, and App Insights together.
# Configure the Azure provider
terraform {
required_providers {
azurerm = {
source = "hashicorp/azurerm"
version = "~> 2.98"
}
}
required_version = ">= 1.1.6"
}
provider "azurerm" {
features { }
}
resource "azurerm_application_insights" "appService-app_insights" {
name ="${var.prefix}-${var.App_Insights}"
location = var.Location
resource_group_name = var.ResourceGroup
application_type = "web" # Node.JS ,java
}
resource "azurerm_app_service" "appservice" {
name ="${var.prefix}-${var.appservice_name}"
location = var.Location
resource_group_name = var.ResourceGroup
app_service_plan_id = azurerm_app_service_plan.appserviceplan.id
https_only = true
site_config {
linux_fx_version = "NODE|10.14"
}
app_settings = {
# "SOME_KEY" = "some-value"
"APPINSIGHTS_INSTRUMENTATIONKEY" = azurerm_application_insights.appService-app_insights.instrumentation_key
}
depends_on = [
azurerm_app_service_plan.appserviceplan,
azurerm_application_insights.appService-app_insights
]
}
# create the AppService Plan for the App Service hosting our website
resource "azurerm_app_service_plan" "appserviceplan" {
name ="${var.prefix}-${var.app_service_plan_name}"
location = var.Location
resource_group_name = var.ResourceGroup
kind ="linux"
reserved = true
sku {
tier = "Standard" #
size = "S1"
}
}
I am generating a variable.tf file at runtime which is quite simple in this case
variable "ResourceGroup" {
default = "TerraRG"
}
variable "Location" {
default = "westeurope"
}
variable "app_service_plan_name" {
default = "terra-asp"
}
variable "appservice_name" {
default = "terra-app"
}
variable "prefix" {
default = "pre"
}
variable "App_Insights" {
default = "terra-ai"
}
Everything works fine up to this point.
Now I am trying to extend my infrastructure, and I want to go with multiple Apps + App Service Plans + App Insights, which might look like the JSON below:
{
"_comment": "Web App Config",
"webapps": [
{
"Appservice": "app1",
"Appserviceplan": "asp1",
"InstrumentationKey": "abc"
},
{
"Appservice": "app2",
"Appserviceplan": "asp2",
"InstrumentationKey": "def"
},
{
"Appservice": "app3",
"Appserviceplan": "asp2",
"InstrumentationKey": "def"
}
]
}
How can I target such a resource creation? Should I create the App Service Plans and App Insights first and then plan the creation of the Apps, or is there a better approach for this scenario?
Since app1, app2, and app3 are not globally unique, I have tried different names: the app service names testapprahuluni12345, testapp12346, and testapp12347.
main.tf
# Configure the Azure provider
terraform {
required_providers {
azurerm = {
source = "hashicorp/azurerm"
version = "~> 2.98"
}
}
}
provider "azurerm" {
features { }
}
resource "azurerm_application_insights" "appService-app_insights" {
name ="${var.prefix}-${var.App_Insights}"
location = var.Location
resource_group_name = var.ResourceGroup
application_type = "web" # Node.JS ,java
}
resource "azurerm_app_service_plan" "appserviceplan" {
count = length(var.app_service_plan_name)
name = var.app_service_plan_name[count.index]
location = var.Location
resource_group_name = var.ResourceGroup
kind ="linux"
reserved = true
sku {
tier = "Standard" #
size = "S1"
}
}
# create the AppService Plan for the App Service hosting our website
resource "azurerm_app_service" "appservice" {
count = length(var.app_names)
name = var.app_names[count.index]
location = var.Location
resource_group_name = var.ResourceGroup
app_service_plan_id = azurerm_app_service_plan.appserviceplan[count.index].id
https_only = true
site_config {
linux_fx_version = "NODE|10.14"
}
app_settings = {
# "SOME_KEY" = "some-value"
"APPINSIGHTS_INSTRUMENTATIONKEY" = azurerm_application_insights.appService-app_insights.instrumentation_key
}
depends_on = [
azurerm_app_service_plan.appserviceplan,
azurerm_application_insights.appService-app_insights
]
}
variable.tf
variable "ResourceGroup" {
default = "v-XXXXX--ree"
}
variable "Location" {
default = "West US 2"
}
/*variable "app_service_plan_name" {
default = "terra-asp"
}
variable "appservice_name" {
default = "terra-app"
}
*/
variable "prefix" {
default = "pre"
}
variable "App_Insights" {
default = "terra-ai"
}
variable "app_names" {
description = "App Service Names"
type = list(string)
default = ["testapprahuluni12345", "testapp12346", "testapp12347"]
}
variable "app_service_plan_name" {
description = "App Service Plan Name"
type = list(string)
default = ["asp1", "asp2", "asp2"]
}
Output:
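One possible direction for the multi-app requirement (a sketch, not the original poster's code; it assumes the JSON above is saved as webapps.json next to the configuration) is to decode the file and drive both resources with for_each, so several apps can share one plan:
locals {
  webapps = { for app in jsondecode(file("${path.module}/webapps.json")).webapps : app.Appservice => app }
  plans   = toset([for app in values(local.webapps) : app.Appserviceplan])
}
resource "azurerm_app_service_plan" "appserviceplan" {
  for_each            = local.plans
  name                = each.value
  location            = var.Location
  resource_group_name = var.ResourceGroup
  kind                = "linux"
  reserved            = true
  sku {
    tier = "Standard"
    size = "S1"
  }
}
resource "azurerm_app_service" "appservice" {
  for_each            = local.webapps
  name                = each.key
  location            = var.Location
  resource_group_name = var.ResourceGroup
  # look up the plan this app belongs to by the name given in the JSON
  app_service_plan_id = azurerm_app_service_plan.appserviceplan[each.value.Appserviceplan].id
  https_only          = true
  site_config {
    linux_fx_version = "NODE|10.14"
  }
  app_settings = {
    "APPINSIGHTS_INSTRUMENTATIONKEY" = each.value.InstrumentationKey
  }
}
Because both resources are keyed by stable names from the JSON, adding or removing an entry in webapps.json only touches that entry's resources, instead of renumbering everything the way count does.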
I have the following cluster, and it creates fine. But I also want to add a kubernetes_namespace resource so that a namespace gets created after the private cluster is created. How do I get Terraform to actually connect to the private cluster after it creates it?
resource "azurerm_kubernetes_cluster" "aks_cluster" {
for_each = var.aks_clusters
name = "aks-${each.key}-${var.env}-001"
location = azurerm_resource_group.aks_rg.location
resource_group_name = azurerm_resource_group.aks_rg.name
dns_prefix = "test-${each.key}-aks-cluster"
kubernetes_version = data.azurerm_kubernetes_service_versions.current.latest_version
private_cluster_enabled = true #false until networking is complete
private_cluster_public_fqdn_enabled = true
#
# - Name must start with a lowercase letter, have max length of 12,
# and only have characters a-z0-9.
#
default_node_pool {
name = substr("test${each.key}",0,12)
vm_size = var.aks_cluster_vm_size
os_disk_size_gb = var.aks_cluster_os_size_gb
orchestrator_version = data.azurerm_kubernetes_service_versions.current.latest_version
availability_zones = [1, 2]
enable_auto_scaling = true
max_count = var.node_max_count
min_count = var.node_min_count
node_count = var.node_count
type = "VirtualMachineScaleSets"
vnet_subnet_id = var.aks_subnets[each.key].id
node_labels = {
"type" = each.key
"environment" = var.env
}
tags = {
"type" = each.key
"environment" = var.env
}
}
network_profile {
network_plugin = "kubenet"
pod_cidr = var.aks_subnets[each.key].pcidr
service_cidr = var.aks_subnets[each.key].scidr
docker_bridge_cidr = var.aks_subnets[each.key].dockcidr
dns_service_ip = var.aks_subnets[each.key].dnsip
}
service_principal {
client_id = var.aks_app_id
client_secret = var.aks_password
}
role_based_access_control {
enabled = true
}
tags = local.resource_tags
}
Run Terraform from a private build agent in the same VNet or in a peered VNet. The Terraform state should be stored in a blob container or wherever you want, just not locally.
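For example, a remote state block along these lines (the names here are placeholders, not values from the original post):
terraform {
  backend "azurerm" {
    resource_group_name  = "tfstate-rg"      # placeholder
    storage_account_name = "tfstatestorage"  # placeholder
    container_name       = "tfstate"         # placeholder
    key                  = "aks.terraform.tfstate"
  }
}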
If you are trying to do this from a local machine, it's not possible: with a private AKS cluster, the Terraform client cannot resolve the API server's IP address or send REST calls to it when they are on different networks.
The best option is to install the Terraform client on a machine in the network where you would like to deploy the AKS cluster, or in a connected network. In any case, the network has to be created before attempting to create the cluster. This is because when you create a namespace with Terraform in an AKS cluster, the provider is no longer azurerm; the kubernetes provider accesses the API server directly (it does not send a REST call to ARM the way az aks command invoke does).
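For comparison, az aks command invoke proxies the command through ARM and runs it inside the cluster, which is why it works without direct network access (illustrative command only, reusing the names from the example below):
az aks command invoke --resource-group resourcegroup --name ansuman-aks-001 --command "kubectl get namespaces"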
Error from the local machine for a private cluster:
But it will be possible for a public cluster:
As a workaround, you can create a VM in the same VNet that you will be using for the AKS cluster and run the Terraform configuration from inside that VM.
provider "azurerm" {
features {}
}
data "azurerm_resource_group" "test" {
name = "resourcegroup"
}
data "azurerm_subnet" "aks-subnet" {
name = "default"
resource_group_name = data.azurerm_resource_group.test.name
virtual_network_name = "ansuman-vnet"
}
resource "azurerm_kubernetes_cluster" "aks_cluster" {
name = "ansuman-aks-001"
location = data.azurerm_resource_group.test.location
resource_group_name = data.azurerm_resource_group.test.name
dns_prefix = "ansuman-aks-cluster"
private_cluster_enabled = true
private_cluster_public_fqdn_enabled = true
default_node_pool {
name = "default"
vm_size = "Standard_D2_v2"
availability_zones = [1, 2]
enable_auto_scaling = true
max_count = 4
min_count = 1
node_count = 2
type = "VirtualMachineScaleSets"
vnet_subnet_id = data.azurerm_subnet.aks-subnet.id
}
network_profile {
network_plugin = "kubenet"
dns_service_ip = "10.1.0.10"
docker_bridge_cidr = "170.10.0.1/16"
service_cidr = "10.1.0.0/16"
}
service_principal {
client_id = "f6a2f33d-xxxxxx-xxxxxx-xxxxxxx-xxxxxx"
client_secret = "mLk7Q~2S6D1Omoe1xxxxxxxxxxxxxxxxxxxxx"
}
role_based_access_control {
enabled = true
}
}
provider "kubernetes" {
host = "${azurerm_kubernetes_cluster.aks_cluster.kube_config.0.host}"
username = "${azurerm_kubernetes_cluster.aks_cluster.kube_config.0.username}"
password = "${azurerm_kubernetes_cluster.aks_cluster.kube_config.0.password}"
client_certificate = base64decode("${azurerm_kubernetes_cluster.aks_cluster.kube_config.0.client_certificate}")
client_key = base64decode("${azurerm_kubernetes_cluster.aks_cluster.kube_config.0.client_key}")
cluster_ca_certificate = base64decode("${azurerm_kubernetes_cluster.aks_cluster.kube_config.0.cluster_ca_certificate}")
}
resource "kubernetes_namespace" "app_namespace" {
metadata {
name = "my-namespace"
}
depends_on = [
azurerm_kubernetes_cluster.aks_cluster
]
}
Output:
Note: If you want to verify that the namespace was added, open the portal from the VM, since the VM and the cluster are on the same network.
For more information on how to connect to a private AKS cluster, refer to the link below.
Reference:
Create a private Azure Kubernetes Service cluster - Azure Kubernetes Service | Microsoft documentation
The Application Gateway ingress controller was added for AKS in my Terraform configuration. After adding it, I get the error shown below.
My module in azure is below
Cluster Resource Group
resource "azurerm_resource_group" "aks" {
name = var.resource_group_name
location = var.location
}
AKS Cluster Network
module "aks_network" {
source = "../modules/aks_network"
subnet_name = var.subnet_name
vnet_name = var.vnet_name
name = azurerm_resource_group.aks.name
subnet_cidr = var.subnet_cidr
location = var.location
address_space = var.address_space
app_gateway_subnet_name = var.app_gateway_subnet_name
app_gateway_subnet_address_prefix = var.app_gateway_subnet_address_prefix
}
AKS Log Analytics
module "log_analytics" {
source = "../modules/log_analytics"
name = azurerm_resource_group.aks.name
log_analytics_workspace_location = var.log_analytics_workspace_location
log_analytics_workspace_name = var.log_analytics_workspace_name
log_analytics_workspace_sku = var.log_analytics_workspace_sku
}
AKS Cluster
module "aks_cluster" {
source = "../modules/aks-cluster"
cluster_name = var.cluster_name
location = var.location
dns_prefix = var.dns_prefix
name = azurerm_resource_group.aks.name
kubernetes_version = var.kubernetes_version
node_count = var.node_count
min_count = var.min_count
max_count = var.max_count
vm_size = var.vm_size
service_cidr = var.service_cidr
network_plugin = var.network_plugin
vnet_subnet_id = module.aks_network.aks_subnet_id
client_id = var.client_id
client_secret = var.client_secret
environment = var.environment
subnet_id = module.aks_network.subnet_app_gateway_id
}
and below are the variable file for the above module
variables.tf
variable "client_id" {
description = "Azure Service Principal id (client id)"
}
variable "client_secret" {
description = "Azure Service Principal secret (client secret)"
}
variable "resource_group_name" {
description = "Resource Group name"
}
variable "node_count" {
description = "number of nodes to deploy"
}
variable "dns_prefix" {
description = "DNS prefix"
}
variable "cluster_name" {
description = "AKS cluster name"
}
variable "location" {
description = "azure location to deploy resources"
}
variable "log_analytics_workspace_name" {
description = "azure name to deploy log analytics workspace"
}
variable "log_analytics_workspace_location" {
description = "azure location to deploy log analytics workspace"
}
variable "log_analytics_workspace_sku" {
description = "azure sku to deploy log analytics workspace"
}
variable "subnet_name" {
description = "subnet where the nodes will be deployed"
}
variable "vnet_name" {
description = "vnet where the nodes will be deployed"
}
variable "subnet_cidr" {
description = "the subnet cidr range"
}
variable "kubernetes_version" {
description = "version of the kubernetes cluster"
}
variable "vm_size" {
description = "size/type of VM to use for nodes"
}
variable "service_cidr" {
description = "kubernetes internal service cidr range"
}
variable "network_plugin" {
description = "network plugin for kubernetes network overlay (azure or calico)"
}
variable "address_space" {
description = "The address space that is used by the virtual network"
}
variable "min_count" {
description = "Minimum Node Count"
}
variable "max_count" {
description = "Maximum Node Count"
}
variable "environment" {
description = "Environment"
}
variable "app_gateway_subnet_name" {
description = "App Gateway Subnet Name"
}
variable "app_gateway_subnet_address_prefix" {
description = "App Gateway Subnet Address Prefix"
}
aks_network
main.tf is as below
resource "azurerm_virtual_network" "aks_vnet" {
name = var.vnet_name
address_space = [var.address_space]
resource_group_name = var.name
location = var.location
}
resource "azurerm_subnet" "aks_subnet" {
name = var.subnet_name
resource_group_name = var.name
virtual_network_name = azurerm_virtual_network.aks_vnet.name
address_prefix = var.subnet_cidr
}
resource "azurerm_subnet" "subnet_app_gateway" {
resource_group_name = var.name
virtual_network_name = azurerm_virtual_network.aks_vnet.name
name = var.app_gateway_subnet_name
address_prefix = var.app_gateway_subnet_address_prefix
}
variables.tf
variable "subnet_name" {
description = "name to give the subnet"
}
variable "name" {
description = "resource group that the vnet resides in"
}
variable "vnet_name" {
description = "name of the vnet that this subnet will belong to"
}
variable "subnet_cidr" {
description = "the subnet cidr range"
}
variable "location" {
description = "the cluster location"
}
variable "address_space" {
description = "Network address space"
}
variable "app_gateway_subnet_name" {
description = "App Gateway Subnet Name."
default = "agw-subnet"
}
variable "app_gateway_subnet_address_prefix" {
description = "App Gateway subnet address prefix."
default = "10.100.0.0/24"
}
aks-cluster
main.tf
resource "azurerm_kubernetes_cluster" "cluster" {
name = var.cluster_name
location = var.location
resource_group_name = var.name
dns_prefix = var.dns_prefix
kubernetes_version = var.kubernetes_version
default_node_pool {
name = var.default_pool_name
node_count = var.node_count
vm_size = var.vm_size
vnet_subnet_id = var.vnet_subnet_id
type = var.default_pool_type
enable_auto_scaling = true
min_count = var.min_count
max_count = var.max_count
}
addon_profile {
azure_policy {
enabled = true
}
ingress_application_gateway {
enabled = true
subnet_id = resource.azurerm_subnet.subnet_app_gateway.id
}
}
role_based_access_control {
enabled = true
}
network_profile {
network_plugin = var.network_plugin
network_policy = "azure"
service_cidr = var.service_cidr
dns_service_ip = "10.0.0.10"
docker_bridge_cidr = "172.17.0.1/16"
}
service_principal {
client_id = var.client_id
client_secret = var.client_secret
}
tags = {
Environment = var.environment
}
}
variables.tf
variable "dns_prefix" {
description = "DNS prefix"
}
variable "location" {
description = "azure location to deploy resources"
}
variable "cluster_name" {
description = "AKS cluster name"
}
variable "name" {
description = "name of the resource group to deploy AKS cluster in"
}
variable "kubernetes_version" {
description = "version of the kubernetes cluster"
}
variable "agent_pool_name" {
description = "name for the agent pool profile"
default = "agentpool"
}
variable "agent_pool_type" {
description = "type of the agent pool (AvailabilitySet or VirtualMachineScaleSets)"
default = "VirtualMachineScaleSets"
}
variable "node_count" {
description = "number of nodes to deploy"
}
variable "vm_size" {
description = "size/type of VM to use for nodes"
}
variable "vnet_subnet_id" {
description = "subnet id where the nodes will be deployed"
}
variable "network_plugin" {
description = "network plugin for kubernetes network overlay (azure or calico)"
}
variable "service_cidr" {
description = "kubernetes internal service cidr range"
}
variable "client_id" {
description = "Service principal client id"
}
variable "client_secret" {
description = "Service principal client secret"
}
variable "min_count" {
description = "Minimum Node Count"
}
variable "max_count" {
description = "Maximum Node Count"
}
variable "default_pool_name" {
description = "name for the agent pool profile"
default = "agentpool"
}
variable "default_pool_type" {
description = "type of the agent pool (AvailabilitySet or VirtualMachineScaleSets)"
default = "VirtualMachineScaleSets"
}
variable "environment" {
description = "Environment"
}
There is no variable "subnet_id" in the aks-cluster module, so I removed subnet_id from the aks_cluster module call. After removing it, I get the following error from main.tf in the aks-cluster folder:
Unsupported block type: Blocks of type "ingress_application_gateway" are not expected here.
In case anybody else gets to this page when searching for this error message: we fixed the error "Blocks of type ingress_application_gateway are not expected here" by upgrading the azurerm provider from version 2.43 to 2.73.
terraform {
required_providers {
azurerm = {
source = "hashicorp/azurerm"
version = "~>2.73.0"
}
}
}
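In addition to the provider upgrade, the ingress_application_gateway block inside the aks-cluster module cannot reference resource.azurerm_subnet.subnet_app_gateway, because that subnet only exists in the aks_network module. A sketch of the wiring, assuming the root module keeps passing subnet_id = module.aks_network.subnet_app_gateway_id as shown above:
# aks-cluster/variables.tf
variable "subnet_id" {
  description = "ID of the subnet used by the Application Gateway ingress controller"
}
# aks-cluster/main.tf, inside addon_profile
ingress_application_gateway {
  enabled   = true
  subnet_id = var.subnet_id
}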
I want an AKS pod to add and delete DNS records whenever a Service is created. I have achieved this via the GUI, but I want to use Terraform to do the same.
Created AKS Cluster:
resource "azurerm_kubernetes_cluster" "aks_cluster" {
name = "${azurerm_resource_group.my-res-grp-in-tf.name}-cluster"
location = azurerm_resource_group.my-res-grp-in-tf.location
resource_group_name = azurerm_resource_group.my-res-grp-in-tf.name
dns_prefix = "${azurerm_resource_group.my-res-grp-in-tf.name}-cluster"
kubernetes_version = data.azurerm_kubernetes_service_versions.current.latest_version
node_resource_group = "${azurerm_resource_group.my-res-grp-in-tf.name}-nrg"
default_node_pool {
name = "systempool"
vm_size = "standard_d2s_v3"
orchestrator_version = data.azurerm_kubernetes_service_versions.current.latest_version
availability_zones = [1, 2, 3]
enable_auto_scaling = true
max_count = 1
min_count = 1
os_disk_size_gb = 30
type = "VirtualMachineScaleSets"
node_labels = {
"nodepool-type" = "system"
"environment" = var.env
"nodepoolos" = "linux"
"app" = "system-apps"
}
tags = {
"nodepool-type" = "system"
"environment" = var.env
"nodepoolos" = "linux"
"app" = "system-apps"
}
}
# Identity (one of either identity or service_principal blocks must be specified.)
identity {
type = "SystemAssigned"
}
# Add On Profiles
addon_profile {
azure_policy {
enabled = true
}
kube_dashboard {
enabled = false
}
http_application_routing {
enabled = false
}
oms_agent {
enabled = true
log_analytics_workspace_id = azurerm_log_analytics_workspace.insights.id
}
}
# RBAC and Azure AD Integration Block
role_based_access_control {
enabled = true
azure_active_directory {
managed = true
admin_group_object_ids = [azuread_group.aks_administrators.id]
}
}
# Windows Profile
windows_profile {
admin_username = var.windows_admin_username
admin_password = var.windows_admin_password
}
# Linux Profile
linux_profile {
admin_username = "ubuntu"
ssh_key {
key_data = file(var.ssh_public_key)
}
}
# Network Profile
network_profile {
network_plugin = "azure"
load_balancer_sku = "Standard"
}
tags = {
Environment = var.env
}
# login into cluster
provisioner "local-exec" {
command = "az aks get-credentials --name ${azurerm_kubernetes_cluster.aks_cluster.name} --resource-group ${azurerm_resource_group.my-res-grp-in-tf.name} --admin"
}
}
I have created a resource group named "dns-zone-rg" specifically for this task.
resource "azurerm_resource_group" "dns-zone-rg-tf" {
name = "dns-zone-rg"
location = var.location
}
Created a DNS zone in "dns-zone-rg" resource group
resource "azurerm_dns_zone" "public-domain-dns-zone" {
name = "mydomain.xyz"
resource_group_name = azurerm_resource_group.dns-zone-rg-tf.name
}
Created a managed identity "mi-for-dns-zone-rg" in the "dns-zone-rg" resource group.
resource "azurerm_user_assigned_identity" "manage-identity-tf" {
resource_group_name = azurerm_resource_group.dns-zone-rg-tf.name
location = var.location
name = "mi-for-dns-zone-rg"
}
Assigned "Contributor" role to manage identity "mi-for-dns-zone-rg" and given a scope to manage resources in resource group "dns-zone-rg".
resource "azurerm_role_assignment" "assign-reader-to-manage-identity" {
scope = azurerm_resource_group.dns-zone-rg-tf.id
role_definition_name = "Contributor"
principal_id = azurerm_user_assigned_identity.manage-identity-tf.principal_id
}
Now I want to associate this managed identity "mi-for-dns-zone-rg" with the system node pool created by AKS. I am not able to figure out how to do that, or how to fetch the details of the node pool created by AKS.
Currently, it's not possible in Terraform alone.
You have to use local-exec provisioners in Terraform with Azure CLI commands to achieve this.
resource "null_resource" "node-pool-name"{
depends_on = [azurerm_kubernetes_cluster.aks_cluster,azurerm_role_assignment.assign-reader-to-manage-identity]
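# find the VMSS that backs the system node pool in the AKS node resource group and save its name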
provisioner "local-exec" {
command = "az vmss list -g ${azurerm_kubernetes_cluster.aks_cluster.node_resource_group} --query \"[?contains(name,'aks-systempool')].name\" --out tsv > ${path.module}/system-node-poolname.txt"
}
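# attach the user-assigned managed identity to that VMSS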
provisioner "local-exec" {
command = "az vmss identity assign -g ${azurerm_kubernetes_cluster.aks_cluster.node_resource_group} -n `cat ${path.module}/system-node-poolname.txt` --identities ${azurerm_user_assigned_identity.manage-identity-tf.id}"
}
}
Current state:
I have all services within one cluster and under just one resource_group. My problem is that I have to push all the services every time, and my deploy is getting slow.
What I want to do: I want to split every service within my directory so I can deploy each one separately. Each service now has its own backend, so it can have its own remote state and won't change other things when I deploy. However, can I still keep all the services within the same resource_group? If yes, how can I achieve that? If I need to create a resource group for each service that I want to deploy separately, can I still use the same cluster?
main.tf
provider "azurerm" {
version = "2.23.0"
features {}
}
resource "azurerm_resource_group" "main" {
name = "${var.resource_group_name}-${var.environment}"
location = var.location
timeouts {
create = "20m"
delete = "20m"
}
}
resource "tls_private_key" "key" {
algorithm = "RSA"
}
resource "azurerm_kubernetes_cluster" "main" {
name = "${var.cluster_name}-${var.environment}"
location = azurerm_resource_group.main.location
resource_group_name = azurerm_resource_group.main.name
dns_prefix = "${var.dns_prefix}-${var.environment}"
node_resource_group = "${var.resource_group_name}-${var.environment}-worker"
kubernetes_version = "1.18.6"
linux_profile {
admin_username = var.admin_username
ssh_key {
key_data = "${trimspace(tls_private_key.key.public_key_openssh)} ${var.admin_username}@azure.com"
}
}
default_node_pool {
name = "default"
node_count = var.agent_count
vm_size = "Standard_B2s"
os_disk_size_gb = 30
}
role_based_access_control {
enabled = "false"
}
addon_profile {
kube_dashboard {
enabled = "true"
}
}
network_profile {
network_plugin = "kubenet"
load_balancer_sku = "Standard"
}
timeouts {
create = "40m"
delete = "40m"
}
service_principal {
client_id = var.client_id
client_secret = var.client_secret
}
tags = {
Environment = "Production"
}
}
provider "kubernetes" {
version = "1.12.0"
load_config_file = "false"
host = azurerm_kubernetes_cluster.main.kube_config[0].host
client_certificate = base64decode(
azurerm_kubernetes_cluster.main.kube_config[0].client_certificate,
)
client_key = base64decode(azurerm_kubernetes_cluster.main.kube_config[0].client_key)
cluster_ca_certificate = base64decode(
azurerm_kubernetes_cluster.main.kube_config[0].cluster_ca_certificate,
)
}
backend.tf (for main)
terraform {
backend "azurerm" {}
}
client.tf (service that I want to deploy separately)
resource "kubernetes_deployment" "client" {
metadata {
name = "client"
labels = {
serviceName = "client"
}
}
timeouts {
create = "20m"
delete = "20m"
}
spec {
progress_deadline_seconds = 600
replicas = 1
selector {
match_labels = {
serviceName = "client"
}
}
template {
metadata {
labels = {
serviceName = "client"
}
}
}
}
}
resource "kubernetes_service" "client" {
metadata {
name = "client"
}
spec {
selector = {
serviceName = kubernetes_deployment.client.metadata[0].labels.serviceName
}
port {
port = 80
target_port = 80
}
}
}
backend.tf (for client)
terraform {
backend "azurerm" {
resource_group_name = "test-storage"
storage_account_name = "test"
container_name = "terraform"
key="test"
}
}
deployment.sh
terraform -v
terraform init \
-backend-config="resource_group_name=$TF_BACKEND_RES_GROUP" \
-backend-config="storage_account_name=$TF_BACKEND_STORAGE_ACC" \
-backend-config="container_name=$TF_BACKEND_CONTAINER" \
terraform plan
terraform apply -target="azurerm_resource_group.main" -auto-approve \
-var "environment=$ENVIRONMENT" \
-var "tag_version=$TAG_VERSION" \
PS: I can build the test resource group from scratch if needed. Don't worry about its current state.
PS2: The state files are being saved into the right place, no issue about that.
If you want to deploy resources separately, you could take a look at terraform apply with this option.
-target=resource    Resource to target. Operation will be limited to this resource and its dependencies. This flag can be used multiple times.
For example, just deploy a resource group and its dependencies like this,
terraform apply -target="azurerm_resource_group.main"
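Applied to the layout above, a single service can be pushed on its own, for example (a sketch; add the -var and backend arguments you already use):
terraform apply -target="kubernetes_deployment.client" -target="kubernetes_service.client"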