Failed to create aks using existing vnet - terraform

I'm trying to create an AKS cluster using Terraform; the catch is that I already have a VNet and subnet created, and I need the cluster created in that network.
When executing this code I'm getting an error:
locals {
environment = "prod"
resource_group = "hnk_rg_poc"
vnet_subnet_cidr = ["10.3.1.0/24"]
}
#Existing vnet with address space "10.3.1.0/24"
data "azurerm_virtual_network" "existing-vnet" {
name = "${var.vnet}"
resource_group_name = local.resource_group
}
#subnets
resource "azurerm_subnet" "vnet_subnet_id" {
name = "${var.vnet_subnet_id}"
resource_group_name = local.resource_group
address_prefixes = local.vnet_subnet_cidr
virtual_network_name = data.azurerm_virtual_network.existing-vnet.name
}
vnet_subnet_id = data.azurerm_subnet.vnet_subnet_id.id

Since you already have an existing VNet and subnet to be used by the AKS cluster, you have to use a data block instead of a resource block for the subnet.
You can use the below to create a basic AKS cluster using your existing VNet and subnet:
provider "azurerm" {
features {}
}
#local vars
locals {
environment = "test"
resource_group = "resource_group_name"
name_prefix = "name-aks"
}
#Existing vnet with address space
data "azurerm_virtual_network" "base" {
name = "existing-vnet"
resource_group_name = local.resource_group
}
#existing subnet to be used by aks
data "azurerm_subnet" "aks" {
name = "existing-subnet"
resource_group_name = local.resource_group
virtual_network_name = data.azurerm_virtual_network.base.name
}
#kubernetes_cluster
resource "azurerm_kubernetes_cluster" "base" {
name = "${local.name_prefix}-${local.environment}"
location = data.azurerm_virtual_network.base.location
resource_group_name = data.azurerm_virtual_network.base.resource_group_name
dns_prefix = "dns-${local.name_prefix}-${local.environment}"
network_profile {
network_plugin = "azure"
}
default_node_pool {
name = "default"
node_count = 1
vm_size = "Standard_D2_v2"
vnet_subnet_id = data.azurerm_subnet.aks.id
}
identity {
type = "SystemAssigned"
}
}
Output: (Terraform Plan)

Related

Attach an AKS Cluster to an existing VNET using Terraform

I am new to the DevOps and Terraform domain, and I would like to ask the following. I have already created a VNet (using the portal) called "myVNET" in the resource group "Networks". I am trying to implement an AKS cluster using Terraform. My main.tf file is below:
provider "azurerm" {
subscription_id = var.subscription_id
client_id = var.client_id
client_secret = var.client_secret
tenant_id = var.tenant_id
features {}
}
resource "azurerm_resource_group" "MyPlatform" {
name = var.resourcename
location = var.location
}
resource "azurerm_kubernetes_cluster" "aks-cluster" {
name = var.clustername
location = azurerm_resource_group.MyPlatform.location
resource_group_name = azurerm_resource_group.MyPlatform.name
dns_prefix = var.dnspreffix
default_node_pool {
name = "default"
node_count = var.agentnode
vm_size = var.size
}
service_principal {
client_id = var.client_id
client_secret = var.client_secret
}
network_profile {
network_plugin = "azure"
load_balancer_sku = "standard"
network_policy = "calico"
}
}
My question is the following: how can I attach my cluster to my VNet?
You do that by assigning the subnet ID to the node pool vnet_subnet_id.
data "azurerm_subnet" "subnet" {
name = "<name of the subnet to run in>"
virtual_network_name = "MyVNET"
resource_group_name = "Networks"
}
...
resource "azurerm_kubernetes_cluster" "aks-cluster" {
...
default_node_pool {
name = "default"
...
vnet_subnet_id = data.azurerm_subnet.subnet.id
}
...
You can reference this existing module to build your own module, or use it directly.

AKS via Terraform Error: Code="CustomRouteTableWithUnsupportedMSIType"

I'm trying to create a private AKS cluster via Terraform using an existing VNet and subnet. I was able to create the cluster before, but suddenly the error below appeared:
│ Error: creating Managed Kubernetes Cluster "demo-azwe-aks-cluster" (Resource Group "demo-azwe-aks-rg"): containerservice.ManagedClustersClient#CreateOrUpdate: Failure sending request: StatusCode=0 -- Original Error: Code="CustomRouteTableWithUnsupportedMSIType" Message="Clusters using managed identity type SystemAssigned do not support bringing your own route table. Please see https://aka.ms/aks/customrt for more information"
│
│ with azurerm_kubernetes_cluster.aks_cluster,
│ on aks_cluster.tf line 30, in resource "azurerm_kubernetes_cluster" "aks_cluster":
│ 30: resource "azurerm_kubernetes_cluster" "aks_cluster" {
# Provision AKS Cluster
resource "azurerm_kubernetes_cluster" "aks_cluster" {
name = "${var.global-prefix}-${var.cluster-id}-${var.environment}-azwe-aks-cluster"
location = "${var.location}"
resource_group_name = azurerm_resource_group.aks_rg.name
dns_prefix = "${var.global-prefix}-${var.cluster-id}-${var.environment}-azwe-aks-cluster"
kubernetes_version = data.azurerm_kubernetes_service_versions.current.latest_version
node_resource_group = "${var.global-prefix}-${var.cluster-id}-${var.environment}-azwe-aks-nrg"
private_cluster_enabled = true
default_node_pool {
name = "dpool"
vm_size = "Standard_DS2_v2"
orchestrator_version = data.azurerm_kubernetes_service_versions.current.latest_version
availability_zones = [1, 2, 3]
enable_auto_scaling = true
max_count = 2
min_count = 1
os_disk_size_gb = 30
type = "VirtualMachineScaleSets"
vnet_subnet_id = data.azurerm_subnet.aks.id
node_labels = {
"nodepool-type" = "system"
"environment" = "${var.environment}"
"nodepoolos" = "${var.nodepool-os}"
"app" = "system-apps"
}
tags = {
"nodepool-type" = "system"
"environment" = "dev"
"nodepoolos" = "linux"
"app" = "system-apps"
}
}
# Identity (System Assigned or Service Principal)
identity {
type = "SystemAssigned"
}
# Add On Profiles
addon_profile {
azure_policy {enabled = true}
oms_agent {
enabled = true
log_analytics_workspace_id = azurerm_log_analytics_workspace.insights.id
}
}
# Create Azure AD Group in Active Directory for AKS Admins
resource "azuread_group" "aks_administrators" {
name = "${azurerm_resource_group.aks_rg.name}-cluster-administrators"
description = "Azure AKS Kubernetes administrators for the ${azurerm_resource_group.aks_rg.name}-cluster."
}
# RBAC and Azure AD Integration Block
role_based_access_control {
enabled = true
azure_active_directory {
managed = true
admin_group_object_ids = [azuread_group.aks_administrators.id]
}
}
# Linux Profile
linux_profile {
admin_username = "ubuntu"
ssh_key {
key_data = file(var.ssh_public_key)
}
}
# Network Profile
network_profile {
network_plugin = "kubenet"
load_balancer_sku = "Standard"
}
tags = {
Environment = "prod"
}
}
You are trying to create a private AKS cluster with an existing VNet and existing subnets for both AKS and the firewall. As per the error "CustomRouteTableWithUnsupportedMSIType", a SystemAssigned identity does not support bringing your own route table, so you need a user assigned managed identity with a role assigned to it, i.e. Network Contributor.
The network plugin should be azure instead of kubenet, as you are using an Azure VNet and its subnet.
You can use add-ons as per your requirement, but please ensure you have used a data block for the workspace; otherwise, you can directly give the resource ID. So, instead of
log_analytics_workspace_id = azurerm_log_analytics_workspace.insights.id
you can use
log_analytics_workspace_id = "/subscriptions/SubscriptionID/resourcegroups/resourcegroupname/providers/microsoft.operationalinsights/workspaces/workspacename"
Example to create a private cluster with an existing VNet and subnets (I haven't added add-ons):
provider "azurerm" {
features {}
}
#resource group as this will be referred to in managed identity creation
data "azurerm_resource_group" "base" {
name = "resourcegroupname"
}
#existing vnet
data "azurerm_virtual_network" "base" {
name = "ansuman-vnet"
resource_group_name = data.azurerm_resource_group.base.name
}
#existing subnets
data "azurerm_subnet" "aks" {
name = "akssubnet"
resource_group_name = data.azurerm_resource_group.base.name
virtual_network_name = data.azurerm_virtual_network.base.name
}
data "azurerm_subnet" "firewall" {
name = "AzureFirewallSubnet"
resource_group_name = data.azurerm_resource_group.base.name
virtual_network_name = data.azurerm_virtual_network.base.name
}
#user assigned identity required to create route table
resource "azurerm_user_assigned_identity" "base" {
resource_group_name = data.azurerm_resource_group.base.name
location = data.azurerm_resource_group.base.location
name = "mi-name"
}
#role assignment required to create route table
resource "azurerm_role_assignment" "base" {
scope = data.azurerm_resource_group.base.id
role_definition_name = "Network Contributor"
principal_id = azurerm_user_assigned_identity.base.principal_id
}
#route table
resource "azurerm_route_table" "base" {
name = "rt-aksroutetable"
location = data.azurerm_resource_group.base.location
resource_group_name = data.azurerm_resource_group.base.name
}
#route
resource "azurerm_route" "base" {
name = "dg-aksroute"
resource_group_name = data.azurerm_resource_group.base.name
route_table_name = azurerm_route_table.base.name
address_prefix = "0.0.0.0/0"
next_hop_type = "VirtualAppliance"
next_hop_in_ip_address = azurerm_firewall.base.ip_configuration.0.private_ip_address
}
#route table association
resource "azurerm_subnet_route_table_association" "base" {
subnet_id = data.azurerm_subnet.aks.id
route_table_id = azurerm_route_table.base.id
}
#firewall
resource "azurerm_public_ip" "base" {
name = "pip-firewall"
location = data.azurerm_resource_group.base.location
resource_group_name = data.azurerm_resource_group.base.name
allocation_method = "Static"
sku = "Standard"
}
resource "azurerm_firewall" "base" {
name = "fw-akscluster"
location = data.azurerm_resource_group.base.location
resource_group_name = data.azurerm_resource_group.base.name
ip_configuration {
name = "ip-firewallakscluster"
subnet_id = data.azurerm_subnet.firewall.id
public_ip_address_id = azurerm_public_ip.base.id
}
}
#kubernetes_cluster
resource "azurerm_kubernetes_cluster" "base" {
name = "testakscluster"
location = data.azurerm_resource_group.base.location
resource_group_name = data.azurerm_resource_group.base.name
dns_prefix = "dns-testakscluster"
private_cluster_enabled = true
network_profile {
network_plugin = "azure"
outbound_type = "userDefinedRouting"
}
default_node_pool {
name = "default"
node_count = 1
vm_size = "Standard_D2_v2"
vnet_subnet_id = data.azurerm_subnet.aks.id
}
identity {
type = "UserAssigned"
user_assigned_identity_id = azurerm_user_assigned_identity.base.id
}
depends_on = [
azurerm_route.base,
azurerm_role_assignment.base
]
}
Output: (Terraform plan, Terraform apply, and Azure portal screenshots)
Note: By default, Azure requires the subnet used by the firewall to be named AzureFirewallSubnet. If you use a subnet with any other name for the firewall, creation will error out. So please ensure the existing subnet to be used by the firewall is named AzureFirewallSubnet.

Unable to create Azure AKS Cluster using existing VNET and Subnets

I'm trying to build an AKS cluster in Azure using Terraform. However, I do not want AKS deployed into its own VNet and subnet; I have already built a subnet within a VNet that I want it to use. When trying to just give it the subnet ID, I get an overlapping CIDR issue. My networking is:
VNet: 10.0.0.0/16
Subnets: 10.0.1.0/24, 10.0.2.0/24, and 10.0.3.0/24. I need AKS to use the 10.0.1.0/24 subnet within this VNet. However, my Terraform config is trying to use a CIDR of 10.0.0.0/16, which is an obvious conflict. I don't know how to fix this issue inside of Terraform; with the portal I can just choose the VNet/subnet for AKS. Below is my Terraform configuration which generates the error:
terraform {
required_providers {
azurerm = {
source = "hashicorp/azurerm"
version = "=2.46.0"
}
}
}
# Configure the Microsoft Azure Provider
provider "azurerm" {
features {}
subscription_id = "####"
tenant_id = "####"
}
locals {
azure_location = "East US"
azure_location_short = "eastus"
}
resource "azurerm_resource_group" "primary_vnet_resource_group" {
name = "vnet-prod-002-eastus-001"
location = local.azure_location
}
resource "azurerm_virtual_network" "primary_vnet_virtual_network" {
name = "vnet_primary_eastus-001"
location = local.azure_location
resource_group_name = azurerm_resource_group.primary_vnet_resource_group.name
address_space = ["10.0.0.0/16"]
}
resource "azurerm_subnet" "aks-subnet" {
name = "snet-aks-prod-002-eastus-001"
# location = local.azure_location
virtual_network_name = azurerm_virtual_network.primary_vnet_virtual_network.name
resource_group_name = azurerm_resource_group.primary_vnet_resource_group.name
address_prefixes = ["10.0.1.0/24"]
}
output "aks_subnet_id" {
value = azurerm_subnet.aks-subnet.id
}
resource "azurerm_subnet" "application-subnet" {
name = "snet-app-prod-002-eastus-001"
# location = local.azure_location
virtual_network_name = azurerm_virtual_network.primary_vnet_virtual_network.name
resource_group_name = azurerm_resource_group.primary_vnet_resource_group.name
address_prefixes = ["10.0.2.0/24"]
}
resource "azurerm_subnet" "postgres-subnet" {
name = "snet-postgres-prod-002-eastus-001"
# location = local.azure_location
virtual_network_name = azurerm_virtual_network.primary_vnet_virtual_network.name
resource_group_name = azurerm_resource_group.primary_vnet_resource_group.name
address_prefixes = ["10.0.3.0/24"]
}
output "postgres_subnet_id" {
value = azurerm_subnet.postgres-subnet.id
}
resource "azurerm_kubernetes_cluster" "aks-prod-002-eastus-001" {
name = "aks-prod-002-eastus-001"
location = local.azure_location
resource_group_name = azurerm_resource_group.primary_vnet_resource_group.name
dns_prefix = "aks-prod-002-eastus-001"
default_node_pool {
name = "default"
node_count = 1
vm_size = "Standard_DS2_v2"
vnet_subnet_id = azurerm_subnet.aks-subnet.id
}
network_profile {
network_plugin = "azure"
}
identity {
type = "SystemAssigned"
}
addon_profile {
aci_connector_linux {
enabled = false
}
azure_policy {
enabled = false
}
http_application_routing {
enabled = false
}
oms_agent {
enabled = false
}
}
}
I'm not a Terraform expert and really need a hand with this if anyone knows how to accomplish it. I've been up and down the documentation; I can find a way to specify the subnet ID, but that's about all I can do. If I don't specify the subnet ID then everything is built, but a new VNet is created, which is what I don't want.
Thanks in advance.
All of the following properties need to be set under network_profile, as shown below:
network_profile {
network_plugin = "azure"
network_policy = "azure"
service_cidr = "10.0.4.0/24"
dns_service_ip = "10.0.4.10"
docker_bridge_cidr = "172.17.0.1/16"
}
These were missing in my configuration; I hope this helps anyone who is having similar problems.
More info about this block can be found here: https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/kubernetes_cluster#network_plugin
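For context, a minimal sketch of how that block slots into the cluster resource from the question. The key point is that service_cidr and dns_service_ip must not overlap any existing subnet in the VNet (10.0.4.0/24 is unused in the 10.0.0.0/16 VNet above); the CIDR values are just the ones from the answer, so adjust them to your own addressing:
resource "azurerm_kubernetes_cluster" "aks-prod-002-eastus-001" {
  name                = "aks-prod-002-eastus-001"
  location            = local.azure_location
  resource_group_name = azurerm_resource_group.primary_vnet_resource_group.name
  dns_prefix          = "aks-prod-002-eastus-001"

  default_node_pool {
    name           = "default"
    node_count     = 1
    vm_size        = "Standard_DS2_v2"
    vnet_subnet_id = azurerm_subnet.aks-subnet.id # existing 10.0.1.0/24 subnet
  }

  network_profile {
    network_plugin     = "azure"
    network_policy     = "azure"
    service_cidr       = "10.0.4.0/24" # not assigned to any subnet in the VNet
    dns_service_ip     = "10.0.4.10"   # must fall within service_cidr
    docker_bridge_cidr = "172.17.0.1/16"
  }

  identity {
    type = "SystemAssigned"
  }
}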

Azure Terraform | remote-exec Bastion Host VM

Example referred to: https://medium.com/devops-dudes/how-to-setup-completely-private-azure-kubernetes-service-aks-clusters-with-azure-private-links-b800a5a6776f
##
# Configure the Azure Provider
##
provider "azurerm" {
version = "=2.8.0"
features {}
}
##
# Define variables for location, service principal for AKS and Bastion VM Admin
##
variable "location" {
type = map(string)
default = {
value = "West Europe"
suffix = "westeurope" # The corresponding value of location that is used by Azure in naming AKS resource groups
}
}
variable "aks_service_principal" {
type = map(string)
/* Set value with .tfvars
{
client_id = "xxxxx"
client_secret = "yyyyy"
}
*/
}
variable "bastion_admin" {
type = map(string)
/* Set value with .tfvars
{
username = "xxxxx"
password = "yyyyy"
}
*/
}
##
# Create a resource group for the azure resources
##
resource "azurerm_resource_group" "my_rg" {
name = "rg-private-aks-demo"
location = var.location.value
}
##
# Create Vnet and subnet for the AKS cluster
##
resource "azurerm_virtual_network" "vnet_cluster" {
name = "vnet-private-aks-demo"
location = var.location.value
resource_group_name = azurerm_resource_group.my_rg.name
address_space = ["10.1.0.0/16"]
}
resource "azurerm_subnet" "snet_cluster" {
name = "snet-private-aks-demo"
resource_group_name = azurerm_resource_group.my_rg.name
virtual_network_name = azurerm_virtual_network.vnet_cluster.name
address_prefixes = ["10.1.0.0/24"]
# Enforce network policies to allow Private Endpoint to be added to the subnet
enforce_private_link_endpoint_network_policies = true
}
##
# Create Vnet and subnet for the Bastion VM
##
resource "azurerm_virtual_network" "vnet_bastion" {
name = "vnet-bastion-demo"
location = var.location.value
resource_group_name = azurerm_resource_group.my_rg.name
address_space = ["10.0.0.0/16"]
}
resource "azurerm_subnet" "snet_bastion_vm" {
name = "snet-bastion-demo"
resource_group_name = azurerm_resource_group.my_rg.name
virtual_network_name = azurerm_virtual_network.vnet_bastion.name
address_prefixes = ["10.0.0.0/24"]
}
resource "azurerm_subnet" "snet_azure_bastion_service" {
# The subnet name cannot be changed as the azure bastion host depends on the same
name = "AzureBastionSubnet"
resource_group_name = azurerm_resource_group.my_rg.name
virtual_network_name = azurerm_virtual_network.vnet_bastion.name
address_prefixes = ["10.0.1.0/24"]
}
##
# Create Vnet peering for the bastion VM to be able to access the cluster Vnet and IPs
##
resource "azurerm_virtual_network_peering" "peering_bastion_cluster" {
name = "peering_bastion_cluster"
resource_group_name = azurerm_resource_group.my_rg.name
virtual_network_name = azurerm_virtual_network.vnet_bastion.name
remote_virtual_network_id = azurerm_virtual_network.vnet_cluster.id
}
resource "azurerm_virtual_network_peering" "peering_cluster_bastion" {
name = "peering_cluster_bastion"
resource_group_name = azurerm_resource_group.my_rg.name
virtual_network_name = azurerm_virtual_network.vnet_cluster.name
remote_virtual_network_id = azurerm_virtual_network.vnet_bastion.id
}
##
# Create the AKS Cluster
##
resource "azurerm_kubernetes_cluster" "my_aks" {
name = "aks-my-cluster"
location = var.location.value
resource_group_name = azurerm_resource_group.my_rg.name
dns_prefix = "aks-cluster"
# Make the cluster private
private_cluster_enabled = true
# Improve security using Azure AD, K8s roles and rolebindings.
# Each Azure AD user gets his personal kubeconfig, with permissions managed through AD Groups and Rolebindings
role_based_access_control {
enabled = true
}
# Enable Kubernetes Dashboard, if needed
addon_profile {
kube_dashboard {
enabled = true
}
}
# To prevent CIDR collision with the 10.0.0.0/16 Vnet
network_profile {
network_plugin = "kubenet"
docker_bridge_cidr = "192.167.0.1/16"
dns_service_ip = "192.168.1.1"
service_cidr = "192.168.0.0/16"
pod_cidr = "172.16.0.0/22"
}
default_node_pool {
name = "default"
node_count = 1
vm_size = "Standard_D2_v2"
vnet_subnet_id = azurerm_subnet.snet_cluster.id
}
service_principal {
client_id = var.aks_service_principal.client_id
client_secret = var.aks_service_principal.client_secret
}
}
##
# Link the Bastion Vnet to the Private DNS Zone generated to resolve the Server IP from the URL in Kubeconfig
##
resource "azurerm_private_dns_zone_virtual_network_link" "link_bastion_cluster" {
name = "dnslink-bastion-cluster"
# The Terraform language does not support user-defined functions, and so only the functions built in to the language are available for use.
# The below code gets the private dns zone name from the fqdn, by slicing out the dns prefix
private_dns_zone_name = join(".", slice(split(".", azurerm_kubernetes_cluster.my_aks.private_fqdn), 1, length(split(".", azurerm_kubernetes_cluster.my_aks.private_fqdn))))
resource_group_name = "MC_${azurerm_resource_group.my_rg.name}_${azurerm_kubernetes_cluster.my_aks.name}_${var.location.suffix}"
virtual_network_id = azurerm_virtual_network.vnet_bastion.id
}
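For illustration, a locals sketch showing what that expression evaluates to for a hypothetical private FQDN (the FQDN below is made up):
locals {
  # hypothetical value of azurerm_kubernetes_cluster.my_aks.private_fqdn
  example_fqdn = "aks-my-cluster-ab12cd34.12345678-90ab-cdef-1234-567890abcdef.privatelink.westeurope.azmk8s.io"
  # split() breaks the FQDN into labels, slice() drops the first label (the cluster's dns prefix),
  # and join() rebuilds the remaining labels into the private DNS zone name:
  # "12345678-90ab-cdef-1234-567890abcdef.privatelink.westeurope.azmk8s.io"
  example_zone = join(".", slice(split(".", local.example_fqdn), 1, length(split(".", local.example_fqdn))))
}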
##
# Create a Bastion VM
##
resource "azurerm_network_interface" "bastion_nic" {
name = "nic-bastion"
location = var.location.value
resource_group_name = azurerm_resource_group.my_rg.name
ip_configuration {
name = "internal"
subnet_id = azurerm_subnet.snet_bastion_vm.id
private_ip_address_allocation = "Dynamic"
}
}
resource "azurerm_linux_virtual_machine" "example" {
name = "vm-bastion"
location = var.location.value
resource_group_name = azurerm_resource_group.my_rg.name
size = "Standard_D2_v2"
admin_username = var.bastion_admin.username
admin_password = var.bastion_admin.password
disable_password_authentication = false
network_interface_ids = [
azurerm_network_interface.bastion_nic.id,
]
os_disk {
caching = "ReadWrite"
storage_account_type = "Standard_LRS"
}
source_image_reference {
publisher = "Canonical"
offer = "UbuntuServer"
sku = "16.04-LTS"
version = "latest"
}
}
##
# Create an Azure Bastion Service to access the Bastion VM
##
resource "azurerm_public_ip" "pip_azure_bastion" {
name = "pip-azure-bastion"
location = var.location.value
resource_group_name = azurerm_resource_group.my_rg.name
allocation_method = "Static"
sku = "Standard"
}
resource "azurerm_bastion_host" "azure-bastion" {
name = "azure-bastion"
location = var.location.value
resource_group_name = azurerm_resource_group.my_rg.name
ip_configuration {
name = "configuration"
subnet_id = azurerm_subnet.snet_azure_bastion_service.id
public_ip_address_id = azurerm_public_ip.pip_azure_bastion.id
}
}
This example works fine; only the Bastion Host can access the Kubernetes API.
Now I am trying to do a helm release from Terraform, but I have to run the helm release from within the Bastion Host.
Is there any way to run the helm release remotely?

Create a virtual network using terraform for AKS cluster

When creating a new AKS cluster using azurerm_kubernetes_cluster, a new resource group is created automatically, called MC_%RESOURCE_GROUP_NAME%_%CLUSTER_NAME%_%LOCATION%, which contains all the networking resources for the cluster.
My goal is to use one VNet which will hold all of these resources (the AKS networking resources) and further resources such as VMs and Azure Cache.
As I see it, there are 2 options to resolve this issue:
Use terraform output and create the new resources inside the VNet created automatically; this is not ideal since I have zero influence on the networking created.
Create my own VNet and attach the AKS cluster to this VNet.
Currently, I'm trying approach 2, but no luck so far.
Any thoughts, recommendations, and examples would be appreciated.
It doesn't really work like that. As you saw, Azure will create a brand new RG with a bunch of AKS-related stuff in there. But the cluster itself will go in whatever RG you want it to be in. And then, using the azure network plugin, your pods can go onto the VNets you specify and have access to other resources in your VNet.
Here's a sample of how I built it when developing for my needs; it should be a good starter for you.
resource "azurerm_resource_group" "rg" {
name = var.rgname
location = var.location
tags = var.default-tags
}
resource "azurerm_virtual_network" "vnet" {
name = "vnet"
location = azurerm_resource_group.rg.location
resource_group_name = azurerm_resource_group.rg.name
address_space = [var.ipspace]
tags = var.default-tags
}
resource "azurerm_subnet" "vmsubnet" {
name = "vmsubnet"
resource_group_name = azurerm_resource_group.rg.name
virtual_network_name = azurerm_virtual_network.vnet.name
address_prefix = var.vmsubnet
}
resource "azurerm_subnet" "akspodssubnet" {
name = "akspodssubnet"
resource_group_name = azurerm_resource_group.rg.name
virtual_network_name = azurerm_virtual_network.vnet.name
address_prefix = var.akspodssubnet
}
resource "azurerm_kubernetes_cluster" "k8s" {
name = "K8Scluster"
location = azurerm_resource_group.rg.location
resource_group_name = azurerm_resource_group.rg.name # the RG the cluster object itself goes in
dns_prefix = "k8s"
node_resource_group = "K8S${azurerm_resource_group.rg.name}" # all the k8s entities must be in a different RG than where the cluster object itself is
api_server_authorized_ip_ranges = ["REDACTED"]
#enable_pod_security_policy = true
kubernetes_version = "1.15.7"
default_node_pool {
name = "default"
type = "AvailabilitySet"
vm_size = var.vmsize # Standard_DC2s_v2 Standard_B1ms
enable_node_public_ip = false
enable_auto_scaling = false
os_disk_size_gb = 30
node_count = 1
vnet_subnet_id = azurerm_subnet.akspodssubnet.id
}
addon_profile {
kube_dashboard { enabled = true }
}
network_profile {
network_plugin = "azure"
network_policy = "azure"
load_balancer_sku = "standard"
service_cidr = var.aksservicecidr
docker_bridge_cidr = var.dockercidrip
dns_service_ip = var.aksdns
}
linux_profile {
admin_username = var.sudouser
ssh_key { key_data = var.sshpubkey }
}
service_principal {
client_id = var.client_id
client_secret = var.client_secret
}
tags = var.default-tags
}
output "client_certificate" {
value = azurerm_kubernetes_cluster.k8s.kube_config.0.client_certificate
}
output "kube_config" {
value = azurerm_kubernetes_cluster.k8s.kube_config_raw
}
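Note: on more recent Terraform and azurerm provider versions, kube_config and kube_config_raw are marked sensitive, so Terraform may refuse to plan these outputs unless they are flagged as sensitive too; a minimal sketch assuming that newer tooling:
output "client_certificate" {
  value     = azurerm_kubernetes_cluster.k8s.kube_config.0.client_certificate
  sensitive = true
}
output "kube_config" {
  value     = azurerm_kubernetes_cluster.k8s.kube_config_raw
  sensitive = true
}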
