Terraform code provisions an Azure Kubernetes Service (AKS)

elhay efrat
5 min read · Mar 22, 2023

--

This Terraform code provisions an AKS cluster with multiple node pools and configures its network profile, default node pool, and node pools for analyzer, clickhouse, and cs. It also provisions a virtual network with multiple subnets.

Usage

  1. Clone this repository.
  2. Navigate to the cloned directory.
  3. az login
  4. az extension add --name aks-preview && az extension update --name aks-preview && az feature register --namespace "Microsoft.ContainerService" --name "AzureOverlayPreview" && az provider register --namespace Microsoft.ContainerService
  5. Run terraform init to initialize the Terraform configuration.
  6. Run terraform plan to see the infrastructure plan.
  7. Run terraform apply to apply the infrastructure changes.

AKS Cluster

Virtual Network

Finally, a virtual network with five subnets is created, named core, ep, nat, public, and site, with the provided CIDR blocks.

Note that the commented-out code for the container registry is not used in this configuration.

Kubernetes Networking: Kubenet vs Azure CNI Overlay

Overview

Kubernetes networking is a critical aspect of managing a cluster. In this README, we compare two common Kubernetes networking solutions: Kubenet and Azure CNI Overlay.

Kubenet

Kubenet is a basic networking solution that is included with Kubernetes by default. It assigns IP addresses to pods from an address space logically different from the VNet. However, Kubenet has some limitations when it comes to scalability. Specifically, it can support up to 400 nodes and 250 pods per node.

Pros

  • Simple to set up, no additional configuration is required for pod networking.
  • Kubenet is a built-in solution in Kubernetes.

Cons

  • Limited scalability.
  • Complex networking configuration is required to set up Kubernetes Network Policies.

Azure CNI Overlay

Azure CNI Overlay is a more advanced networking solution that provides more flexibility and scalability. It assigns IP addresses to pods from a private overlay CIDR that is logically separate from the VNet address space, which conserves VNet IPs and eliminates the need for additional routing tables. Azure CNI Overlay can support up to 1000 nodes and 250 pods per node.

Pros

  • Higher scalability.
  • Performance on par with VMs in a VNet.
  • Supports Azure Network Policies, Calico, and Cilium.

Cons

  • Additional hop adds minor latency compared to Kubenet.
  • Only Linux is supported.

# Core naming and network-range inputs.
variable "customer_name" {
  type        = string
  default     = "lolas"
  description = "Customer name, used as the prefix for most resource names."
}

variable "node_resource_group" {
  type        = string
  default     = "nodes-rs"
  description = "Prefix of the AKS-managed resource group that holds the node resources."
}

variable "address_space" {
  type        = string
  default     = "11.0.0.0/22"
  description = "CIDR block of the virtual network; all subnet prefixes below must fall inside it."
}


# Subnet CIDR prefixes; all are carved out of var.address_space (11.0.0.0/22).
variable "subnet_address_core_prefix" {
  type        = string
  default     = "11.0.0.0/24"
  description = "CIDR prefix of the 'core' subnet (hosts the AKS node pools)."
}

variable "subnet_address_ep_prefix" {
  type        = string
  default     = "11.0.1.0/25"
  description = "CIDR prefix of the 'ep' subnet."
}

variable "subnet_address_nat_prefix" {
  type        = string
  default     = "11.0.1.128/25"
  description = "CIDR prefix of the 'nat' subnet."
}

variable "subnet_address_public_prefix" {
  type        = string
  default     = "11.0.2.0/27"
  description = "CIDR prefix of the 'public' subnet."
}

variable "subnet_address_site_prefix" {
  type        = string
  default     = "11.0.2.32/27"
  description = "CIDR prefix of the 'site' subnet."
}


variable "kubernetes_version" {
  type        = string
  default     = "1.25.4"
  description = "Kubernetes version for the AKS control plane and default node pool."
}

variable "automatic_channel_upgrade" {
  type        = string
  default     = "stable"
  description = "AKS automatic upgrade channel (e.g. none, patch, stable, rapid)."
}

variable "monitor_metrics" {
  type        = bool
  default     = true
  description = "Whether monitoring is enabled."
  # NOTE(review): this variable is not referenced by the cluster resource
  # below — confirm whether it should wire into a monitor_metrics block.
}

variable "system_min_count" {
  type        = number
  default     = 1
  description = "Minimum node count of the system (default) node pool."
}

variable "system_max_count" {
  type        = number
  default     = 5
  description = "Maximum node count of the system (default) node pool."
}

variable "vm_size" {
  type        = string
  default     = "Standard_D2_v2"
  description = "VM size of the system (default) node pool."
}

variable "http_application_routing_enabled" {
  type        = bool
  default     = true
  description = "Enable the (deprecated) HTTP application routing add-on."
}

# tags map
# Tags applied to the cluster, its node pools, and the default node pool.
variable "tags_map" {
  type        = map(string)
  description = "Common resource tags."
  default = {
    env          = "Production"
    upgrade      = "test"
    version      = "1.25.4"
    last_version = "1.19.07"
    ManagedBy    = "Terraform"
  }
}

variable "resource_group_location" {
  type        = string
  default     = "West Europe"
  description = "Location of the resource group."
}

variable "min_count_user" {
  type        = number
  default     = 1
  description = "Minimum node count for user node pools."
}

variable "max_count_user" {
  type        = number
  default     = 1
  description = "Maximum node count for user node pools."
}

variable "vm_size_user" {
  type        = string
  default     = "Standard_D2_v2"
  # Original description was copy-pasted from an unrelated variable; corrected.
  description = "VM size for user node pools."
}

variable "enable_auto_scaling" {
  type        = bool
  default     = true
  description = "Enable the cluster autoscaler on all node pools."
}

variable "sku_tier" {
  type        = string
  default     = "Paid"
  description = "AKS SKU tier."
  # NOTE(review): "Paid" is deprecated in newer azurerm provider releases in
  # favour of "Standard" — confirm against the provider version in use.
}
# tags map
# Tag set intended for user node pools.
# NOTE(review): not referenced by any resource visible in this file — confirm
# whether the node pools below should use it instead of var.tags_map.
variable "tags_map_user" {
  type        = map(string)
  description = "Tags for user node pools."
  default = {
    env          = "Production"
    nodes        = "node example"
    upgrade      = "test"
    version      = "1.25.4"
    last_version = "1.19.07"
    ManagedBy    = "Terraform"
  }
}



# Per-workload node pool autoscaler bounds. All default to a single node.
variable "min_count_analyzer" {
  type    = number
  default = 1
}

variable "max_count_analyzer" {
  type    = number
  default = 1
}

variable "min_count_clickhouse" {
  type    = number
  default = 1
}

variable "max_count_clickhouse" {
  type    = number
  default = 1
}

variable "min_count_cs" {
  type    = number
  default = 1
}

variable "max_count_cs" {
  type    = number
  default = 1
}

variable "min_count_ec" {
  type    = number
  default = 1
}

variable "max_count_ec" {
  type    = number
  default = 1
}

variable "min_count_kafka" {
  type    = number
  default = 1
}

variable "max_count_kafka" {
  type    = number
  default = 1
}

variable "min_count_pg" {
  type    = number
  default = 1
}

variable "max_count_pg" {
  type    = number
  default = 1
}

variable "min_count_management" {
  type    = number
  default = 1
}

variable "max_count_management" {
  type    = number
  default = 1
}

variable "min_count_site" {
  type    = number
  default = 1
}

variable "max_count_site" {
  type    = number
  default = 1
}
# Per-workload node pool VM sizes. All default to Standard_D2_v2.
variable "vm_size_ep" {
  type    = string
  default = "Standard_D2_v2"
}

variable "vm_size_site" {
  type    = string
  default = "Standard_D2_v2"
}

variable "vm_size_pg" {
  type    = string
  default = "Standard_D2_v2"
}

variable "vm_size_management" {
  type    = string
  default = "Standard_D2_v2"
}

variable "vm_size_kafka" {
  type    = string
  default = "Standard_D2_v2"
}

variable "vm_size_ec" {
  type    = string
  default = "Standard_D2_v2"
}

variable "vm_size_cs" {
  type    = string
  default = "Standard_D2_v2"
}

variable "vm_size_clickhouse" {
  type    = string
  default = "Standard_D2_v2"
}

variable "vm_size_analyzer" {
  type    = string
  default = "Standard_D2_v2"
}

# Resource group that holds all customer-facing resources; named after the customer.
resource "azurerm_resource_group" "rg" {
name = var.customer_name
location = var.resource_group_location
}

# Create a virtual network within the resource group
# Virtual network for the cluster and its subnets.
# The explicit depends_on was redundant: resource_group_name already references
# azurerm_resource_group.rg, which creates the dependency implicitly.
resource "azurerm_virtual_network" "vnet" {
  name                = "${var.customer_name}-vnet"
  resource_group_name = azurerm_resource_group.rg.name
  location            = var.resource_group_location
  address_space       = [var.address_space]
}


# 'core' subnet — hosts the AKS node pools. virtual_network_name references the
# VNet, so the dependency is implicit and no explicit depends_on is needed.
resource "azurerm_subnet" "subnet_address_core" {
  name                 = "core"
  resource_group_name  = azurerm_resource_group.rg.name
  virtual_network_name = azurerm_virtual_network.vnet.name
  address_prefixes     = [var.subnet_address_core_prefix]
}

# 'ep' subnet — var.subnet_address_ep_prefix was declared but the subnet itself
# was never created, although the documentation above describes five subnets.
resource "azurerm_subnet" "subnet_address_ep" {
  name                 = "ep"
  resource_group_name  = azurerm_resource_group.rg.name
  virtual_network_name = azurerm_virtual_network.vnet.name
  address_prefixes     = [var.subnet_address_ep_prefix]
}


# 'nat' subnet. Implicit dependency on the VNet via virtual_network_name.
resource "azurerm_subnet" "subnet_address_nat" {
  name                 = "nat"
  resource_group_name  = azurerm_resource_group.rg.name
  virtual_network_name = azurerm_virtual_network.vnet.name
  address_prefixes     = [var.subnet_address_nat_prefix]
}

# 'public' subnet. Implicit dependency on the VNet via virtual_network_name.
resource "azurerm_subnet" "subnet_address_public" {
  name                 = "public"
  resource_group_name  = azurerm_resource_group.rg.name
  virtual_network_name = azurerm_virtual_network.vnet.name
  address_prefixes     = [var.subnet_address_public_prefix]
}

# 'site' subnet. Implicit dependency on the VNet via virtual_network_name.
resource "azurerm_subnet" "subnet_address_site" {
  name                 = "site"
  resource_group_name  = azurerm_resource_group.rg.name
  virtual_network_name = azurerm_virtual_network.vnet.name
  address_prefixes     = [var.subnet_address_site_prefix]
}


# AKS cluster using Azure CNI Overlay with the Cilium eBPF data plane.
# The explicit depends_on on the VNet was redundant: vnet_subnet_id in the
# default node pool references the core subnet, which references the VNet.
resource "azurerm_kubernetes_cluster" "aks" {
  name                = var.customer_name
  dns_prefix          = var.customer_name
  location            = var.resource_group_location
  resource_group_name = azurerm_resource_group.rg.name

  # NOTE(review): with automatic_channel_upgrade = "stable", AKS expects an
  # alias minor version (e.g. "1.25") rather than a pinned patch version like
  # "1.25.4" — confirm before applying.
  kubernetes_version        = var.kubernetes_version
  automatic_channel_upgrade = var.automatic_channel_upgrade

  # NOTE(review): the HTTP application routing add-on is deprecated by AKS.
  http_application_routing_enabled = var.http_application_routing_enabled

  sku_tier            = var.sku_tier
  node_resource_group = "${var.node_resource_group}-${var.customer_name}"

  # Enable all CSI storage drivers and the snapshot controller.
  storage_profile {
    blob_driver_enabled         = true
    disk_driver_enabled         = true
    file_driver_enabled         = true
    snapshot_controller_enabled = true
  }

  # Overlay mode: pod IPs come from pod_cidr, logically separate from the VNet.
  network_profile {
    network_plugin      = "azure"
    network_plugin_mode = "Overlay"
    # NOTE(review): ebpf_data_plane was renamed network_data_plane in newer
    # azurerm releases — confirm the provider version pinned for this module.
    ebpf_data_plane = "cilium"
    pod_cidr        = "192.168.0.0/16"
  }

  default_node_pool {
    # Node pool names are limited to 12 lowercase alphanumeric characters;
    # truncate the same way the user node pools below do.
    name                = substr("${var.customer_name}sys", 0, 12)
    min_count           = var.system_min_count
    max_count           = var.system_max_count
    vm_size             = var.vm_size
    vnet_subnet_id      = azurerm_subnet.subnet_address_core.id
    enable_auto_scaling = var.enable_auto_scaling
    tags                = var.tags_map
  }

  # Managed identity for the cluster's control plane.
  identity {
    type = "SystemAssigned"
  }

  tags = var.tags_map
}





# User node pool for Kafka workloads.
# - depends_on was redundant: kubernetes_cluster_id already references the cluster.
# - substr() clamps length to the end of the string, so min(12, length(...))
#   was redundant; a plain 12 enforces the same 12-character pool-name limit.
resource "azurerm_kubernetes_cluster_node_pool" "kafka" {
  name                  = substr("${var.customer_name}kafka", 0, 12)
  mode                  = "User"
  kubernetes_cluster_id = azurerm_kubernetes_cluster.aks.id
  min_count             = var.min_count_kafka
  max_count             = var.max_count_kafka
  vnet_subnet_id        = azurerm_subnet.subnet_address_core.id
  vm_size               = var.vm_size_kafka
  enable_auto_scaling   = var.enable_auto_scaling
  tags                  = var.tags_map
}

# User node pool for management workloads.
# - depends_on was redundant: kubernetes_cluster_id already references the cluster.
# - substr() clamps length to the end of the string, so min(12, length(...))
#   was redundant; a plain 12 enforces the same 12-character pool-name limit.
resource "azurerm_kubernetes_cluster_node_pool" "management" {
  name                  = substr("${var.customer_name}management", 0, 12)
  mode                  = "User"
  kubernetes_cluster_id = azurerm_kubernetes_cluster.aks.id
  min_count             = var.min_count_management
  max_count             = var.max_count_management
  vnet_subnet_id        = azurerm_subnet.subnet_address_core.id
  vm_size               = var.vm_size_management
  enable_auto_scaling   = var.enable_auto_scaling
  tags                  = var.tags_map
}





# depends_on was redundant here: the value expression references the cluster.
output "client_certificate" {
  description = "Base64-encoded client certificate from the cluster kubeconfig."
  value       = azurerm_kubernetes_cluster.aks.kube_config[0].client_certificate
  sensitive   = true
}

# depends_on was redundant here: the value expression references the cluster.
output "kube_config" {
  description = "Raw kubeconfig for the AKS cluster."
  value       = azurerm_kubernetes_cluster.aks.kube_config_raw
  sensitive   = true
}

--

--