diff --git a/hack/aks/Makefile b/hack/aks/Makefile
index 2df03770b2..23e6290153 100644
--- a/hack/aks/Makefile
+++ b/hack/aks/Makefile
@@ -8,17 +8,22 @@ AZIMG = mcr.microsoft.com/azure-cli
 AZCLI ?= docker run --rm -v $(AZCFG):/root/.azure -v $(KUBECFG):/root/.kube -v $(SSH):/root/.ssh -v $(PWD):/root/tmpsrc $(AZIMG) az
 
 # overrideable defaults
-AUTOUPGRADE ?= patch
-K8S_VER ?= 1.30
-NODE_COUNT ?= 2
-NODE_COUNT_WIN ?= $(NODE_COUNT)
-NODEUPGRADE ?= NodeImage
-OS ?= linux # Used to signify if you want to bring up a windows nodePool on byocni clusters
-OS_SKU ?= Ubuntu
-OS_SKU_WIN ?= Windows2022
-REGION ?= westus2
-VM_SIZE ?= Standard_B2s
-VM_SIZE_WIN ?= Standard_B2s
+AUTOUPGRADE     ?= patch
+K8S_VER         ?= 1.30
+NODE_COUNT      ?= 2
+NODE_COUNT_WIN  ?= $(NODE_COUNT)
+NODEUPGRADE     ?= NodeImage
+OS              ?= linux # Used to signify if you want to bring up a windows nodePool on byocni clusters
+OS_SKU          ?= Ubuntu
+OS_SKU_WIN      ?= Windows2022
+REGION          ?= westus2
+VM_SIZE         ?= Standard_B2s
+VM_SIZE_WIN     ?= Standard_B2s
+IP_TAG          ?= FirstPartyUsage=/DelegatedNetworkControllerTest
+IP_PREFIX       ?= serviceTaggedIp
+PUBLIC_IP_ID    ?= /subscriptions/$(SUB)/resourceGroups/$(GROUP)/providers/Microsoft.Network/publicIPAddresses
+PUBLIC_IPv4     ?= $(PUBLIC_IP_ID)/$(IP_PREFIX)-$(CLUSTER)-v4
+PUBLIC_IPv6     ?= $(PUBLIC_IP_ID)/$(IP_PREFIX)-$(CLUSTER)-v6
 KUBE_PROXY_JSON_PATH ?= ./kube-proxy.json
 
 # overrideable variables
@@ -72,6 +77,22 @@ vars: ## Show the input vars configured for the cluster commands
 
 rg-up: ## Create resource group
 	@$(AZCLI) group create --location $(REGION) --name $(GROUP)
 
+ip: ## Create a service-tagged static public IP (requires IPVERSION=v4 or IPVERSION=v6)
+	$(AZCLI) network public-ip create --name $(IP_PREFIX)-$(CLUSTER)-$(IPVERSION) \
+		--resource-group $(GROUP) \
+		--allocation-method Static \
+		--ip-tags $(IP_TAG) \
+		--location $(REGION) \
+		--sku Standard \
+		--tier Regional \
+		--version IP$(IPVERSION)
+
+ipv4: ## Create the service-tagged IPv4 public IP
+	@$(MAKE) ip IPVERSION=v4
+
+ipv6: ## Create the service-tagged IPv6 public IP
+	@$(MAKE) ip IPVERSION=v6
+
 rg-down: ## Delete resource group
 	$(AZCLI) group delete -g $(GROUP) --yes
@@ -89,7 +110,6 @@ overlay-net-up: ## Create vnet, nodenet subnets
 	$(AZCLI) network vnet create -g $(GROUP) -l $(REGION) --name $(VNET) --address-prefixes 10.0.0.0/8 -o none
 	$(AZCLI) network vnet subnet create -g $(GROUP) --vnet-name $(VNET) --name nodenet --address-prefix 10.10.0.0/16 -o none
-
 ##@ AKS Clusters
 
 byocni-up: swift-byocni-up ## Alias to swift-byocni-up
 
@@ -97,15 +117,15 @@ cilium-up: swift-cilium-up ## Alias to swift-cilium-up
 up: swift-up ## Alias to swift-up
 
-nodesubnet-byocni-nokubeproxy-up: rg-up overlay-net-up ## Brings up an NodeSubnet BYO CNI cluster without kube-proxy
+nodesubnet-byocni-nokubeproxy-up: rg-up ipv4 overlay-net-up ## Brings up a NodeSubnet BYO CNI cluster without kube-proxy
 	$(AZCLI) aks create -n $(CLUSTER) -g $(GROUP) -l $(REGION) \
 		--auto-upgrade-channel $(AUTOUPGRADE) \
 		--node-os-upgrade-channel $(NODEUPGRADE) \
 		--kubernetes-version $(K8S_VER) \
 		--node-count $(NODE_COUNT) \
 		--node-vm-size $(VM_SIZE) \
-		--load-balancer-sku standard \
 		--max-pods 250 \
+		--load-balancer-outbound-ips $(PUBLIC_IPv4) \
 		--network-plugin none \
 		--vnet-subnet-id /subscriptions/$(SUB)/resourceGroups/$(GROUP)/providers/Microsoft.Network/virtualNetworks/$(VNET)/subnets/nodenet \
 		--os-sku $(OS_SKU) \
@@ -114,14 +134,14 @@ nodesubnet-byocni-nokubeproxy-up: rg-up overlay-net-up ## Brings up an NodeSubne
 		--yes
 	@$(MAKE) set-kubeconf
 
-overlay-byocni-up: rg-up overlay-net-up ## Brings up an Overlay BYO CNI cluster
+overlay-byocni-up: rg-up ipv4 overlay-net-up ## Brings up an Overlay BYO CNI cluster
 	$(AZCLI) aks create -n $(CLUSTER) -g $(GROUP) -l $(REGION) \
 		--auto-upgrade-channel $(AUTOUPGRADE) \
 		--node-os-upgrade-channel $(NODEUPGRADE) \
 		--kubernetes-version $(K8S_VER) \
 		--node-count $(NODE_COUNT) \
 		--node-vm-size $(VM_SIZE) \
-		--load-balancer-sku standard \
+		--load-balancer-outbound-ips $(PUBLIC_IPv4) \
 		--network-plugin none \
 		--network-plugin-mode overlay \
 		--pod-cidr 192.168.0.0/16 \
@@ -134,14 +154,14 @@ ifeq ($(OS),windows)
 endif
 	@$(MAKE) set-kubeconf
 
-overlay-byocni-nokubeproxy-up: rg-up overlay-net-up ## Brings up an Overlay BYO CNI cluster without kube-proxy
+overlay-byocni-nokubeproxy-up: rg-up ipv4 overlay-net-up ## Brings up an Overlay BYO CNI cluster without kube-proxy
 	$(AZCLI) aks create -n $(CLUSTER) -g $(GROUP) -l $(REGION) \
 		--auto-upgrade-channel $(AUTOUPGRADE) \
 		--node-os-upgrade-channel $(NODEUPGRADE) \
 		--kubernetes-version $(K8S_VER) \
 		--node-count $(NODE_COUNT) \
 		--node-vm-size $(VM_SIZE) \
-		--load-balancer-sku basic \
+		--load-balancer-outbound-ips $(PUBLIC_IPv4) \
 		--network-plugin none \
 		--network-plugin-mode overlay \
 		--pod-cidr 192.168.0.0/16 \
@@ -151,14 +171,14 @@ overlay-byocni-nokubeproxy-up: rg-up overlay-net-up ## Brings up an Overlay BYO
 		--yes
 	@$(MAKE) set-kubeconf
 
-overlay-cilium-up: rg-up overlay-net-up ## Brings up an Overlay Cilium cluster
+overlay-cilium-up: rg-up ipv4 overlay-net-up ## Brings up an Overlay Cilium cluster
 	$(AZCLI) aks create -n $(CLUSTER) -g $(GROUP) -l $(REGION) \
 		--auto-upgrade-channel $(AUTOUPGRADE) \
 		--node-os-upgrade-channel $(NODEUPGRADE) \
 		--kubernetes-version $(K8S_VER) \
 		--node-count $(NODE_COUNT) \
 		--node-vm-size $(VM_SIZE) \
-		--load-balancer-sku basic \
+		--load-balancer-outbound-ips $(PUBLIC_IPv4) \
 		--network-plugin azure \
 		--network-dataplane cilium \
 		--network-plugin-mode overlay \
@@ -168,14 +188,14 @@ overlay-cilium-up: rg-up overlay-net-up ## Brings up an Overlay Cilium cluster
 		--yes
 	@$(MAKE) set-kubeconf
 
-overlay-up: rg-up overlay-net-up ## Brings up an Overlay AzCNI cluster
+overlay-up: rg-up ipv4 overlay-net-up ## Brings up an Overlay AzCNI cluster
 	$(AZCLI) aks create -n $(CLUSTER) -g $(GROUP) -l $(REGION) \
 		--auto-upgrade-channel $(AUTOUPGRADE) \
 		--node-os-upgrade-channel $(NODEUPGRADE) \
 		--kubernetes-version $(K8S_VER) \
 		--node-count $(NODE_COUNT) \
 		--node-vm-size $(VM_SIZE) \
-		--load-balancer-sku basic \
+		--load-balancer-outbound-ips $(PUBLIC_IPv4) \
 		--network-plugin azure \
 		--network-plugin-mode overlay \
 		--pod-cidr 192.168.0.0/16 \
@@ -184,14 +204,14 @@ overlay-up: rg-up overlay-net-up ## Brings up an Overlay AzCNI cluster
 		--yes
 	@$(MAKE) set-kubeconf
 
-swift-byocni-up: rg-up swift-net-up ## Bring up a SWIFT BYO CNI cluster
+swift-byocni-up: rg-up ipv4 swift-net-up ## Bring up a SWIFT BYO CNI cluster
 	$(AZCLI) aks create -n $(CLUSTER) -g $(GROUP) -l $(REGION) \
 		--auto-upgrade-channel $(AUTOUPGRADE) \
 		--node-os-upgrade-channel $(NODEUPGRADE) \
 		--kubernetes-version $(K8S_VER) \
 		--node-count $(NODE_COUNT) \
 		--node-vm-size $(VM_SIZE) \
-		--load-balancer-sku standard \
+		--load-balancer-outbound-ips $(PUBLIC_IPv4) \
 		--network-plugin none \
 		--vnet-subnet-id /subscriptions/$(SUB)/resourceGroups/$(GROUP)/providers/Microsoft.Network/virtualNetworks/$(VNET)/subnets/nodenet \
 		--pod-subnet-id /subscriptions/$(SUB)/resourceGroups/$(GROUP)/providers/Microsoft.Network/virtualNetworks/$(VNET)/subnets/podnet \
@@ -203,14 +223,14 @@ ifeq ($(OS),windows)
 endif
 	@$(MAKE) set-kubeconf
 
-swift-byocni-nokubeproxy-up: rg-up swift-net-up ## Bring up a SWIFT BYO CNI cluster without kube-proxy
+swift-byocni-nokubeproxy-up: rg-up ipv4 swift-net-up ## Bring up a SWIFT BYO CNI cluster without kube-proxy
 	$(AZCLI) aks create -n $(CLUSTER) -g $(GROUP) -l $(REGION) \
 		--auto-upgrade-channel $(AUTOUPGRADE) \
 		--node-os-upgrade-channel $(NODEUPGRADE) \
 		--kubernetes-version $(K8S_VER) \
 		--node-count $(NODE_COUNT) \
 		--node-vm-size $(VM_SIZE) \
-		--load-balancer-sku basic \
+		--load-balancer-outbound-ips $(PUBLIC_IPv4) \
 		--network-plugin none \
 		--vnet-subnet-id /subscriptions/$(SUB)/resourceGroups/$(GROUP)/providers/Microsoft.Network/virtualNetworks/$(VNET)/subnets/nodenet \
 		--pod-subnet-id /subscriptions/$(SUB)/resourceGroups/$(GROUP)/providers/Microsoft.Network/virtualNetworks/$(VNET)/subnets/podnet \
@@ -220,14 +240,14 @@ swift-byocni-nokubeproxy-up: rg-up swift-net-up ## Bring up a SWIFT BYO CNI clus
 		--yes
 	@$(MAKE) set-kubeconf
 
-swift-cilium-up: rg-up swift-net-up ## Bring up a SWIFT Cilium cluster
+swift-cilium-up: rg-up ipv4 swift-net-up ## Bring up a SWIFT Cilium cluster
 	$(AZCLI) aks create -n $(CLUSTER) -g $(GROUP) -l $(REGION) \
 		--auto-upgrade-channel $(AUTOUPGRADE) \
 		--node-os-upgrade-channel $(NODEUPGRADE) \
 		--kubernetes-version $(K8S_VER) \
 		--node-count $(NODE_COUNT) \
 		--node-vm-size $(VM_SIZE) \
-		--load-balancer-sku basic \
+		--load-balancer-outbound-ips $(PUBLIC_IPv4) \
 		--network-plugin azure \
 		--network-dataplane cilium \
 		--aks-custom-headers AKSHTTPCustomFeatures=Microsoft.ContainerService/CiliumDataplanePreview \
@@ -237,14 +257,14 @@ swift-cilium-up: rg-up swift-net-up ## Bring up a SWIFT Cilium cluster
 		--yes
 	@$(MAKE) set-kubeconf
 
-swift-up: rg-up swift-net-up ## Bring up a SWIFT AzCNI cluster
+swift-up: rg-up ipv4 swift-net-up ## Bring up a SWIFT AzCNI cluster
 	$(AZCLI) aks create -n $(CLUSTER) -g $(GROUP) -l $(REGION) \
 		--auto-upgrade-channel $(AUTOUPGRADE) \
 		--node-os-upgrade-channel $(NODEUPGRADE) \
 		--kubernetes-version $(K8S_VER) \
 		--node-count $(NODE_COUNT) \
 		--node-vm-size $(VM_SIZE) \
-		--load-balancer-sku basic \
+		--load-balancer-outbound-ips $(PUBLIC_IPv4) \
 		--network-plugin azure \
 		--vnet-subnet-id /subscriptions/$(SUB)/resourceGroups/$(GROUP)/providers/Microsoft.Network/virtualNetworks/$(VNET)/subnets/nodenet \
 		--pod-subnet-id /subscriptions/$(SUB)/resourceGroups/$(GROUP)/providers/Microsoft.Network/virtualNetworks/$(VNET)/subnets/podnet \
@@ -252,7 +272,7 @@ swift-up: rg-up swift-net-up ## Bring up a SWIFT AzCNI cluster
 		--yes
 	@$(MAKE) set-kubeconf
 
-swiftv2-multitenancy-cluster-up: rg-up
+swiftv2-multitenancy-cluster-up: rg-up ipv4
 	$(AZCLI) aks create -n $(CLUSTER) -g $(GROUP) -l $(REGION) \
 		--network-plugin azure \
 		--network-plugin-mode overlay \
@@ -260,16 +280,18 @@ swiftv2-multitenancy-cluster-up: rg-up
 		--nodepool-name "mtapool" \
 		--node-vm-size $(VM_SIZE) \
 		--node-count 2 \
+		--load-balancer-outbound-ips $(PUBLIC_IPv4) \
 		--nodepool-tags fastpathenabled=true \
 		--no-ssh-key \
 		--yes
 	@$(MAKE) set-kubeconf
 
-swiftv2-dummy-cluster-up: rg-up swift-net-up ## Bring up a SWIFT AzCNI cluster
+swiftv2-dummy-cluster-up: rg-up ipv4 swift-net-up ## Bring up a SWIFT AzCNI cluster
 	$(AZCLI) aks create -n $(CLUSTER) -g $(GROUP) -l $(REGION) \
 		--network-plugin azure \
 		--vnet-subnet-id /subscriptions/$(SUB)/resourceGroups/$(GROUP)/providers/Microsoft.Network/virtualNetworks/$(VNET)/subnets/nodenet \
 		--pod-subnet-id /subscriptions/$(SUB)/resourceGroups/$(GROUP)/providers/Microsoft.Network/virtualNetworks/$(VNET)/subnets/podnet \
+		--load-balancer-outbound-ips $(PUBLIC_IPv4) \
 		--no-ssh-key \
 		--yes
 	@$(MAKE) set-kubeconf
 
@@ -277,14 +299,14 @@ swiftv2-dummy-cluster-up: rg-up swift-net-up ## Bring up a SWIFT AzCNI cluster
 # The below Vnet Scale clusters are currently only in private preview and available with Kubernetes 1.28
 # These AKS clusters can only be created in a limited subscription listed here:
 # https://dev.azure.com/msazure/CloudNativeCompute/_git/aks-rp?path=/resourceprovider/server/microsoft.com/containerservice/flags/network_flags.go&version=GBmaster&line=134&lineEnd=135&lineStartColumn=1&lineEndColumn=1&lineStyle=plain&_a=contents
-vnetscale-swift-byocni-up: rg-up vnetscale-swift-net-up ## Bring up a Vnet Scale SWIFT BYO CNI cluster
+vnetscale-swift-byocni-up: rg-up ipv4 vnetscale-swift-net-up ## Bring up a Vnet Scale SWIFT BYO CNI cluster
 	$(AZCLI) aks create -n $(CLUSTER) -g $(GROUP) -l $(REGION) \
 		--auto-upgrade-channel $(AUTOUPGRADE) \
 		--node-os-upgrade-channel $(NODEUPGRADE) \
 		--kubernetes-version $(K8S_VER) \
 		--node-count $(NODE_COUNT) \
 		--node-vm-size $(VM_SIZE) \
-		--load-balancer-sku basic \
+		--load-balancer-outbound-ips $(PUBLIC_IPv4) \
 		--network-plugin none \
 		--vnet-subnet-id /subscriptions/$(SUB)/resourceGroups/$(GROUP)/providers/Microsoft.Network/virtualNetworks/$(VNET)/subnets/nodenet \
 		--pod-subnet-id /subscriptions/$(SUB)/resourceGroups/$(GROUP)/providers/Microsoft.Network/virtualNetworks/$(VNET)/subnets/podnet \
@@ -293,14 +315,14 @@ vnetscale-swift-byocni-up: rg-up vnetscale-swift-net-up ## Bring up a Vnet Scale
 		--yes
 	@$(MAKE) set-kubeconf
 
-vnetscale-swift-byocni-nokubeproxy-up: rg-up vnetscale-swift-net-up ## Bring up a Vnet Scale SWIFT BYO CNI cluster without kube-proxy
+vnetscale-swift-byocni-nokubeproxy-up: rg-up ipv4 vnetscale-swift-net-up ## Bring up a Vnet Scale SWIFT BYO CNI cluster without kube-proxy
 	$(AZCLI) aks create -n $(CLUSTER) -g $(GROUP) -l $(REGION) \
 		--auto-upgrade-channel $(AUTOUPGRADE) \
 		--node-os-upgrade-channel $(NODEUPGRADE) \
 		--kubernetes-version $(K8S_VER) \
 		--node-count $(NODE_COUNT) \
 		--node-vm-size $(VM_SIZE) \
-		--load-balancer-sku basic \
+		--load-balancer-outbound-ips $(PUBLIC_IPv4) \
 		--network-plugin none \
 		--vnet-subnet-id /subscriptions/$(SUB)/resourceGroups/$(GROUP)/providers/Microsoft.Network/virtualNetworks/$(VNET)/subnets/nodenet \
 		--pod-subnet-id /subscriptions/$(SUB)/resourceGroups/$(GROUP)/providers/Microsoft.Network/virtualNetworks/$(VNET)/subnets/podnet \
@@ -310,14 +332,14 @@ vnetscale-swift-byocni-nokubeproxy-up: rg-up vnetscale-swift-net-up ## Bring up
 		--yes
 	@$(MAKE) set-kubeconf
 
-vnetscale-swift-cilium-up: rg-up vnetscale-swift-net-up ## Bring up a Vnet Scale SWIFT Cilium cluster
+vnetscale-swift-cilium-up: rg-up ipv4 vnetscale-swift-net-up ## Bring up a Vnet Scale SWIFT Cilium cluster
 	$(AZCLI) aks create -n $(CLUSTER) -g $(GROUP) -l $(REGION) \
 		--auto-upgrade-channel $(AUTOUPGRADE) \
 		--node-os-upgrade-channel $(NODEUPGRADE) \
 		--kubernetes-version $(K8S_VER) \
 		--node-count $(NODE_COUNT) \
 		--node-vm-size $(VM_SIZE) \
-		--load-balancer-sku basic \
+		--load-balancer-outbound-ips $(PUBLIC_IPv4) \
 		--network-plugin azure \
 		--network-dataplane cilium \
 		--aks-custom-headers AKSHTTPCustomFeatures=Microsoft.ContainerService/CiliumDataplanePreview \
@@ -327,14 +349,14 @@ vnetscale-swift-cilium-up: rg-up vnetscale-swift-net-up ## Bring up a Vnet Scale
 		--yes
 	@$(MAKE) set-kubeconf
 
-vnetscale-swift-up: rg-up vnetscale-swift-net-up ## Bring up a Vnet Scale SWIFT AzCNI cluster
+vnetscale-swift-up: rg-up ipv4 vnetscale-swift-net-up ## Bring up a Vnet Scale SWIFT AzCNI cluster
 	$(AZCLI) aks create -n $(CLUSTER) -g $(GROUP) -l $(REGION) \
 		--auto-upgrade-channel $(AUTOUPGRADE) \
 		--node-os-upgrade-channel $(NODEUPGRADE) \
 		--kubernetes-version $(K8S_VER) \
 		--node-count $(NODE_COUNT) \
 		--node-vm-size $(VM_SIZE) \
-		--load-balancer-sku basic \
+		--load-balancer-outbound-ips $(PUBLIC_IPv4) \
 		--network-plugin azure \
 		--vnet-subnet-id /subscriptions/$(SUB)/resourceGroups/$(GROUP)/providers/Microsoft.Network/virtualNetworks/$(VNET)/subnets/nodenet \
 		--pod-subnet-id /subscriptions/$(SUB)/resourceGroups/$(GROUP)/providers/Microsoft.Network/virtualNetworks/$(VNET)/subnets/podnet \
@@ -342,13 +364,14 @@ vnetscale-swift-up: rg-up vnetscale-swift-net-up ## Bring up a Vnet Scale SWIFT
 		--yes
 	@$(MAKE) set-kubeconf
 
-windows-cniv1-up: rg-up overlay-net-up ## Bring up a Windows CNIv1 cluster
+windows-cniv1-up: rg-up ipv4 overlay-net-up ## Bring up a Windows CNIv1 cluster
 	$(AZCLI) aks create -n $(CLUSTER) -g $(GROUP) -l $(REGION) \
 		--auto-upgrade-channel $(AUTOUPGRADE) \
 		--node-os-upgrade-channel $(NODEUPGRADE) \
 		--kubernetes-version $(K8S_VER) \
 		--node-count $(NODE_COUNT) \
 		--node-vm-size $(VM_SIZE) \
+		--load-balancer-outbound-ips $(PUBLIC_IPv4) \
 		--network-plugin azure \
 		--windows-admin-password $(WINDOWS_PASSWORD) \
 		--windows-admin-username $(WINDOWS_USERNAME) \
@@ -358,13 +381,14 @@ windows-cniv1-up: rg-up overlay-net-up ## Bring up a Windows CNIv1 cluster
 	@$(MAKE) windows-nodepool-up
 	@$(MAKE) set-kubeconf
 
-linux-cniv1-up: rg-up overlay-net-up ## Bring up a Linux CNIv1 cluster
+linux-cniv1-up: rg-up ipv4 overlay-net-up ## Bring up a Linux CNIv1 cluster
 	$(AZCLI) aks create -n $(CLUSTER) -g $(GROUP) -l $(REGION) \
 		--auto-upgrade-channel $(AUTOUPGRADE) \
 		--node-os-upgrade-channel $(NODEUPGRADE) \
 		--kubernetes-version $(K8S_VER) \
 		--node-count $(NODE_COUNT) \
 		--node-vm-size $(VM_SIZE) \
+		--load-balancer-outbound-ips $(PUBLIC_IPv4) \
 		--max-pods 250 \
 		--network-plugin azure \
 		--vnet-subnet-id /subscriptions/$(SUB)/resourceGroups/$(GROUP)/providers/Microsoft.Network/virtualNetworks/$(VNET)/subnets/nodenet \
@@ -373,13 +397,14 @@ linux-cniv1-up: rg-up overlay-net-up ## Bring up a Linux CNIv1 cluster
 		--yes
 	@$(MAKE) set-kubeconf
 
-dualstack-overlay-up: rg-up overlay-net-up ## Brings up an dualstack Overlay cluster with Linux node only
+dualstack-overlay-up: rg-up ipv4 ipv6 overlay-net-up ## Brings up a dualstack Overlay cluster with Linux node only
 	$(AZCLI) aks create -n $(CLUSTER) -g $(GROUP) -l $(REGION) \
 		--auto-upgrade-channel $(AUTOUPGRADE) \
 		--node-os-upgrade-channel $(NODEUPGRADE) \
 		--kubernetes-version $(K8S_VER) \
 		--node-count $(NODE_COUNT) \
 		--node-vm-size $(VM_SIZE) \
+		--load-balancer-outbound-ips $(PUBLIC_IPv4),$(PUBLIC_IPv6) \
 		--network-plugin azure \
 		--network-plugin-mode overlay \
 		--subscription $(SUB) \
@@ -389,13 +414,14 @@ dualstack-overlay-up: rg-up overlay-net-up ## Brings up an dualstack Overlay clu
 		--yes
 	@$(MAKE) set-kubeconf
 
-dualstack-overlay-byocni-up: rg-up overlay-net-up ## Brings up an dualstack Overlay BYO CNI cluster
+dualstack-overlay-byocni-up: rg-up ipv4 ipv6 overlay-net-up ## Brings up a dualstack Overlay BYO CNI cluster
 	$(AZCLI) aks create -n $(CLUSTER) -g $(GROUP) -l $(REGION) \
 		--auto-upgrade-channel $(AUTOUPGRADE) \
 		--node-os-upgrade-channel $(NODEUPGRADE) \
 		--kubernetes-version $(K8S_VER) \
 		--node-count $(NODE_COUNT) \
 		--node-vm-size $(VM_SIZE) \
+		--load-balancer-outbound-ips $(PUBLIC_IPv4),$(PUBLIC_IPv6) \
 		--network-plugin none \
 		--network-plugin-mode overlay \
 		--subscription $(SUB) \
@@ -405,13 +431,14 @@ dualstack-overlay-byocni-up: rg-up overlay-net-up ## Brings up an dualstack Over
 		--yes
 	@$(MAKE) set-kubeconf
 
-cilium-dualstack-up: rg-up overlay-net-up ## Brings up a Cilium Dualstack Overlay cluster with Linux node only
+cilium-dualstack-up: rg-up ipv4 ipv6 overlay-net-up ## Brings up a Cilium Dualstack Overlay cluster with Linux node only
 	$(AZCLI) aks create -n $(CLUSTER) -g $(GROUP) -l $(REGION) \
 		--auto-upgrade-channel $(AUTOUPGRADE) \
 		--node-os-upgrade-channel $(NODEUPGRADE) \
 		--kubernetes-version $(K8S_VER) \
 		--node-count $(NODE_COUNT) \
 		--node-vm-size $(VM_SIZE) \
+		--load-balancer-outbound-ips $(PUBLIC_IPv4),$(PUBLIC_IPv6) \
 		--network-plugin azure \
 		--network-plugin-mode overlay \
 		--network-dataplane cilium \
@@ -422,13 +449,14 @@ cilium-dualstack-up: rg-up overlay-net-up ## Brings up a Cilium Dualstack Overla
 		--yes
 	@$(MAKE) set-kubeconf
 
-dualstack-byocni-nokubeproxy-up: rg-up overlay-net-up ## Brings up a Dualstack overlay BYOCNI cluster with Linux node only and no kube-proxy
+dualstack-byocni-nokubeproxy-up: rg-up ipv4 ipv6 overlay-net-up ## Brings up a Dualstack overlay BYOCNI cluster with Linux node only and no kube-proxy
 	$(AZCLI) aks create -n $(CLUSTER) -g $(GROUP) -l $(REGION) \
 		--auto-upgrade-channel $(AUTOUPGRADE) \
 		--node-os-upgrade-channel $(NODEUPGRADE) \
 		--kubernetes-version $(K8S_VER) \
 		--node-count $(NODE_COUNT) \
 		--node-vm-size $(VM_SIZE) \
+		--load-balancer-outbound-ips $(PUBLIC_IPv4),$(PUBLIC_IPv6) \
 		--network-plugin none \
 		--network-plugin-mode overlay \
 		--subscription $(SUB) \
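
Usage sketch (illustrative only; the subscription ID and resource names below are placeholders): with this patch, every cluster target depends on ipv4 (the dualstack targets also on ipv6), so the service-tagged public IPs named $(IP_PREFIX)-$(CLUSTER)-v4/-v6 are created before aks create attaches them via --load-balancer-outbound-ips.

    # From the repo root, create the tagged IP and bring up an overlay BYO CNI cluster in one shot:
    make -C hack/aks overlay-byocni-up SUB=00000000-0000-0000-0000-000000000000 \
        GROUP=e2e-rg CLUSTER=e2e-cluster VNET=e2e-vnet REGION=westus2

    # Or create only the tagged v4/v6 IPs against an existing resource group:
    make -C hack/aks ipv4 ipv6 GROUP=e2e-rg CLUSTER=e2e-cluster REGION=westus2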