I have been trying to create an EKS cluster with self-managed nodes on AWS using Terraform, but I can't get my Kubernetes Ingress to create a load balancer. There are no errors; no load balancer ever gets created, and the apply just times out.
I created a load balancer manually in my account first and verified that the load balancer service-linked role is present; the AWSElasticLoadBalancingServiceRolePolicy policy is accessed when my Terraform code runs.
I have relied heavily on this tutorial.
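For reference, that check can also be expressed in Terraform itself; this data source lookup fails the plan if the service-linked role is missing (a sketch, and the data source label is mine):

# Fails at plan time if the ELB service-linked role does not exist.
data "aws_iam_role" "elb_service_linked" {
  name = "AWSServiceRoleForElasticLoadBalancing"
}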
tfvars:
aws_region = "ap-southeast-1"
domain = "*.mydomain.com"
cluster_name = "my-tf-eks-cluster"
vpc_id = "vpc-0d7700e26db6b3e21"
app_subnet_ids = "subnet-03c1e8c57110c92e0, subnet-0413e8bf24cb32595, subnet-047dcce0b810f0fbd"
// gateway subnet IDs
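For completeness, the matching variable declarations (not shown in the original) would look something like this; note that app_subnet_ids is a single comma-separated string, which is the format the subnets annotation below accepts:

variable "aws_region" {
  type = string
}

variable "domain" {
  type = string
}

variable "cluster_name" {
  type = string
}

variable "vpc_id" {
  type = string
}

# A single comma-separated string, passed straight through to the
# alb.ingress.kubernetes.io/subnets annotation.
variable "app_subnet_ids" {
  type = string
}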
Terraform code:
terraform {
}

provider "aws" {
  region  = var.aws_region
  version = "~> 2.8"
}
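Not shown above is the kubernetes provider configuration that all of the kubernetes_* resources below depend on. A typical wiring for an existing EKS cluster looks roughly like this (a sketch; the data source labels are mine, and load_config_file applies to 1.x versions of the provider):

data "aws_eks_cluster" "this" {
  name = var.cluster_name
}

data "aws_eks_cluster_auth" "this" {
  name = var.cluster_name
}

provider "kubernetes" {
  host                   = data.aws_eks_cluster.this.endpoint
  cluster_ca_certificate = base64decode(data.aws_eks_cluster.this.certificate_authority[0].data)
  token                  = data.aws_eks_cluster_auth.this.token
  load_config_file       = false # 1.x providers only; drop on 2.x
}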
data "aws_acm_certificate" "default" {
domain = var.domain
statuses = ["ISSUED"]
}
resource "kubernetes_service_account" "alb-ingress" {
metadata {
name = "alb-ingress-controller"
namespace = "kube-system"
labels = {
"app.kubernetes.io/name" = "alb-ingress-controller"
}
}
automount_service_account_token = true
}
resource "kubernetes_cluster_role" "alb-ingress" {
metadata {
name = "alb-ingress-controller"
labels = {
"app.kubernetes.io/name" = "alb-ingress-controller"
}
}
rule {
api_groups = ["", "extensions"]
resources = ["configmaps", "endpoints", "events", "ingresses", "ingresses/status", "services"]
verbs = ["create", "get", "list", "update", "watch", "patch"]
}
rule {
api_groups = ["", "extensions"]
resources = ["nodes", "pods", "secrets", "services", "namespaces"]
verbs = ["get", "list", "watch"]
}
}
resource "kubernetes_cluster_role_binding" "alb-ingress" {
metadata {
name = "alb-ingress-controller"
labels = {
"app.kubernetes.io/name" = "alb-ingress-controller"
}
}
role_ref {
api_group = "rbac.authorization.k8s.io"
kind = "ClusterRole"
name = "alb-ingress-controller"
}
subject {
kind = "ServiceAccount"
name = "alb-ingress-controller"
namespace = "kube-system"
}
}
resource "kubernetes_deployment" "alb-ingress" {
metadata {
name = "alb-ingress-controller"
labels = {
"app.kubernetes.io/name" = "alb-ingress-controller"
}
namespace = "kube-system"
}
spec {
selector {
match_labels = {
"app.kubernetes.io/name" = "alb-ingress-controller"
}
}
template {
metadata {
labels = {
"app.kubernetes.io/name" = "alb-ingress-controller"
}
}
spec {
volume {
name = kubernetes_service_account.alb-ingress.default_secret_name
secret {
secret_name = kubernetes_service_account.alb-ingress.default_secret_name
}
}
container {
# This is where you change the version when Amazon comes out with a new version of the ingress controller
image = "docker.io/amazon/aws-alb-ingress-controller:v1.1.8"
name = "alb-ingress-controller"
args = [
"--ingress-class=alb",
"--cluster-name=${var.cluster_name}",
"--aws-vpc-id=${var.vpc_id}",
"--aws-region=${var.aws_region}"
]
volume_mount {
name = kubernetes_service_account.alb-ingress.default_secret_name
mount_path = "/var/run/secrets/kubernetes.io/serviceaccount"
read_only = true
}
}
service_account_name = "alb-ingress-controller"
}
}
}
}
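One thing this setup leaves implicit: the service account above carries no IAM role annotation, so the controller inherits whatever permissions the worker nodes' instance role has. If no load balancer appears, it is worth confirming that role carries the ALB ingress controller policy. A sketch, where var.node_role_name and the local policy file are assumptions:

# The policy JSON ships with the aws-alb-ingress-controller docs; it is
# assumed to be saved locally here.
resource "aws_iam_policy" "alb_ingress" {
  name   = "ALBIngressControllerIAMPolicy"
  policy = file("${path.module}/alb-ingress-iam-policy.json")
}

resource "aws_iam_role_policy_attachment" "alb_ingress" {
  role       = var.node_role_name # hypothetical: the worker nodes' instance role name
  policy_arn = aws_iam_policy.alb_ingress.arn
}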
resource "kubernetes_ingress" "main" {
metadata {
name = "main-ingress"
annotations = {
"alb.ingress.kubernetes.io/scheme" = "internet-facing"
"kubernetes.io/ingress.class" = "alb"
"alb.ingress.kubernetes.io/subnets" = "${var.app_subnet_ids}"
"alb.ingress.kubernetes.io/certificate-arn" = "${data.aws_acm_certificate.default.arn}"
"alb.ingress.kubernetes.io/listen-ports" = <<JSON
[
{"HTTP": 80},
{"HTTPS": 443}
]
JSON
"alb.ingress.kubernetes.io/actions.ssl-redirect" = <<JSON
{
"Type": "redirect",
"RedirectConfig": {
"Protocol": "HTTPS",
"Port": "443",
"StatusCode": "HTTP_301"
}
}
JSON
}
}
spec {
rule {
host = "app.xactpos.com"
http {
path {
backend {
service_name = "ssl-redirect"
service_port = "use-annotation"
}
path = "/*"
}
path {
backend {
service_name = "app-service1"
service_port = 80
}
path = "/service1"
}
path {
backend {
service_name = "app-service2"
service_port = 80
}
path = "/service2"
}
}
}
rule {
host = "api.xactpos.com"
http {
path {
backend {
service_name = "ssl-redirect"
service_port = "use-annotation"
}
path = "/*"
}
path {
backend {
service_name = "api-service1"
service_port = 80
}
path = "/service3"
}
path {
backend {
service_name = "api-service2"
service_port = 80
}
path = "/service4"
}
}
}
}
wait_for_load_balancer = true
}
I am by no means a K8s expert, but I went through the Terraform code, and the one option that stands out as a way to debug this is wait_for_load_balancer on the kubernetes_ingress resource. From the documentation:
Terraform will wait for the load balancer to have at least 1 endpoint before considering the resource created.
With that set, the output should be clearer if the creation fails for some reason, and you may find out why no load balancer is being created.
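If the load balancer does come up, its DNS name can also be read back from the ingress's exported status. A sketch; the exact attribute path depends on the kubernetes provider version, so verify it against the docs for the version you pin:

output "alb_hostname" {
  # Assumed attribute path for the ingress status; adjust to your
  # kubernetes provider version if it differs.
  value = kubernetes_ingress.main.status.0.load_balancer.0.ingress.0.hostname
}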
It turned out I had the Kubernetes Ingress pointing at the application (private) subnets instead of the gateway (public) subnets. Since the ALB is internet-facing, it has to be placed in public subnets; I think that was the problem.
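A minimal sketch of that fix, assuming a gateway_subnet_ids variable holding the public subnets (the name is hypothetical; only app_subnet_ids appears in the tfvars above). An internet-facing ALB must live in subnets whose route table points at an internet gateway:

variable "gateway_subnet_ids" {
  # Hypothetical: comma-separated public (gateway) subnet IDs.
  type = string
}

resource "kubernetes_ingress" "main" {
  metadata {
    name = "main-ingress"
    annotations = {
      "alb.ingress.kubernetes.io/scheme" = "internet-facing"
      # Point the ALB at the public subnets, not the private app subnets:
      "alb.ingress.kubernetes.io/subnets" = var.gateway_subnet_ids
      # ... remaining annotations unchanged ...
    }
  }
  # ... spec unchanged ...
}

Alternatively, tagging the public subnets with kubernetes.io/role/elb = 1 lets the controller discover them on its own, in which case the subnets annotation can be dropped entirely.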