Open J3VS opened 3 years ago
Using the following configuration
data "aws_region" "default" {} data "aws_eks_cluster" "cluster" { name = module.eks.cluster_id } data "aws_eks_cluster_auth" "cluster" { name = module.eks.cluster_id } resource "aws_security_group" "worker_security_group" { name_prefix = "worker_security_group" vpc_id = var.vpc_id ingress { from_port = 22 to_port = 22 protocol = "tcp" cidr_blocks = var.cidr_blocks } } module "eks" { source = "terraform-aws-modules/eks/aws" version = "12.2.0" cluster_version = "1.17" cluster_name = var.cluster_name cluster_create_timeout = "1h" cluster_endpoint_private_access = true vpc_id = var.vpc_id subnets = var.subnets worker_groups = [ { instance_type = "m5.xlarge" asg_desired_capacity = 1 asg_max_size = 1 } ] worker_additional_security_group_ids = [aws_security_group.worker_security_group.id] } provider "kubernetes" { alias = "eks" host = data.aws_eks_cluster.cluster.endpoint token = data.aws_eks_cluster_auth.cluster.token cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority[0].data) } module "alb_ingress_controller" { source = "iplabs/alb-ingress-controller/kubernetes" version = "3.4.0" providers = { kubernetes = kubernetes.eks } k8s_cluster_type = "eks" k8s_namespace = "kube-system" aws_region_name = data.aws_region.default.name k8s_cluster_name = data.aws_eks_cluster.cluster.name }
Running a terraform plan gives the following error
Error: Get "http://localhost/api/v1/namespaces/kube-system/serviceaccounts/aws-alb-ingress-controller": dial tcp [::1]:80: connect: connection refused Error: Get "http://localhost/apis/rbac.authorization.k8s.io/v1/clusterroles/aws-alb-ingress-controller": dial tcp [::1]:80: connect: connection refused
@J3VS Hi! What about this issue? Are you fixed it? I have exactly the same now but don't understand what is wrong. Can you explain how to fix it if you did it? Thanks a lot!
Using the following configuration
Running a terraform plan gives the following error