Describe the bug
I followed this example and I am stuck with the following status:
Events:
  Type     Reason            Age    From               Message
  ----     ------            ----   ----               -------
  Warning  FailedScheduling  2m59s  fargate-scheduler  Your AWS account is currently blocked and thus cannot launch any Fargate pods
To Reproduce
Steps to reproduce the behavior:
1. Navigate to the serverless tutorial
2. Run it
3. See error
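For reference, the event above comes from describing the pending CoreDNS pods; something like the following (the pod name is a placeholder, not taken from my environment):

kubectl --namespace kube-system get pods
kubectl --namespace kube-system describe pod <coredns-pod-name>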
Code
terraform {
  required_version = "~> 1.2.4"

  required_providers {
    aws = {
      source  = "hashicorp/aws"
      version = "~> 4.21.0"
    }
    kubernetes = {
      source  = "hashicorp/kubernetes"
      version = "~> 2.12.0"
    }
    helm = {
      source  = "hashicorp/helm"
      version = ">= 2.5" # "~> 2.6.0"
    }
    null = {
      source  = "hashicorp/null"
      version = ">= 3.0" # "~> 3.1.0"
    }
  }
}
provider "aws" {
profile = "syncifyEKS-terraform-admin"
region = local.region
default_tags {
tags = {
Environment = "Staging"
Owner = "BT-Compliance"
Terraform = "True"
}
}
}
#
# Housekeeping
#
locals {
  project_name    = "syncify-dev"
  cluster_name    = "${local.project_name}-eks-cluster"
  cluster_version = "1.22"
  region          = "us-west-1"
}
/*
The following 2 data resources are used to get around the fact that we have to wait
for the EKS cluster to be initialised before we can attempt to authenticate.
*/
data "aws_eks_cluster" "default" {
name = module.eks.cluster_id
}
data "aws_eks_cluster_auth" "default" {
name = module.eks.cluster_id
}
provider "kubernetes" {
host = data.aws_eks_cluster.default.endpoint
cluster_ca_certificate = base64decode(data.aws_eks_cluster.default.certificate_authority[0].data)
token = data.aws_eks_cluster_auth.default.token
}
provider "helm" {
kubernetes {
host = data.aws_eks_cluster.default.endpoint
cluster_ca_certificate = base64decode(data.aws_eks_cluster.default.certificate_authority[0].data)
token = data.aws_eks_cluster_auth.default.token
}
}
#############################################################################################
#############################################################################################
# Create EKS Cluster
#############################################################################################
#############################################################################################
# Create VPC for EKS Cluster
module "vpc" {
source = "terraform-aws-modules/vpc/aws"
version = "3.14.2"
name = local.cluster_name
cidr = "10.0.0.0/16"
azs = ["${local.region}a", "${local.region}b", "${local.region}c"]
private_subnets = ["10.0.1.0/24", "10.0.2.0/24"] #, "10.0.3.0/24"]
public_subnets = ["10.0.101.0/24", "10.0.102.0/24"] #, "10.0.103.0/24"]
enable_nat_gateway = true
single_nat_gateway = true
one_nat_gateway_per_az = false
manage_default_network_acl = true
default_network_acl_tags = { Name = "${local.cluster_name}-default" }
manage_default_route_table = true
default_route_table_tags = { Name = "${local.cluster_name}-default" }
manage_default_security_group = true
default_security_group_tags = { Name = "${local.cluster_name}-default" }
public_subnet_tags = {
"kubernetes.io/cluster/${local.cluster_name}" = "shared"
"kubernetes.io/role/elb" = 1
}
private_subnet_tags = {
"kubernetes.io/cluster/${local.cluster_name}" = "shared"
"kubernetes.io/role/internal-elb" = 1
}
}
module "eks" {
source = "terraform-aws-modules/eks/aws"
version = "18.26.3"
cluster_name = local.cluster_name
cluster_version = local.cluster_version
vpc_id = module.vpc.vpc_id
subnet_ids = module.vpc.private_subnets
cluster_addons = {
kube-proxy = {
addon_version = data.aws_eks_addon_version.this["kube-proxy"].version
resolve_conflicts = "OVERWRITE"
}
vpc-cni = {
addon_version = data.aws_eks_addon_version.this["vpc-cni"].version
resolve_conflicts = "OVERWRITE"
}
}
# manage_aws_auth_configmap = true
fargate_profiles = {
default = {
name = "default"
selectors = [
{ namespace = "default" }
]
}
kube_system = {
name = "kube-system"
selectors = [
{ namespace = "kube-system" }
]
}
}
}
data "aws_eks_addon_version" "this" {
for_each = toset(["coredns", "kube-proxy", "vpc-cni"])
addon_name = each.value
kubernetes_version = module.eks.cluster_version
most_recent = true
}
################################################################################
# Modify EKS CoreDNS Deployment
################################################################################
data "aws_eks_cluster_auth" "this" {
name = module.eks.cluster_id
}
locals {
kubeconfig = yamlencode({
apiVersion = "v1"
kind = "Config"
current-context = "terraform"
clusters = [{
name = module.eks.cluster_id
cluster = {
certificate-authority-data = module.eks.cluster_certificate_authority_data
server = module.eks.cluster_endpoint
}
}]
contexts = [{
name = "terraform"
context = {
cluster = module.eks.cluster_id
user = "terraform"
}
}]
users = [{
name = "terraform"
user = {
token = data.aws_eks_cluster_auth.this.token
}
}]
})
}
# Separate resource so that this is only ever executed once
resource "null_resource" "remove_default_coredns_deployment" {
  triggers = {}

  provisioner "local-exec" {
    interpreter = ["/bin/bash", "-c"]
    environment = {
      KUBECONFIG = base64encode(local.kubeconfig)
    }

    # We are removing the deployment provided by the EKS service and replacing it through the self-managed CoreDNS Helm addon
    # However, we are maintaining the existing kube-dns service and annotating it for Helm to assume control
    command = <<-EOT
      kubectl --namespace kube-system delete deployment coredns --kubeconfig <(echo $KUBECONFIG | base64 --decode)
    EOT
  }
}

resource "null_resource" "modify_kube_dns" {
  triggers = {}

  provisioner "local-exec" {
    interpreter = ["/bin/bash", "-c"]
    environment = {
      KUBECONFIG = base64encode(local.kubeconfig)
    }

    # We are maintaining the existing kube-dns service and annotating it for Helm to assume control
    command = <<-EOT
      echo "Setting implicit dependency on ${module.eks.fargate_profiles["kube_system"].fargate_profile_pod_execution_role_arn}"
      kubectl --namespace kube-system annotate --overwrite service kube-dns meta.helm.sh/release-name=coredns --kubeconfig <(echo $KUBECONFIG | base64 --decode)
      kubectl --namespace kube-system annotate --overwrite service kube-dns meta.helm.sh/release-namespace=kube-system --kubeconfig <(echo $KUBECONFIG | base64 --decode)
      kubectl --namespace kube-system label --overwrite service kube-dns app.kubernetes.io/managed-by=Helm --kubeconfig <(echo $KUBECONFIG | base64 --decode)
    EOT
  }

  depends_on = [
    null_resource.remove_default_coredns_deployment
  ]
}
################################################################################
# CoreDNS Helm Chart (self-managed)
################################################################################
resource "helm_release" "coredns" {
name = "coredns"
namespace = "kube-system"
create_namespace = false
description = "CoreDNS is a DNS server that chains plugins and provides Kubernetes DNS Services"
chart = "coredns"
version = "1.19.4"
repository = "https://coredns.github.io/helm"
force_update = true
recreate_pods = true
# For EKS image repositories https://docs.aws.amazon.com/eks/latest/userguide/add-ons-images.html
values = [
<<-EOT
image:
repository: 602401143452.dkr.ecr.us-west-1.amazonaws.com/eks/coredns
tag: ${data.aws_eks_addon_version.this["coredns"].version}
deployment:
name: coredns
annotations:
eks.amazonaws.com/compute-type: fargate
service:
name: kube-dns
annotations:
eks.amazonaws.com/compute-type: fargate
podAnnotations:
eks.amazonaws.com/compute-type: fargate
EOT
]
depends_on = [
null_resource.modify_kube_dns
]
}
Expected behavior
The coredns pods should have been scheduled.
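As a sketch of what I would expect to see if scheduling worked (pod and node names are illustrative):

kubectl --namespace kube-system get pods -o wide
# Expected: the coredns pods in Running state, placed on Fargate nodes,
# which typically show up with names like fargate-ip-10-0-x-x.<region>.compute.internal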
Any help would be greatly appreciated.

Hi @arnav13081994 - this appears to be an issue with your account and is not related to the code provided here. Please reach out to AWS support to resolve it.
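One way to double-check that the failure is account-level rather than a problem with the configuration is to inspect the Fargate profiles with the AWS CLI. A minimal sketch, assuming the profile was created with the name "default" and the cluster/region values from the config above:

aws eks list-fargate-profiles --cluster-name syncify-dev-eks-cluster --region us-west-1
aws eks describe-fargate-profile \
  --cluster-name syncify-dev-eks-cluster \
  --fargate-profile-name default \
  --region us-west-1 \
  --query 'fargateProfile.status'
# A profile reported as ACTIVE, combined with the FailedScheduling event above,
# points at an account-level restriction rather than a misconfigured profile.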