Closed: allanian closed this issue 2 years ago.
The issue was solved by changing the instance type to t3.large. Thanks, all.
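For reference, a minimal sketch of that fix, assuming the instance type is set through the self_managed_node_group_defaults block shown in the config below:

self_managed_node_group_defaults = {
  disk_size                              = 20
  update_launch_template_default_version = true
  # t3.micro / t2.micro were rejected at launch; a larger type resolved it
  instance_type = "t3.large"
  min_size      = 1
  max_size      = 1
  desired_size  = 1
}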
I'm going to lock this issue because it has been closed for 30 days ⏳. This helps our maintainers find and focus on the active issues. If you have found a problem that seems similar to this, please open a new issue and complete the issue template so we can capture all the details necessary to investigate further.
Description
Hello, I'm trying to use this example with a complete node_group, but the apply gets stuck (see the output at the end).
On the Activity tab of the AWS EC2 Auto Scaling Groups console I see these messages:
Your version of Terraform is out of date! The latest version is 1.1.8. You can update by downloading from https://www.terraform.io/downloads.html
=========================
System
=========================
region          = "us-east-2"
cluster_version = "1.22"      # k8s version
name            = "api-demo"
instance_type   = "t2.micro"
provider "aws" { region = local.region }
provider "kubernetes" { host = module.eks.cluster_endpoint cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data)
exec { api_version = "client.authentication.k8s.io/v1alpha1" command = "aws"
This requires the awscli to be installed locally where Terraform is executed
} }
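As pasted, the exec block carries no args, so the plugin would not actually return a token. The args line below is the usual token-helper pattern from the module's examples; it is an assumption about what was in the original file:

  exec {
    api_version = "client.authentication.k8s.io/v1alpha1"
    command     = "aws"
    # assumed: standard awscli token-helper arguments, as used in the module's examples
    args = ["eks", "get-token", "--cluster-name", module.eks.cluster_id]
  }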
locals {
  name            = var.name
  cluster_version = var.cluster_version
  region          = var.region

  tags = {
    ClusterName = local.name
    GithubRepo  = "terraform-aws-eks"
    GithubOrg   = "terraform-aws-modules"
  }
}

terraform {
  backend "s3" {
    bucket         = "api-s3-bucket"
    key            = "terraform-api-eks/terraform.tfstate"
    region         = "us-east-2"
    dynamodb_table = "api-s3-bucket-terraform-state-locks"
    encrypt        = true
  }
}

data "aws_availability_zones" "available" {}
data "aws_caller_identity" "current" {}
################################################################################
# EKS Module
################################################################################
module "eks" { source = "./terraform-aws-eks"
cluster_name = local.name cluster_version = local.cluster_version cluster_endpoint_private_access = true cluster_endpoint_public_access = true
cluster_addons = { coredns = { resolve_conflicts = "OVERWRITE" } kube-proxy = {} vpc-cni = { resolve_conflicts = "OVERWRITE" } }
cluster_encryption_config = [{ provider_key_arn = aws_kms_key.eks.arn resources = ["secrets"] }]
vpc_id = module.vpc.vpc_id subnet_ids = module.vpc.private_subnets
Self managed node groups will not automatically create the aws-auth configmap so we need to
create_aws_auth_configmap = true manage_aws_auth_configmap = true
Extend cluster security group rules
cluster_security_group_additional_rules = { egress_nodes_ephemeral_ports_tcp = { description = "To node 1025-65535" protocol = "tcp" from_port = 1025 to_port = 65535 type = "egress" source_node_security_group = true } }
Extend node-to-node security group rules
node_security_group_additional_rules = { ingress_self_all = { description = "Node to node all ports/protocols" protocol = "-1" from_port = 0 to_port = 0 type = "ingress" self = true } egress_all = { description = "Node all egress" protocol = "-1" from_port = 0 to_port = 0 type = "egress" cidr_blocks = ["0.0.0.0/0"] ipv6_cidr_blocks = ["::/0"] } }
self_managed_node_group_defaults = { disk_size = 20 update_launch_template_default_version = true instance_type = "t3.micro" min_size = 1 max_size = 1 desired_size = 1 }
self_managed_node_groups = {
workers node group
} aws_auth_roles = var.aws_auth_roles aws_auth_users = var.aws_auth_users aws_auth_accounts = var.aws_auth_accounts tags = local.tags }
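The actual contents of the workers node group are not in the paste; the error output at the end suggests it asks for 3 instances. A purely hypothetical sketch, with every name and size assumed from that output:

self_managed_node_groups = {
  workers = {
    name          = "workers"   # assumed from the resource address in the error
    min_size      = 3           # assumed from "increasing the capacity from 0 to 3"
    max_size      = 3
    desired_size  = 3
    instance_type = "t2.micro"  # assumed from the System section above
  }
}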
################################################################################
# Supporting Resources
################################################################################
module "vpc" { source = "terraform-aws-modules/vpc/aws" version = "~> 3.0"
name = local.name cidr = "10.0.0.0/16" azs = data.aws_availability_zones.available.names
azs = ["${local.region}a", "${local.region}b", "${local.region}c"]
private_subnets = ["10.0.1.0/24", "10.0.2.0/24", "10.0.3.0/24"] public_subnets = ["10.0.4.0/24", "10.0.5.0/24", "10.0.6.0/24"]
enable_nat_gateway = true single_nat_gateway = true enable_dns_hostnames = true
enable_flow_log = true create_flow_log_cloudwatch_iam_role = true create_flow_log_cloudwatch_log_group = true
public_subnet_tags = { "kubernetes.io/cluster/${local.name}" = "shared" "kubernetes.io/role/elb" = 1 }
private_subnet_tags = { "kubernetes.io/cluster/${local.name}" = "shared" "kubernetes.io/role/internal-elb" = 1 }
tags = local.tags }
resource "aws_security_group" "additional" { name_prefix = "${local.name}-additional" vpc_id = module.vpc.vpc_id
ingress { from_port = 22 to_port = 22 protocol = "tcp" cidr_blocks = [ "10.0.0.0/8", "172.16.0.0/12", "192.168.0.0/16", ] }
tags = local.tags }
resource "aws_kms_key" "eks" { description = "EKS Secret Encryption Key" deletion_window_in_days = 7 enable_key_rotation = true
tags = local.tags }
data "aws_ami" "eks_default" { most_recent = true owners = ["amazon"]
filter { name = "name" values = ["amazon-eks-node-${local.cluster_version}-v*"] } }
data "aws_ami" "eks_default_bottlerocket" { most_recent = true owners = ["amazon"]
filter { name = "name" values = ["bottlerocket-aws-k8s-${local.cluster_version}-x86_64-*"] } }
resource "tls_private_key" "this" { algorithm = "RSA" }
resource "aws_key_pair" "this" { key_name = local.name public_key = tls_private_key.this.public_key_openssh }
resource "aws_kms_key" "ebs" { description = "Customer managed key to encrypt self managed node group volumes" deletion_window_in_days = 7 policy = data.aws_iam_policy_document.ebs.json }
resource "aws_ec2_capacity_reservation" "targeted" { instance_type = var.instance_type instance_platform = "Linux/UNIX"
availability_zone = data.aws_availability_zones.available.names
availability_zone = "${local.region}a" instance_count = 1 instance_match_criteria = "targeted" }
# This policy is required for the KMS key used for EKS root volumes, so the cluster is allowed to enc/dec/attach encrypted EBS volumes
data "aws_iam_policy_document" "ebs" {
  # Copy of default KMS policy that lets you manage it
  statement {
    sid       = "Enable IAM User Permissions"
    actions   = ["kms:*"]
    resources = ["*"]
  }

  # Required for EKS
  statement {
    sid = "Allow service-linked role use of the CMK"
    actions = [
      "kms:Encrypt",
      "kms:Decrypt",
      "kms:ReEncrypt*",
      "kms:GenerateDataKey*",
      "kms:DescribeKey"
    ]
    resources = ["*"]
  }

  statement {
    sid       = "Allow attachment of persistent resources"
    actions   = ["kms:CreateGrant"]
    resources = ["*"]
  }
}
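As pasted, none of the statements carries a principals block, so this document would not work as a key policy on its own. The shape below is an assumption about what was dropped from the paste, modelled on the standard default key policy; the service-linked role ARN in the comment is likewise an assumption:

# Assumed reconstruction of the missing principals (not in the original paste)
statement {
  sid       = "Enable IAM User Permissions"
  actions   = ["kms:*"]
  resources = ["*"]

  principals {
    type        = "AWS"
    identifiers = ["arn:aws:iam::${data.aws_caller_identity.current.account_id}:root"]
  }
}

# The EKS / Auto Scaling statements would similarly name the cluster role and the
# Auto Scaling service-linked role as principals, e.g.
# "arn:aws:iam::${data.aws_caller_identity.current.account_id}:role/aws-service-role/autoscaling.amazonaws.com/AWSServiceRoleForAutoScaling"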
module.eks.module.self_managed_node_group["workers"].aws_autoscaling_group.this[0]: Still creating... [9m30s elapsed]
module.eks.module.self_managed_node_group["workers"].aws_autoscaling_group.this[0]: Still creating... [9m40s elapsed]
module.eks.module.self_managed_node_group["workers"].aws_autoscaling_group.this[0]: Still creating... [9m50s elapsed]
module.eks.module.self_managed_node_group["workers"].aws_autoscaling_group.this[0]: Still creating... [10m0s elapsed]
╷
│ Error: "api-demo-20220420151848941900000001": Waiting up to 10m0s: Need at least 3 healthy instances in ASG, have 0. Most recent activity: {
│   ActivityId: "3586011e-5ae9-5faa-f317-f8f88b756be8",
│   AutoScalingGroupARN: "arn:aws:autoscaling:us-east-2:474660338267:autoScalingGroup:5a19719d-622f-4c40-a3f6-ac7ec0ed1539:autoScalingGroupName/api-demo-20220420151848941900000001",
│   AutoScalingGroupName: "api-demo-20220420151848941900000001",
│   Cause: "At 2022-04-20T15:28:19Z an instance was started in response to a difference between desired and actual capacity, increasing the capacity from 0 to 3.",
│   Description: "Launching a new EC2 instance. Status Reason: The requested configuration is currently not supported. Please check the documentation for supported configurations. Launching EC2 instance failed.",
│   Details: "{\"Subnet ID\":\"subnet-077c67e52e59e237d\",\"Availability Zone\":\"us-east-2a\"}",
│   EndTime: 2022-04-20 15:28:22 +0000 UTC,
│   Progress: 100,
│   StartTime: 2022-04-20 15:28:22.103 +0000 UTC,
│   StatusCode: "Failed",
│   StatusMessage: "The requested configuration is currently not supported. Please check the documentation for supported configurations. Launching EC2 instance failed."
│ }
│
│   with module.eks.module.self_managed_node_group["workers"].aws_autoscaling_group.this[0],
│   on terraform-aws-eks/modules/self-managed-node-group/main.tf line 265, in resource "aws_autoscaling_group" "this":
│  265: resource "aws_autoscaling_group" "this" {
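For what it's worth, the Status Reason "The requested configuration is currently not supported" is EC2's generic launch rejection; a common cause is an instance type that is not offered, or not supported with the requested settings, in that Availability Zone. A minimal sketch for checking offerings from Terraform, assuming the AWS provider's aws_ec2_instance_type_offerings data source and the us-east-2a zone from the error details:

# Checks which of the instance types in question are offered in us-east-2a (names assumed)
data "aws_ec2_instance_type_offerings" "check" {
  location_type = "availability-zone"

  filter {
    name   = "instance-type"
    values = ["t2.micro", "t3.micro", "t3.large"]
  }

  filter {
    name   = "location"
    values = ["us-east-2a"]
  }
}

output "offered_instance_types" {
  value = data.aws_ec2_instance_type_offerings.check.instance_types
}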