hashicorp / terraform-provider-aws

The AWS Provider enables Terraform to manage AWS resources.
https://registry.terraform.io/providers/hashicorp/aws
Mozilla Public License 2.0

[Bug]: volumes and tags - not as expected #34816

Open marafa-sugarcrm opened 11 months ago

marafa-sugarcrm commented 11 months ago

### Terraform Core Version

v1.5.7

### AWS Provider Version

v5.29.0

### Affected Resource(s)

`aws_instance`, `aws_ebs_volume`

### Expected Behavior

Just works ™️: tags declared on each `aws_ebs_volume` resource and in each `root_block_device` block are applied to the corresponding volumes.

### Actual Behavior

A myriad of unexpected behaviours, described under Steps to Reproduce below: tag changes on the `aws_ebs_volume` resources produced an empty plan, and setting `volume_tags` on the instances tagged every attached volume with the values meant for the root volumes.

### Relevant Error/Panic Output Snippet

_No response_

### Terraform Configuration Files

```hcl
resource "aws_iam_instance_profile" "cluster-cluster" {
  name = "cluster-${var.region_env}"
  role = aws_iam_role.cluster-cluster.name
}

resource "aws_iam_role" "cluster-cluster" {
  name = "cluster-${var.region_env}"

  assume_role_policy = <<EOF
{
    "Version": "2012-10-17",
    "Statement": [
        {
            "Action": "sts:AssumeRole",
            "Principal": {
               "Service": "ec2.amazonaws.com"
            },
            "Effect": "Allow",
            "Sid": ""
        }
    ]
}
EOF

  tags = {
    app = "testing"
  }
}

resource "aws_iam_role_policy" "cluster-cluster" {
  name = "cluster-${var.region_env}"
  role = aws_iam_role.cluster-cluster.id

  policy = <<POLICY
{
    "Version": "2012-10-17",
    "Statement": [
        {
            "Sid": "Statement",
            "Effect": "Allow",
            "Action": [
                "s3:ListBucket",
                "s3:GetObject",
                "s3:DeleteObject",
                "s3:AbortMultipartUpload",
                "s3:PutObjectTagging",
                "s3:ListMultipartUploadParts",
                "s3:PutObject"
            ],
            "Resource": [
                "arn:aws:s3:::project-cluster-backup-${var.k8s_cluster_name}/*",
                "arn:aws:s3:::project-cluster-backup-${var.k8s_cluster_name}",
                "arn:aws:s3:::project-worker-data-load-temp-${var.k8s_cluster_name}/*",
                "arn:aws:s3:::project-worker-data-load-temp-${var.k8s_cluster_name}"
            ]
        }
    ]
}
POLICY

}

resource "aws_instance" "cluster-mdw" {
  ami           = var.gp_master_ami_id
  instance_type = var.gp_mdw_instance_type
  subnet_id     = element(module.services_vpc.private_subnets, 0)
  root_block_device {
    encrypted = true
    tags = {
      Name = "cluster-mdw"
      app  = "testing"
    }
  }
  ebs_optimized          = true
  vpc_security_group_ids = [aws_security_group.cluster-master-sg.id]
  iam_instance_profile   = aws_iam_instance_profile.cluster-cluster.name

  tags = {
    Name = "cluster-mdw-${var.region_env}"
    app  = "testing"
  }
  # volume_tags = {
  #   Name = "cluster-mdw-${var.region_env}"
  #   app  = "testing"
  # }
}

resource "aws_instance" "cluster-smdw" {
  ami           = var.gp_master_ami_id
  instance_type = var.gp_smdw_instance_type
  subnet_id     = element(module.services_vpc.private_subnets, 0)
  root_block_device {
    encrypted = true
    tags = {
      Name = "cluster-smdw"
      app  = "testing"
    }
  }
  ebs_optimized          = true
  vpc_security_group_ids = [aws_security_group.cluster-master-sg.id]
  iam_instance_profile   = aws_iam_instance_profile.cluster-cluster.name

  tags = {
    Name = "cluster-smdw-${var.region_env}"
    app  = "testing"
  }

  # volume_tags = {
  #   Name = "cluster-smdw-${var.region_env}"
  #   app  = "testing"
  # }
}

resource "aws_instance" "cluster-sdw1" {
  ami           = var.gp_segment_ami_id
  instance_type = var.gp_segment_instance_type
  subnet_id     = element(module.services_vpc.private_subnets, 0)
  root_block_device {
    encrypted = true
    tags = {
      Name = "cluster-sdw1"
      app  = "testing"
    }
  }
  ebs_optimized          = true
  vpc_security_group_ids = [aws_security_group.cluster-worker-sg.id]
  iam_instance_profile   = aws_iam_instance_profile.cluster-cluster.name

  tags = {
    Name = "cluster-sdw1-${var.region_env}"
    app  = "testing"
  }
  # volume_tags = {
  #   Name = "cluster-sdw1-${var.region_env}"
  #   app  = "testing"
  # }
}

resource "aws_instance" "cluster-sdw2" {
  ami           = var.gp_segment_ami_id
  instance_type = var.gp_segment_instance_type
  subnet_id     = element(module.services_vpc.private_subnets, 0)
  root_block_device {
    encrypted = true
    tags = {
      Name = "cluster-sdw2"
      app  = "testing"
    }
  }
  ebs_optimized          = true
  vpc_security_group_ids = [aws_security_group.cluster-worker-sg.id]
  iam_instance_profile   = aws_iam_instance_profile.cluster-cluster.name

  tags = {
    Name = "cluster-sdw2-${var.region_env}"
    app  = "testing"
  }
  # volume_tags = {
  #   Name = "cluster-sdw2-${var.region_env}"
  #   app  = "testing"
  # }
}

resource "aws_ebs_volume" "cluster-mdw-h" {
  availability_zone = "${var.region}a"
  size              = var.gp_master_ebs_size
  encrypted         = true

  tags = {
    Name = "cluster-mdw-h"
    app  = "testing"
  }
}

resource "aws_volume_attachment" "ebs-mdw-h" {
  device_name = "/dev/sdh"
  volume_id   = aws_ebs_volume.cluster-mdw-h.id
  instance_id = aws_instance.cluster-mdw.id
}

resource "aws_ebs_volume" "cluster-smdw-h" {
  availability_zone = "${var.region}a"
  size              = var.gp_master_ebs_size
  encrypted         = true

  tags = {
    Name = "cluster-smdw-h"
    app  = "testing"
  }
}

resource "aws_volume_attachment" "ebs-smdw-h" {
  device_name = "/dev/sdh"
  volume_id   = aws_ebs_volume.cluster-smdw-h.id
  instance_id = aws_instance.cluster-smdw.id
}

resource "aws_ebs_volume" "cluster-sdw1-h" {
  availability_zone = "${var.region}a"
  size              = var.gp_segment_ebs_size
  encrypted         = true

  tags = {
    Name = "cluster-sdw1-h"
    app  = "testing"
  }
}

resource "aws_ebs_volume" "cluster-sdw1-i" {
  availability_zone = "${var.region}a"
  size              = var.gp_segment_ebs_size
  encrypted         = true

  tags = {
    Name = "cluster-sdw1-i"
    app  = "testing"
  }
}

resource "aws_ebs_volume" "cluster-sdw1-j" {
  availability_zone = "${var.region}a"
  size              = var.gp_segment_ebs_size
  encrypted         = true

  tags = {
    Name = "cluster-sdw1-j"
    app  = "testing"
  }
}

resource "aws_ebs_volume" "cluster-sdw1-k" {
  availability_zone = "${var.region}a"
  size              = var.gp_segment_ebs_size
  encrypted         = true

  tags = {
    Name = "cluster-sdw1-k"
    app  = "testing"
  }
}

resource "aws_volume_attachment" "ebs-sdw1-h" {
  device_name = "/dev/sdh"
  volume_id   = aws_ebs_volume.cluster-sdw1-h.id
  instance_id = aws_instance.cluster-sdw1.id
}

resource "aws_volume_attachment" "ebs-sdw1-i" {
  device_name = "/dev/sdi"
  volume_id   = aws_ebs_volume.cluster-sdw1-i.id
  instance_id = aws_instance.cluster-sdw1.id
}

resource "aws_volume_attachment" "ebs-sdw1-j" {
  device_name = "/dev/sdj"
  volume_id   = aws_ebs_volume.cluster-sdw1-j.id
  instance_id = aws_instance.cluster-sdw1.id
}

resource "aws_volume_attachment" "ebs-sdw1-k" {
  device_name = "/dev/sdk"
  volume_id   = aws_ebs_volume.cluster-sdw1-k.id
  instance_id = aws_instance.cluster-sdw1.id
}

resource "aws_ebs_volume" "cluster-sdw2-h" {
  availability_zone = "${var.region}a"
  size              = var.gp_segment_ebs_size
  encrypted         = true

  tags = {
    Name = "cluster-sdw2-h"
    app  = "testing"
  }
}

resource "aws_ebs_volume" "cluster-sdw2-i" {
  availability_zone = "${var.region}a"
  size              = var.gp_segment_ebs_size
  encrypted         = true

  tags = {
    Name = "cluster-sdw2-i"
    app  = "testing"
  }
}

resource "aws_ebs_volume" "cluster-sdw2-j" {
  availability_zone = "${var.region}a"
  size              = var.gp_segment_ebs_size
  encrypted         = true

  tags = {
    Name = "cluster-sdw2-j"
    app  = "testing"
  }
}

resource "aws_ebs_volume" "cluster-sdw2-k" {
  availability_zone = "${var.region}a"
  size              = var.gp_segment_ebs_size
  encrypted         = true

  tags = {
    Name = "cluster-sdw2-k"
    app  = "testing"
  }
}

resource "aws_volume_attachment" "ebs-sdw2-h" {
  device_name = "/dev/sdh"
  volume_id   = aws_ebs_volume.cluster-sdw2-h.id
  instance_id = aws_instance.cluster-sdw2.id
}

resource "aws_volume_attachment" "ebs-sdw2-i" {
  device_name = "/dev/sdi"
  volume_id   = aws_ebs_volume.cluster-sdw2-i.id
  instance_id = aws_instance.cluster-sdw2.id
}

resource "aws_volume_attachment" "ebs-sdw2-j" {
  device_name = "/dev/sdj"
  volume_id   = aws_ebs_volume.cluster-sdw2-j.id
  instance_id = aws_instance.cluster-sdw2.id
}

resource "aws_volume_attachment" "ebs-sdw2-k" {
  device_name = "/dev/sdk"
  volume_id   = aws_ebs_volume.cluster-sdw2-k.id
  instance_id = aws_instance.cluster-sdw2.id
}

resource "aws_security_group" "cluster-master-sg" {
  name        = "cluster-master-sg-${var.region_env}"
  description = "cluster masters"
  vpc_id      = module.services_vpc.vpc_id

  tags = {
    Name = "cluster-master-sg"
    app  = "testing"
  }
}

resource "aws_security_group" "cluster-worker-sg" {
  name        = "cluster-workers-sg-${var.region_env}"
  description = "cluster workers"
  vpc_id      = module.services_vpc.vpc_id

  tags = {
    Name = "cluster-worker-sg"
    app  = "testing"
  }
}

resource "aws_security_group_rule" "master-egress" {
  type              = "egress"
  to_port           = 0
  from_port         = 0
  protocol          = "-1"
  cidr_blocks       = ["0.0.0.0/0"]
  security_group_id = aws_security_group.cluster-master-sg.id
}

resource "aws_security_group_rule" "worker-egress" {
  type              = "egress"
  to_port           = 0
  from_port         = 0
  protocol          = "-1"
  cidr_blocks       = ["0.0.0.0/0"]
  security_group_id = aws_security_group.cluster-worker-sg.id
}

resource "aws_security_group_rule" "k8s-to-worker" {
  type              = "ingress"
  from_port         = 0
  to_port           = 65535
  protocol          = "tcp"
  cidr_blocks       = [var.k8s_cluster_cidr]
  security_group_id = aws_security_group.cluster-worker-sg.id
}

resource "aws_security_group_rule" "k8s-to-master" {
  type              = "ingress"
  from_port         = 0
  to_port           = 65535
  protocol          = "tcp"
  cidr_blocks       = [var.k8s_cluster_cidr]
  security_group_id = aws_security_group.cluster-master-sg.id
}

resource "aws_security_group_rule" "master-to-worker" {
  type                     = "ingress"
  from_port                = 0
  to_port                  = 65535
  protocol                 = "-1"
  source_security_group_id = aws_security_group.cluster-master-sg.id
  security_group_id        = aws_security_group.cluster-worker-sg.id
}

resource "aws_security_group_rule" "worker-to-master" {
  type                     = "ingress"
  from_port                = 0
  to_port                  = 65535
  protocol                 = "-1"
  source_security_group_id = aws_security_group.cluster-worker-sg.id
  security_group_id        = aws_security_group.cluster-master-sg.id
}

resource "aws_security_group_rule" "master-to-master" {
  type              = "ingress"
  from_port         = 0
  to_port           = 65535
  protocol          = "-1"
  security_group_id = aws_security_group.cluster-master-sg.id
  self              = true
}

resource "aws_security_group_rule" "worker-to-worker" {
  type              = "ingress"
  from_port         = 0
  to_port           = 65535
  protocol          = "-1"
  security_group_id = aws_security_group.cluster-worker-sg.id
  self              = true
}

output "cluster-mdw" {
  value = aws_instance.cluster-mdw.private_ip
}

output "cluster-smdw" {
  value = aws_instance.cluster-smdw.private_ip
}

output "cluster-sdw1" {
  value = aws_instance.cluster-sdw1.private_ip
}

output "cluster-sdw2" {
  value = aws_instance.cluster-sdw2.private_ip
}
```

### Steps to Reproduce

I have a 4-node cluster:
- primary (mdw)
- secondary (smdw)
- 2 worker nodes (sdw*)

All nodes have a root volume:
- the primary and secondary each have one additional volume
- the worker nodes have 5 additional volumes

There were no tags on the volumes, so I started off by adding tags to the worker nodes' volumes (the `aws_ebs_volume` resources). The plan said there was nothing to change. (See this [note](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/instance#volume_tags) on why I didn't start off with `volume_tags`.)
I then added `volume_tags` to the `aws_instance` resources. That took effect, but the tags on all volumes were the ones meant for the root volumes, which are slightly different.
I then modified (and edited and modified) the configuration until I got the final version above. I had to run apply twice, because on the second-to-last run I managed to clear out all the tags but the root volumes didn't have any tags; that was fixed on the final apply.
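
For reference, a minimal sketch of the tagging pattern the final configuration above ends up with; resource and variable names here are hypothetical, not the real setup. The root volume is tagged through `root_block_device.tags`, each data volume through its own `aws_ebs_volume`, and `volume_tags` is left unset so the two don't conflict.

```hcl
# Hypothetical, minimal illustration of the tagging pattern described above.
# Names and variables here are examples, not the reporter's real values.
resource "aws_instance" "example" {
  ami           = var.example_ami_id    # hypothetical variable
  instance_type = "m5.large"
  subnet_id     = var.example_subnet_id # hypothetical variable

  root_block_device {
    encrypted = true
    tags = {
      Name = "example-root"
      app  = "testing"
    }
  }

  tags = {
    Name = "example-instance"
    app  = "testing"
  }

  # volume_tags deliberately omitted: per the provider docs note linked above,
  # it conflicts with tags managed on the volumes themselves.
}

resource "aws_ebs_volume" "example_data" {
  availability_zone = "us-east-1a"
  size              = 100
  encrypted         = true

  tags = {
    Name = "example-data"
    app  = "testing"
  }
}

resource "aws_volume_attachment" "example_data" {
  device_name = "/dev/sdh"
  volume_id   = aws_ebs_volume.example_data.id
  instance_id = aws_instance.example.id
}
```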

### Debug Output

_No response_

### Panic Output

_No response_

### Important Factoids

_No response_

### References

_No response_

### Would you like to implement a fix?

None

ckinasch commented 5 months ago

Fetching data from the `aws_ebs_volume` data source returns an empty map of tags in ~> 5.49. Reverting the provider to 5.48 fixed the problem for me.
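
An illustrative sketch of that workaround, with hypothetical resource names and an example filter value borrowed from the configuration above: pin the provider to the 5.48 series and read the volume's tags through the `aws_ebs_volume` data source.

```hcl
# Hypothetical sketch of the workaround described above: pin the AWS provider
# to 5.48.x and read tags via the aws_ebs_volume data source.
terraform {
  required_providers {
    aws = {
      source  = "hashicorp/aws"
      version = "~> 5.48.0" # last release where tags were returned, per the comment above
    }
  }
}

data "aws_ebs_volume" "example" {
  most_recent = true

  filter {
    name   = "tag:Name"
    values = ["cluster-mdw-h"] # example value from the issue's configuration
  }
}

output "example_volume_tags" {
  value = data.aws_ebs_volume.example.tags
}
```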

moritz-makandra commented 1 month ago

I was able to work around this issue by not using a capital letter as the first character of the tag name.
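
A hypothetical sketch of that workaround, reusing an example volume from the configuration above but with tag keys that start with a lowercase letter:

```hcl
# Hypothetical sketch of the workaround above: tag keys start with a
# lowercase letter (e.g. "name" instead of "Name").
resource "aws_ebs_volume" "example" {
  availability_zone = "us-east-1a"
  size              = 50
  encrypted         = true

  tags = {
    name = "cluster-mdw-h" # lowercase first character
    app  = "testing"
  }
}
```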