hashicorp / terraform-provider-aws

The AWS Provider enables Terraform to manage AWS resources.
https://registry.terraform.io/providers/hashicorp/aws
Mozilla Public License 2.0

[Bug]: Elastic beanstalk updated configuration fails to apply with Error: Provider produced inconsistent final plan #34242

Open boxstep opened 10 months ago

boxstep commented 10 months ago

Terraform Core Version

1.6.2

AWS Provider Version

5.23.0

Affected Resource(s)

data "aws_elasticsearch_domain" "local_domain" {
  domain_name = "${local.Name}-es"

  depends_on = [
    aws_elasticsearch_domain.elasticsearch
  ]
}

data "aws_sqs_queue" "general" {
  name = "${local.Name}-general"

depends_on = [ 
    aws_sqs_queue.sqs_general
   ]
}

resource "aws_elastic_beanstalk_environment" "beanstalkappenv" {
  name                = local.Name
  application         = aws_elastic_beanstalk_application.elasticapp.name
  solution_stack_name = local.solution_stack_name
  tier                = local.ebs_tier
  version_label       = aws_elastic_beanstalk_application_version.application_for_ebs.name

  dynamic "setting" {
    for_each = local.env_map
    content {
      namespace = "aws:elasticbeanstalk:application:environment"
      name      = setting.key
      value     = setting.value
    }
  }
  depends_on = [
    module.cluster,
    aws_elasticsearch_domain.elasticsearch,
    aws_elasticache_replication_group.redis_cluster,
    aws_sqs_queue.sqs_general,
    aws_sqs_queue.sqs_proxy,
    aws_sqs_queue.sqs_user_invites
  ]
}

Expected Behavior

When updating any resource that an existing environment depends on, Terraform should apply the change to the existing environment without errors.

Actual Behavior

When updating any resource (RDS, SQS, etc.), the Elastic Beanstalk environment also plans an update to itself and the apply fails with an inconsistent final plan. The error below is shown for a single environment variable, but the same message is produced for every one of them (this is 1 out of roughly 50).

 Error: Provider produced inconsistent final plan
│ 
│ When expanding the plan for
│ aws_elastic_beanstalk_environment.beanstalkappenv to include new values
│ learned so far during apply, provider "registry.terraform.io/hashicorp/aws"
│ produced an invalid new value for .setting: planned set element
│ cty.ObjectVal(map[string]cty.Value{"name":cty.StringVal("API_VERSION"),
│ "namespace":cty.StringVal("aws:elasticbeanstalk:application:environment"),
│ "resource":cty.NullVal(cty.String), "value":cty.StringVal("0.1")}) does not
│ correlate with any element in actual.
│ 
│ This is a bug in the provider, which should be reported in the provider's
│ own issue tracker.
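
What seems to be happening, based on the configuration below: the settings generated from local.env_map take their values from data sources that declare depends_on on managed resources, and when those resources have planned changes Terraform defers reading the data sources until apply, so the planned setting values are unknown. A condensed sketch of the block the error points at (taken from the environment resource below, with explanatory comments added):

# Condensed from aws_elastic_beanstalk_environment.beanstalkappenv below; names unchanged.
dynamic "setting" {
  for_each = local.env_map # values come from data sources guarded by depends_on
  content {
    namespace = "aws:elasticbeanstalk:application:environment"
    name      = setting.key
    value     = setting.value # unknown at plan time when the data source read is deferred to apply
  }
}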

Relevant Error/Panic Output Snippet

No response

Terraform Configuration Files

## Create role ONLY for Elastic Beanstalk
resource "aws_iam_instance_profile" "test_profile" {
  name = "${local.Name}-eu-west-1.api.profile"
  role = aws_iam_role.role.name
  path = "/app/"
}

data "aws_iam_policy_document" "assume_role" {
  statement {
    effect = "Allow"

    principals {
      type        = "Service"
      identifiers = ["ec2.amazonaws.com"]
    }

    actions = ["sts:AssumeRole"]
  }
}

resource "aws_iam_role" "role" {
  name               = "${local.Name}-eu-west-1.api.role"
  path               = "/app/"
  assume_role_policy = data.aws_iam_policy_document.assume_role.json
}

## Attach existing AWS policies to created role
resource "aws_iam_role_policy_attachment" "attach_policy" {
  role       = aws_iam_role.role.name
  policy_arn = "arn:aws:iam::aws:policy/AWSElasticBeanstalkMulticontainerDocker"
}

resource "aws_iam_role_policy_attachment" "attach_policy2" {
  role       = aws_iam_role.role.name
  policy_arn = "arn:aws:iam::aws:policy/AWSElasticBeanstalkWebTier"
}

resource "aws_iam_role_policy_attachment" "attach_policy3" {
  role       = aws_iam_role.role.name
  policy_arn = "arn:aws:iam::aws:policy/AWSElasticBeanstalkWorkerTier"
}

resource "aws_iam_role_policy_attachment" "attach_policy4" {
  role       = aws_iam_role.role.name
  policy_arn = "arn:aws:iam::968921834094:policy/app/AonKMSWriteAccess"
}
resource "aws_iam_role_policy_attachment" "attach_policy5" {
  role       = aws_iam_role.role.name
  policy_arn = "arn:aws:iam::968921834094:policy/app/AonS3ReadAccess"
}

## EBS Application deploy
resource "aws_elastic_beanstalk_application_version" "application_for_ebs" {
  name        = local.Name
  application = aws_elastic_beanstalk_application.elasticapp.name
  description = local.Name
  bucket      = local.bucket_for_initial_application
  key         = local.beanstalk_initial_application
}

## EBS configuration
resource "aws_elastic_beanstalk_application" "elasticapp" {
  name = local.Name
}

resource "aws_elastic_beanstalk_environment" "beanstalkappenv" {
  name                = local.Name
  application         = aws_elastic_beanstalk_application.elasticapp.name
  solution_stack_name = local.solution_stack_name
  tier                = local.ebs_tier
  version_label       = aws_elastic_beanstalk_application_version.application_for_ebs.name

  setting {
    namespace = "aws:ec2:vpc"
    name      = "VPCId"
    value     = local.vpc_id
  }
  setting {
    namespace = "aws:autoscaling:launchconfiguration"
    name      = "SecurityGroups"                      
    value     = join(",", local.ebs_security_groups_ids)                              
  }
  setting {
    namespace = "aws:ec2:vpc"
    name      = "AssociatePublicIpAddress"
    value     =  local.ebs_public
  }
  setting {
    namespace = "aws:ec2:vpc"
    name      = "Subnets"
    value     = join(",", local.subnet_list)
  }
  setting {
    namespace = "aws:elasticbeanstalk:environment:process:default"
    name      = "MatcherHTTPCode"
    value     = local.healthcheck_httpcodes_to_match
  }
  setting {
    namespace = "aws:elasticbeanstalk:environment"
    name      = "LoadBalancerType"
    value     = local.loadbalancer_type
  }
  setting {
    namespace = "aws:autoscaling:launchconfiguration"
    name      = "InstanceType"
    value     = local.instance_type
  }
  setting {
    namespace = "aws:ec2:vpc"
    name      = "ELBScheme"
    value     = local.elb_scheme
  }
  setting {
    namespace = "aws:autoscaling:asg"
    name      = "MinSize"
    value     = local.autoscale_min
  }
  setting {
    namespace = "aws:autoscaling:asg"
    name      = "MaxSize"
    value     = local.autoscale_max
  }
  setting {
    namespace = "aws:elasticbeanstalk:healthreporting:system"
    name      = "SystemType"
    value     = local.enhanced_reporting_enabled
  }
  setting {
    namespace = "aws:elasticbeanstalk:container:php:phpini"
    name      = "document_root"
    value     = local.document_root
  }
  setting {
    namespace = "aws:elasticbeanstalk:container:php:phpini"
    name      = "memory_limit"
    value     = local.memory_limit
  }
  setting {
    namespace = "aws:elasticbeanstalk:environment:proxy"
    name      = "ProxyServer"
    value     = local.proxy_server
  }
  dynamic "setting" {
    for_each = local.env_map
    content {
      namespace = "aws:elasticbeanstalk:application:environment"
      name      = setting.key
      value     = setting.value
    }
  }
  setting {
    namespace = "aws:elasticbeanstalk:environment"
    name      = "ServiceRole"
    value     = local.service_role
  }

  setting {
    namespace = "aws:autoscaling:launchconfiguration"
    name      = "IamInstanceProfile"
    value     = aws_iam_instance_profile.test_profile.name
  }

  setting {
    namespace = "aws:elbv2:loadbalancer"
    name      = "ManagedSecurityGroup"
    value     = "sg-0e7cccc27e1231234"
  }
  setting {
    namespace = "aws:elbv2:loadbalancer"
    name      = "SecurityGroups"
    value     = "sg-0e7cccc27e1231234"
  }
  setting {
    namespace = "aws:elasticbeanstalk:environment:process:default"
    name      = "HealthCheckPath"
    value     = local.application_healthcheck_path
  }

  setting {
    namespace = "aws:autoscaling:launchconfiguration"
    name      = "EC2KeyName"
    value     = local.key_pair_name
  }

  tags = {
    AppID       = local.AppID
    BU          = local.BU
    BUSub       = local.BUSub
    Lifecycle   = local.Lifecycle
    Description = local.Description
    Project     = local.Project
    Role        = local.Role
    Name        = local.Name
  }

  depends_on = [
    module.cluster,
    aws_elasticsearch_domain.elasticsearch,
    aws_elasticache_replication_group.redis_cluster,
    aws_sqs_queue.sqs_general,
    aws_sqs_queue.sqs_proxy,
    aws_sqs_queue.sqs_user_invites
  ]
}

locals {

    solution_stack_name                = "64bit Amazon Linux 2 v3.5.12 running PHP 8.1"
    ebs_tier                           = "WebServer"
    ebs_security_groups_ids            = [local.db_access, local.abc_web_access, local.app_access]
    ebs_public                         = true
    healthcheck_httpcodes_to_match     = "200"
    loadbalancer_type                  = "application"
    instance_type                      = "t2.small"
    elb_scheme                         = "internal"
    autoscale_min                      = 1
    autoscale_max                      = 2
    enhanced_reporting_enabled         = "enhanced"
    enable_loadbalancer_logs           = false
    document_root                      = "/"
    memory_limit                       = "256M"
    proxy_server                       = "apache"
    service_role                       = "arn:aws:iam::968921834094:role/app/ElasticBeanstalkServiceRole"
    elb_security_groups                = [local.db_access, local.abc_web_access, local.app_access]
    application_healthcheck_path       = "/health-check"
    key_pair_name                      = ""
    beanstalk_initial_application      = "beanstalk-test-app.zip"
    bucket_for_initial_application     = "elasticbeanstalk-eu-west-1-968921834094"

    env_map = {
      API_VERSION            = "0.1"
      AQUA_SQS_QUEUE         = data.aws_sqs_queue.proxy.url
      DB_HOST                = data.aws_rds_cluster.rds_cluster.endpoint
      DB_HOST_READ           = data.aws_rds_cluster.rds_cluster.reader_endpoint
      ELASTICSEARCH_HOST     = data.aws_elasticsearch_domain.local_domain.endpoint
      REDIS_HOST             = data.aws_elasticache_replication_group.redis_cluster_data.primary_endpoint_address
      SQS_QUEUE              = data.aws_sqs_queue.general.url
      SQS_QUEUE_USER_INVITES = data.aws_sqs_queue.user-invites.url
    }
}

Steps to Reproduce

If the depends_on blocks shown below are left in place, terraform apply fails on updates; if they are removed, it succeeds. They work when the infrastructure is created for the first time, but for subsequent updates they have to be disabled because the apply fails. A possible workaround sketch follows the snippet below.

data "aws_elasticsearch_domain" "local_domain" {
  domain_name = "${local.Name}-es"

  depends_on = [
   aws_elasticsearch_domain.elasticsearch
  ]
}
data "aws_sqs_queue" "general" {
  name = "${local.Name}-general"

  depends_on = [ 
    aws_sqs_queue.sqs_general
   ]
}
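
A minimal sketch of a possible workaround, assuming the queues and the Elasticsearch domain are managed in this same configuration (resource names taken from the depends_on lists above): feed env_map from the managed resources' own attributes instead of re-reading them through data sources, so no data source read has to be deferred to apply.

locals {
  # Hypothetical replacement for the data-source-backed entries of env_map;
  # the remaining entries (DB_HOST, REDIS_HOST, ...) would stay as they are.
  env_map = {
    API_VERSION            = "0.1"
    AQUA_SQS_QUEUE         = aws_sqs_queue.sqs_proxy.url # assumes data.aws_sqs_queue.proxy read aws_sqs_queue.sqs_proxy
    ELASTICSEARCH_HOST     = aws_elasticsearch_domain.elasticsearch.endpoint
    SQS_QUEUE              = aws_sqs_queue.sqs_general.url
    SQS_QUEUE_USER_INVITES = aws_sqs_queue.sqs_user_invites.url
  }
}

If the data sources have to stay, dropping their depends_on (their name/domain_name arguments already identify the objects to read) also tends to keep the reads at plan time, consistent with the behaviour described above.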

Debug Output

No response

Panic Output

No response

Important Factoids

No response

References

No response

Would you like to implement a fix?

None
