Jenkins is out of date because its user data has changed. The user data currently deployed still contains references to Datadog (which we no longer use), and the current code adds automatic attachment of the EIP. Since Jenkins can affect our deployment pipeline, we should coordinate with @escattone on when we can schedule the required downtime.
Terraform will perform the following actions:
# aws_autoscaling_group.ci must be replaced
+/- resource "aws_autoscaling_group" "ci" {
~ arn = "arn:aws:autoscaling:us-west-2:178589013767:autoScalingGroup:11c041e2-c460-4a81-b482-ebb9f0fd0648:autoScalingGroupName/ci-mdn - ci-mdn-20181023224918223000000001" -> (known after apply)
~ availability_zones = [
- "us-west-2a",
- "us-west-2b",
- "us-west-2c",
] -> (known after apply)
- capacity_rebalance = false -> null
~ default_cooldown = 300 -> (known after apply)
desired_capacity = 1
enabled_metrics = [
"GroupDesiredCapacity",
"GroupInServiceInstances",
"GroupMaxSize",
"GroupMinSize",
"GroupPendingInstances",
"GroupStandbyInstances",
"GroupTerminatingInstances",
"GroupTotalInstances",
]
force_delete = true
+ force_delete_warm_pool = false
health_check_grace_period = 1800
health_check_type = "EC2"
~ id = "ci-mdn - ci-mdn-20181023224918223000000001" -> (known after apply)
~ launch_configuration = "ci-mdn-20181023224918223000000001" -> (known after apply)
load_balancers = [
"ci-elb-mdn",
]
- max_instance_lifetime = 0 -> null
max_size = 1
metrics_granularity = "1Minute"
min_size = 1
~ name = "ci-mdn - ci-mdn-20181023224918223000000001" -> (known after apply) # forces replacement
~ name_prefix = "ci-mdn - ci-mdn-" -> (known after apply)
protect_from_scale_in = false
~ service_linked_role_arn = "arn:aws:iam::178589013767:role/aws-service-role/autoscaling.amazonaws.com/AWSServiceRoleForAutoScaling" -> (known after apply)
- suspended_processes = [] -> null
- target_group_arns = [] -> null
- termination_policies = [] -> null
vpc_zone_identifier = [
"subnet-0a496f50",
"subnet-4890e603",
"subnet-aca6f2d5",
]
wait_for_capacity_timeout = "10m"
tag {
key = "Name"
propagate_at_launch = true
value = "ci.us-west-2.mdn.mozit.cloud"
}
tag {
key = "Region"
propagate_at_launch = true
value = "us-west-2"
}
- timeouts {}
}
# aws_launch_configuration.ci must be replaced
+/- resource "aws_launch_configuration" "ci" {
~ arn = "arn:aws:autoscaling:us-west-2:178589013767:launchConfiguration:38ba09f8-ac2f-477e-8ebd-2edcae24574b:launchConfigurationName/ci-mdn-20181023224918223000000001" -> (known after apply)
associate_public_ip_address = true
~ ebs_optimized = false -> (known after apply)
enable_monitoring = false
iam_instance_profile = "ci-mdn-us-west-2"
~ id = "ci-mdn-20181023224918223000000001" -> (known after apply)
image_id = "ami-01e0cf6e025c036e4"
instance_type = "m5.xlarge"
key_name = "mdn"
~ name = "ci-mdn-20181023224918223000000001" -> (known after apply)
name_prefix = "ci-mdn-"
security_groups = [
"sg-ad2ce0de",
]
~ user_data = "e1067db88298d9cafca35f173b4fca029a2a4b2c" -> "464e5a1fc96b6a2d47c1471afff6cd05fc415bd1" # forces replacement
- vpc_classic_link_security_groups = [] -> null
+ ebs_block_device {
+ delete_on_termination = (known after apply)
+ device_name = (known after apply)
+ encrypted = (known after apply)
+ iops = (known after apply)
+ no_device = (known after apply)
+ snapshot_id = (known after apply)
+ throughput = (known after apply)
+ volume_size = (known after apply)
+ volume_type = (known after apply)
}
+ metadata_options {
+ http_endpoint = (known after apply)
+ http_put_response_hop_limit = (known after apply)
+ http_tokens = (known after apply)
}
~ root_block_device {
delete_on_termination = false
~ encrypted = false -> (known after apply)
~ iops = 0 -> (known after apply)
~ throughput = 0 -> (known after apply)
volume_size = 250
volume_type = "gp2"
}
}
Plan: 2 to add, 0 to change, 2 to destroy.
Jenkins is out of date because its user data has changed. The user data currently deployed still contains references to Datadog (which we no longer use), and the current code adds automatic attachment of the EIP. Since Jenkins can affect our deployment pipeline, we should coordinate with @escattone on when we can schedule the required downtime.