hashicorp / terraform-provider-vsphere

Terraform Provider for VMware vSphere
https://registry.terraform.io/providers/hashicorp/vsphere/
Mozilla Public License 2.0
616 stars 451 forks source link

v0.4.0 vsphere_folder state migration issue #186

Closed vancluever closed 7 years ago

vancluever commented 7 years ago

Decoupling this from #156 - from @pryorda


Having this issue as well. Going from v0.3 to 0.4 breaks stuff

Module

# Providers
# VSphere
provider "vsphere" {
  user                 = "${var.vsphere_username}"
  password             = "${var.vsphere_password}"
  vsphere_server       = "${var.vsphere_server}"
  allow_unverified_ssl = "${var.vsphere_allow_unverified_ssl}"
}

resource "vsphere_folder" "instance" {
  path       = "${var.vsphere_folder_path}"
  datacenter = "${var.vsphere_datacenter}"
  count   = "${var.vsphere_network_ipv4_addresses != "" ? 1 : 0}"
}

# DNS for instances
data "aws_route53_zone" "private-domain" {
  name         = "${lower(var.provider)}.private.com."
  private_zone = true
}

resource "aws_route53_record" "instance-dns" {
  zone_id = "${data.aws_route53_zone.private-domain.id}"
  type    = "A"
  ttl     = "900"
  records = ["${element(split(",", var.vsphere_network_ipv4_addresses), count.index)}"]
  name    = "${lower(data.null_data_source.hostname.outputs["name"])}${count.index + 1}.${lower(var.region)}"
  count   = "${var.vsphere_network_ipv4_addresses != "" ? length(split(",", "${var.vsphere_network_ipv4_addresses}")) : 0}"
}

# Instance Resource
resource "vsphere_virtual_machine" "instance" {
  count   = "${var.vsphere_network_ipv4_addresses != "" ? length(split(",", "${var.vsphere_network_ipv4_addresses}")) : 0}"
  name  = "${lower(data.null_data_source.hostname.outputs["name"])}${count.index + 1}.${lower(var.region)}"

  detach_unknown_disks_on_delete = true

  folder     = "${vsphere_folder.instance.path}"
  datacenter = "${var.vsphere_datacenter}"
  cluster    = "${var.vsphere_cluster}"
  domain     = "${lower(var.region)}.${lower(var.provider)}.private.com"

  dns_servers  = ["${split(",",var.vsphere_network_domain_resolvers)}"]
  dns_suffixes = ["${lower(var.region)}.${lower(var.provider)}.private.com","${split(",",var.vsphere_network_domain_search)}"]
  vcpu         = "${var.vsphere_vcpu}"
  memory       = "${var.vsphere_memory}"
  time_zone    = "${var.vsphere_cluster_timezone}"

  network_interface {
    label              = "${var.vsphere_network_label}"
    ipv4_address       = "${element(split(",", var.vsphere_network_ipv4_addresses), count.index)}"
    ipv4_prefix_length = "${var.vsphere_network_ipv4_prefix_length}"
    ipv4_gateway       = "${var.vsphere_network_ipv4_gateway}"
  }

  disk {
    datastore = "${var.vsphere_datastore}"
    template  = "${var.vsphere_template}"
    type      = "thin"
  }

  disk {
    datastore = "${var.vsphere_datastore}"
    vmdk      = "${lower(data.null_data_source.hostname.outputs["name"])}${count.index + 1}.${lower(var.region)}.data_disk.vmdk"
    keep_on_remove = true
  }

  lifecycle {
    ignore_changes = ["network_interface", "dns_suffixes"]
  }
}

resource "vsphere_virtual_disk" "data_disk" {
  count   = "${var.vsphere_network_ipv4_addresses != "" ? length(split(",", "${var.vsphere_network_ipv4_addresses}")) : 0}"
  size          = "${var.data_disk_size}"
  vmdk_path     = "${lower(data.null_data_source.hostname.outputs["name"])}${count.index + 1}.${lower(var.region)}.data_disk.vmdk"
  datacenter    = "${var.vsphere_datacenter}"
  datastore     = "${var.vsphere_datastore}"
  type          = "thin"
  adapter_type  = "lsiLogic"
  lifecycle {
    prevent_destroy = true
  }
}

output "instance_ids" {
  value = ["${vsphere_virtual_machine.instance.*.id}"]
}

output "instances_dns" {
  value = ["${vsphere_virtual_machine.instance.*.name}"]
}

output "instance_private_ips" {
  value = ["${vsphere_virtual_machine.instance.*.network_interface.0.ipv4_address}"]
}

Example .tfvars

# Application Network
vsphere_network_label = "dvPortGroup_qa"
vsphere_network_ipv4_gateway = "172.16.123.1"
vsphere_network_ipv4_addresses = "172.16.123.65"

# VM Location
vsphere_cluster = "VMSAC02 Cluster"
vsphere_folder_path = "QA/Beta"
vsphere_datastore = "Nimble Datastores/beta"

# VM Specs
vsphere_vcpu = "2"
vsphere_memory = "2048"
data_disk_size = "50"

# Labels
product= "infrastructure"
environment = "beta"
application = "consul"
component = "server"
region = "sac"
provider = "op"

# Chef Details
run_list = [ "role[consul-standalone]" ]

# Monitors
consul_frontend = "http://beta-consul-server1.sac.op.private.com:8500/ui/"

v0.4 attempt

# Providers
# VSphere
provider "vsphere" {
  user                 = "${var.vsphere_username}"
  password             = "${var.vsphere_password}"
  vsphere_server       = "${var.vsphere_server}"
  allow_unverified_ssl = "${var.vsphere_allow_unverified_ssl}"
}

data "vsphere_datacenter" "datacenter" {
  name = "${var.vsphere_datacenter}"
}
resource "vsphere_folder" "instance" {
  type           = "vm"
  path           = "${var.vsphere_folder_path}"
  datacenter_id  = "${data.vsphere_datacenter.datacenter.id}"
  count          = "${var.vsphere_network_ipv4_addresses != "" ? 1 : 0}"
}

# DNS for instances
data "aws_route53_zone" "private-domain" {
  name         = "${lower(var.provider)}.private.com."
  private_zone = true
}

resource "aws_route53_record" "instance-dns" {
  zone_id = "${data.aws_route53_zone.private-domain.id}"
  type    = "A"
  ttl     = "900"
  records = ["${element(split(",", var.vsphere_network_ipv4_addresses), count.index)}"]
  name    = "${lower(data.null_data_source.hostname.outputs["name"])}${count.index + 1}.${lower(var.region)}"
  count   = "${var.vsphere_network_ipv4_addresses != "" ? length(split(",", "${var.vsphere_network_ipv4_addresses}")) : 0}"
}

# Instance Resource
resource "vsphere_virtual_machine" "instance" {
  count   = "${var.vsphere_network_ipv4_addresses != "" ? length(split(",", "${var.vsphere_network_ipv4_addresses}")) : 0}"
  name  = "${lower(data.null_data_source.hostname.outputs["name"])}${count.index + 1}.${lower(var.region)}"

  detach_unknown_disks_on_delete = true

  folder     = "${vsphere_folder.instance.path}"
  datacenter = "${var.vsphere_datacenter}"
  cluster    = "${var.vsphere_cluster}"
  domain     = "${lower(var.region)}.${lower(var.provider)}.private.com"

  dns_servers  = ["${split(",",var.vsphere_network_domain_resolvers)}"]
  dns_suffixes = ["${lower(var.region)}.${lower(var.provider)}.private.com","${split(",",var.vsphere_network_domain_search)}"]
  vcpu         = "${var.vsphere_vcpu}"
  memory       = "${var.vsphere_memory}"
  time_zone    = "${var.vsphere_cluster_timezone}"

  network_interface {
    label              = "${var.vsphere_network_label}"
    ipv4_address       = "${element(split(",", var.vsphere_network_ipv4_addresses), count.index)}"
    ipv4_prefix_length = "${var.vsphere_network_ipv4_prefix_length}"
    ipv4_gateway       = "${var.vsphere_network_ipv4_gateway}"
  }

  disk {
    datastore = "${var.vsphere_datastore}"
    template  = "${var.vsphere_template}"
    type      = "thin"
  }

  disk {
    datastore = "${var.vsphere_datastore}"
    vmdk      = "${lower(data.null_data_source.hostname.outputs["name"])}${count.index + 1}.${lower(var.region)}.data_disk.vmdk"
    keep_on_remove = true
  }

  lifecycle {
    ignore_changes = ["network_interface", "dns_suffixes"]
  }
}

resource "vsphere_virtual_disk" "data_disk" {
  count   = "${var.vsphere_network_ipv4_addresses != "" ? length(split(",", "${var.vsphere_network_ipv4_addresses}")) : 0}"
  size          = "${var.data_disk_size}"
  vmdk_path     = "${lower(data.null_data_source.hostname.outputs["name"])}${count.index + 1}.${lower(var.region)}.data_disk.vmdk"
  datacenter    = "${var.vsphere_datacenter}"
  datastore     = "${var.vsphere_datastore}"
  type          = "thin"
  adapter_type  = "lsiLogic"
  lifecycle {
    prevent_destroy = true
  }
}

output "instance_ids" {
  value = ["${vsphere_virtual_machine.instance.*.id}"]
}

output "instances_dns" {
  value = ["${vsphere_virtual_machine.instance.*.name}"]
}

output "instance_private_ips" {
  value = ["${vsphere_virtual_machine.instance.*.network_interface.0.ipv4_address}"]
}

and the error

Error refreshing state: 1 error(s) occurred:
* module.vsphere_linux.vsphere_folder.instance: vsphere_folder.instance: cannot locate folder: ServerFaultCode: The object has already been deleted or has not been completely created
vancluever commented 7 years ago

Hey @pryorda, if you have the capability to do so, can you try building #187 from source and seeing if it fixes your issue? It does not look like state migration was firing and as such the ID being referenced was the old v0.3.x state ID, which is not a managed object reference.

Instructions here

Thanks!

pryorda commented 7 years ago

I'll update shortly.

pryorda commented 7 years ago

Looks like it worked. Build just does a plan.

CO007 ➜  consul-vsphere git:(DEVOPS-653) ✗ ./build.sh       
Encrypted data bag detected, decrypting with provided secret.
Encrypted data bag detected, decrypting with provided secret.
ERROR: The object you are looking for could not be found
Response: Cannot load data bag item consul-QA for data bag terraform-secrets
Downloading modules...
Get: file:///home/dpryor/git/terraform/modules/consul/url_monitor
Get: file:///home/dpryor/git/terraform/modules/vsphere_instance_linux
Get: file:///home/dpryor/git/terraform/modules/config_chef_linux
Get: file:///home/dpryor/git/terraform/modules/vsphere_instance_linux
Get: file:///home/dpryor/git/terraform/modules/config_chef_linux

Initializing the backend...

Successfully configured the backend "consul"! Terraform will automatically
use this backend unless the backend configuration changes.

Initializing provider plugins...
 - Checking for available provider plugins on https://releases.hashicorp.com...
- Downloading plugin for provider "template" (1.0.0)...
- Downloading plugin for provider "aws" (1.0.0)...
- Downloading plugin for provider "null" (1.0.0)...
- Downloading plugin for provider "consul" (1.0.0)...
- Downloading plugin for provider "chef" (0.1.0)...

The following providers do not have any version constraints in configuration,
so the latest version was installed.

To prevent automatic upgrades to new major versions that may contain breaking
changes, it is recommended to add version = "..." constraints to the
corresponding provider blocks in configuration, with the constraint strings
suggested below.

* provider.aws: version = "~> 1.0"
* provider.chef: version = "~> 0.1"
* provider.consul: version = "~> 1.0"
* provider.null: version = "~> 1.0"
* provider.template: version = "~> 1.0"

Terraform has been successfully initialized!

You may now begin working with Terraform. Try running "terraform plan" to see
any changes that are required for your infrastructure. All Terraform commands
should now work.

If you ever set or change modules or backend configuration for Terraform,
rerun this command to reinitialize your working directory. If you forget, other
commands will detect it and remind you to do so if necessary.
Command => terraform plan -input=true -var-file=envs/QA.tfvars -var-file=envs/secrets/QA.tfvars   . 
Refreshing Terraform state in-memory prior to plan...
The refreshed state will be used to calculate this plan, but will not be
persisted to local or remote state storage.

data.template_file.monitor_json: Refreshing state...
data.null_data_source.hostname: Refreshing state...
data.null_data_source.role: Refreshing state...
consul_keys.url_monitor: Refreshing state... (ID: consul)
data.null_data_source.role: Refreshing state...
data.null_data_source.role: Refreshing state...
data.null_data_source.hostname: Refreshing state...
data.null_data_source.hostname: Refreshing state...
data.null_data_source.hostname: Refreshing state...
chef_role.consul-standalone: Refreshing state... (ID: consul-standalone)
data.null_data_source.role: Refreshing state...
vsphere_virtual_disk.data_disk.2: Refreshing state... (ID: qa-consul-server3.sac.data_disk.vmdk)
vsphere_virtual_disk.data_disk.1: Refreshing state... (ID: qa-consul-server2.sac.data_disk.vmdk)
vsphere_virtual_disk.data_disk.0: Refreshing state... (ID: qa-consul-server1.sac.data_disk.vmdk)
data.vsphere_datacenter.datacenter: Refreshing state...
data.vsphere_datacenter.datacenter: Refreshing state...
vsphere_folder.instance: Refreshing state... (ID: Datacenter/QA/QAEX)
data.aws_route53_zone.private-domain: Refreshing state...
data.aws_route53_zone.private-domain: Refreshing state...
aws_route53_record.instance-dns.2: Refreshing state... (ID: ..-consul-server3.sac_A)
aws_route53_record.instance-dns.1: Refreshing state... (ID: ...-consul-server2.sac_A)
aws_route53_record.instance-dns.0: Refreshing state... (ID: ...-consul-server1.sac_A)
vsphere_virtual_machine.instance.2: Refreshing state... (ID: QA/QAEX/qa-consul-server3.sac)
vsphere_virtual_machine.instance.0: Refreshing state... (ID: QA/QAEX/qa-consul-server1.sac)
vsphere_virtual_machine.instance.1: Refreshing state... (ID: QA/QAEX/qa-consul-server2.sac)
null_resource.cluster_chef_provisioning.0: Refreshing state... (ID: 4030699871865257443)
null_resource.cluster_chef_provisioning.1: Refreshing state... (ID: 2470691103505233667)
null_resource.cluster_chef_provisioning.2: Refreshing state... (ID: 4040479122863971535)
No changes. Infrastructure is up-to-date.

This means that Terraform did not detect any differences between your
configuration and real physical resources that exist. As a result, Terraform
doesn't need to do anything.
CO007 ➜  consul-vsphere git:(DEVOPS-653) ✗ 
vancluever commented 7 years ago

Great! Will be merging the fix shortly and there will be a release shortly :+1:

pryorda commented 7 years ago

Can you update this thread once it's been released?

vancluever commented 7 years ago

Hey @pryorda - sorry, was heads down on another issue - v0.4.1 should have been released a while ago now (probably about 10 minutes after that update was posted). You should be able to grab it now!

pryorda commented 7 years ago

Yep, Thank you sir!