Terraform Nutanix Provider
https://www.terraform.io/docs/providers/nutanix/

foundation node provisioning fails on new HPE nodes with ilo6 #676

geekdad-at-work closed this issue 8 months ago

geekdad-at-work commented 8 months ago

Nutanix Cluster Information

Terraform Version

Terraform v1.6.5 on windows_amd64

Affected Resource(s)

nutanix_foundation_image_nodes

Terraform Configuration Files

terraform {
  required_providers {
    nutanix = {
      source  = "nutanix/nutanix"
      version = "1.9.4"
    }
  }
}

provider "nutanix" { username = var.user password = var.password port = var.port endpoint = var.endpoint insecure = true wait_timeout = 60 foundation_endpoint = "localhost" }

// upload AOS image
resource "nutanix_foundation_image" "image1" {
  // backslashes in Windows paths must be escaped in HCL strings
  source         = "C:\\Users\\Administrator\\Downloads\\nutanix_installer_package-release-fraser-6.5.5.5-stable-7527f87d7dd5567610d450af9e62f5980f7e99ee-x86_64.tar"
  filename       = "nos-tempfile6.5.5.5.tar"
  installer_type = "nos"
}

// fetch all AOS package details once the upload finishes
data "nutanix_foundation_nos_packages" "nos" {
  depends_on = [resource.nutanix_foundation_image.image1]
}

output "nos" { value = data.nutanix_foundation_nos_packages.nos }

// start creating the cluster
resource "nutanix_foundation_image_nodes" "batch1" {

// custom timeout, default is 60 minutes
timeouts {
    create = "120m"
}

// assuming there's only 1 NOS package present in the Foundation VM
nos_package = data.nutanix_foundation_nos_packages.nos.entities[0]

// cvm, hypervisor & ipmi common details
cvm_netmask = "255.255.255.0"
cvm_gateway = "172.xx.xx.1"
hypervisor_gateway = "172.xx.xx.1"
hypervisor_netmask = "255.255.255.0"
ipmi_gateway = "172.xx.xx.1"
ipmi_netmask = "255.255.255.0"

// these are defaults; node-specific credentials can also be set in each node spec
ipmi_user = "Administrator"
ipmi_password = var.ipmi_password

// adding one block of nodes
blocks{
    // adding multiple nodes
    nodes{
        hypervisor_hostname="node101"
        hypervisor_ip= "172.xx.xx.31"
        hypervisor= "kvm"
        image_now= true
        ipmi_ip="172.xx.xx.41"
        cvm_ip= "172.xx.xx.21"
        node_position= "A"
        cvm_gb_ram=64
        cvm_num_vcpus=16
        bond_mode="dynamic"
        bond_lacp_rate="fast"
    }
    block_id = "001"
}
blocks{
    // adding multiple nodes
    nodes{
        hypervisor_hostname="node102"
        hypervisor_ip= "172.xx.xx.32"
        hypervisor= "kvm"
        image_now= true
        ipmi_ip="172.xx.xx.42"
        cvm_ip= "172.xx.xx.22"
        node_position= "A"
        cvm_gb_ram=64
        cvm_num_vcpus=16
        bond_mode="dynamic"
        bond_lacp_rate="fast"
    }
    block_id = "002"
}
blocks{
    // adding multiple nodes
    nodes{
        hypervisor_hostname="node103"
        hypervisor_ip= "172.xx.xx.33"
        hypervisor= "kvm"
        image_now= true
        ipmi_ip="172.xx.xx.43"
        cvm_ip= "172.xx.xx.23"
        node_position= "A"
        cvm_gb_ram=64
        cvm_num_vcpus=16
        bond_mode="dynamic"
        bond_lacp_rate="fast"
    }
    block_id = "003"
}
blocks{
    // adding multiple nodes
    nodes{
        hypervisor_hostname="node104"
        hypervisor_ip= "172.xx.xx.34"
        hypervisor= "kvm"
        image_now= true
        ipmi_ip="172.xx.xx.44"
        cvm_ip= "172.xx.xx.24"
        node_position= "A"
        cvm_gb_ram=64
        cvm_num_vcpus=16
        bond_mode="dynamic"
        bond_lacp_rate="fast"
    }
    block_id = "004"
}

// add cluster block
clusters {
    redundancy_factor = 2
    cluster_name = "cluster100sb"
    single_node_cluster = false // optional; set to true for single-node cluster creation
    cluster_init_now = true
    cluster_external_ip = "172.xx.xx.11"
    cluster_members = ["172.xx.xx.21","172.xx.xx.22","172.xx.xx.23","172.xx.xx.24"]
    enable_ns=true
    backplane_subnet="192.168.180.0"
    backplane_netmask="255.255.255.0"
    backplane_vlan=140
    //cvm_ntp_servers=["172.xx.xx.181","172.xx.xx.192"]
    cvm_ntp_servers="172.xx.xx.181,172.xx.xx.192"
    cvm_dns_servers="172.xx.xx.181,172.xx.xx.192"
    hypervisor_ntp_servers="172.xx.xx.181,172.xx.xx.192"
    timezone="Etc/UTC"
}

}

output "session" { value = resource.nutanix_foundation_image_nodes.batch1 }

Debug Output

https://gist.github.com/geekdad-at-work/78629bde98ea71564255b6b8a049d6b3

Panic Output

N/A

Expected Behavior

Foundation should have provisioned the nodes through their iLO/IPMI/BMC interfaces and created a new cluster.

Actual Behavior

Foundation fails with an error reporting an unsupported BMC on the new HPE nodes (iLO 6).

Steps to Reproduce

  1. terraform apply

Important Factors

The new HPE nodes use iLO 6 as their BMC.

References

geekdad-at-work commented 8 months ago

Looks like this is a Foundation issue after all.