nutanix / terraform-provider-nutanix

Terraform Nutanix Provider
https://www.terraform.io/docs/providers/nutanix/

Example Doc - Ubuntu VM deployment with static IP #628

Closed: ktinthecloud closed this issue 11 months ago

ktinthecloud commented 1 year ago

Please provide a working example of deploying an Ubuntu VM with a static IP. I've tried different combinations of passing metadata or userdata via guest_customization_cloud_init_user_data or guest_customization_cloud_init_meta_data, but nothing seems to work.

davhdavh commented 11 months ago

It took forever to figure this out...

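# Reboot once cloud-init finishes so the netplan config written below actually takes effect
# (a later comment shows how to avoid the reboot)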
power_state:
  delay: now
  mode: reboot
  message: Reboot after init
  timeout: 120
  condition: true

write_files:
- path: /etc/netplan/50-cloud-init.yaml
  permissions: '0600'
  content: |
    network:
      version: 2
      ethernets:
        e:
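          # match any e* interface (ens3, eth0, ...) and pin its name so the static address below always applies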
          match:
            name: e*
          set-name: ens3
          addresses:
          - 192.168.229.${100+count.index}/24
          routes:
          - to: default
            via: 192.168.229.1
          nameservers:
            search: [xxx]
            addresses:
            %{ for s in data.nutanix_clusters.clusters.entities.0.name_server_ip_list ~}
            - ${s}
            %{ endfor ~}

I still haven't figured out how to give it the right IP without a reboot.

ktinthecloud commented 11 months ago

@davhdavh, are you using guest_customization_cloud_init_user_data or guest_customization_cloud_init_meta_data? Can you share your main.tf?

davhdavh commented 11 months ago

Figured out how to do it without a reboot... It will still contact DHCP during boot because guest_customization_cloud_init_meta_data is broken and you cannot actually specify the network config that way. So either wait a few minutes for the DHCP retry to give up, or have a DHCP server.

Here is a more complete example that will:

  1. create one VM per physical machine on an existing network
  2. affinity and agent settings aren't implemented in the Terraform provider, so those need to be done manually afterwards
  3. use the SSH keys that can log in to the CVM to also log in to these VMs
  4. install guest tools
  5. add a few comments about all the things that don't work
  6. note that the terraform apply returns when the VMs are created, not after cloud-init is done, so IP detection doesn't work (a possible workaround is sketched after the config)
  7. I still force a reboot, but it isn't required anymore for networking

data "nutanix_clusters" "clusters" {}
locals {
  cluster = [
    for cluster in data.nutanix_clusters.clusters.entities :
    cluster if cluster.service_list[0] != "PRISM_CENTRAL"
    ][0]
}
data "nutanix_hosts" "hosts" {
}
locals {
  hosts = [
    for host in data.nutanix_hosts.hosts.entities :
    host if host.cluster_reference.uuid == local.cluster.metadata.uuid
    ]
}
#output "hosts" {
#  value = local.hosts
#}

data "nutanix_subnet" "network" {
   subnet_name = "blabla"
}

resource "nutanix_image" "debian_cloud" {
  name        = "debian12"
  description = "debian12"
  #TODO update doesn't work, you MUST taint/destroy it manually to force a recreate
  #  it cheats you, because it visually looks like it is working, but I don't see it using the new image after an update without a taint
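  #  (not from the original comment: to force the recreate you can run
  #   terraform taint nutanix_image.debian_cloud
  #   or, on newer Terraform, terraform apply -replace=nutanix_image.debian_cloud)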
  source_uri  = "https://cloud.debian.org/images/cloud/bookworm/latest/debian-12-generic-amd64.qcow2"
}

resource "nutanix_virtual_machine" "linux" {
  count = length(local.hosts)
  name = "linux${count.index + 1}"
  cluster_uuid = local.cluster.metadata.uuid

  num_vcpus_per_socket = 8
  num_sockets          = local.hosts[count.index].num_cpu_sockets
  memory_size_mib      = 4096

  lifecycle {
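    # the API reports these attributes differently after creation (clone, NGT ISO mount),
    # so ignore them to avoid a diff on every plan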
    ignore_changes = [
      guest_customization_cloud_init_user_data,
      disk_list.0.disk_size_bytes,
      disk_list.0.data_source_reference,
      disk_list.0.device_properties,
      disk_list.1.disk_size_bytes,
      disk_list.1.data_source_reference,
      disk_list.1.device_properties,
      owner_reference,
      nutanix_guest_tools,
      project_reference,
    ]
  }

  disk_list {
    # This makes a clone of the qcow2 image and uses it as the base for the root disk
    data_source_reference = {
      kind = "image"
      uuid = resource.nutanix_image.debian_cloud.id
    }
    # and this then triggers an extendfs after boot
    disk_size_mib   = 10000

    device_properties {
      device_type = "DISK"
      disk_address = {
        device_index = 1
        adapter_type = "SCSI"
      }
    }

#this doesn't work when using a data_source_reference
#    storage_config {
#      storage_container_reference {
#        kind = "storage_container"
#        uuid = "blabla" #find the right one in https://nutanix:9440/api/nutanix/v2/api_explorer/index.html#!/storage95containers/getContainers 
#      }
#    }
  }

 # for nutanix_guest_tools
 disk_list {
   disk_size_bytes = 0

   data_source_reference = {}

   device_properties {
     device_type = "CDROM"
     disk_address = {
       device_index = "1"
       adapter_type = "SATA"
     }
   }
 }
 nutanix_guest_tools = {
   state           = "ENABLED",
   # ngt_state       = "INSTALLED",
   iso_mount_state = "MOUNTED"
 }

 ngt_enabled_capability_list = [
   "VSS_SNAPSHOT"
 ]

  boot_type = "SECURE_BOOT"
  machine_type = "Q35"
  hardware_clock_timezone = "UTC"

  nic_list {
    subnet_uuid = data.nutanix_subnet.network.id
    #mac_address = "50:6b:8d:de:4e:${format("%02d", count.index)}"
  }

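  # user-data is passed base64-encoded; the heredoc below is a Terraform template,
  # so ${...} and %{...} are rendered before encoding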
  guest_customization_cloud_init_user_data = base64encode(<<CLOUDINIT
#cloud-config
hostname: linux${count.index + 1}
fqdn: linux${count.index + 1}.whatever.local
ssh_pwauth: false
package_update: true
package_upgrade: true
timezone: CET
locale: C.UTF-8

power_state:
  mode: reboot
  delay: now
  message: Reboot after init

users:
- default
- name: myuser
  sudo: ALL=(ALL) NOPASSWD:ALL
  shell: /bin/bash
  ssh_authorized_keys:
%{ for s in local.cluster.authorized_public_key_list ~}
  - ${s.key}
%{ endfor ~}

packages:
- python-is-python3
- sudo
- dmidecode
- lvm2

apt:
  primary:
  - arches: [default]
    search_dns: true

#swap:
#  filename: /swap.img
#  size: "auto"

write_files:
- path: /etc/netplan/50-cloud-init.yaml
  permissions: '0600'
  content: |
    network:
      version: 2
      ethernets:
        e:
          match:
            name: e*
          set-name: ens3
          addresses:
          - 192.168.229.${101+count.index}/24
          routes:
          - to: default
            via: 192.168.229.1
          nameservers:
            search: [whatever.local]
            addresses:
%{ for s in local.cluster.name_server_ip_list ~}
            - ${s}
%{ endfor ~}

runcmd:
- netplan apply
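# the NGT ISO attached as the SATA CDROM above typically shows up as /dev/sr1
# (the cloud-init seed ISO that carries this config is usually /dev/sr0)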
- [mkdir, /mnt/nutanix]
- [mount, /dev/sr1, /mnt/nutanix]
- [python, /mnt/nutanix/installer/linux/install_ngt.py]
%{ for idx, s in local.hosts ~}
%{ if idx == count.index ~}
- echo "127.0.0.1 linux${idx + 1}" >> /etc/hosts
%{ else ~}
- echo "192.168.229.${101+idx} linux${idx + 1}" >> /etc/hosts
%{ endif ~}
%{ endfor ~}
- blabla
CLOUDINIT
)
}
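Regarding point 6: a possible way to make Terraform block until cloud-init has actually finished (not part of the original config, just a sketch) is a null_resource with a remote-exec provisioner that runs cloud-init status --wait against the static address configured above. It assumes SSH with the myuser account created above and a local key path (placeholder below), and that the final power_state reboot is dropped, since a mid-wait reboot would cut the SSH session:

resource "null_resource" "wait_for_cloud_init" {
  count = length(local.hosts)

  # connect to the static address from the netplan template above
  connection {
    type        = "ssh"
    host        = "192.168.229.${101 + count.index}"
    user        = "myuser"
    private_key = file(pathexpand("~/.ssh/id_rsa")) # placeholder key path
  }

  # returns once cloud-init reports it is done (or fails)
  provisioner "remote-exec" {
    inline = ["cloud-init status --wait"]
  }

  depends_on = [nutanix_virtual_machine.linux]
}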
davhdavh commented 11 months ago

Found an alternative way: this will get the right IP from boot, but it requires using the DHCP/IPAM from Nutanix. Here I create a new subnet, but if you already have one with DHCP, just use data.nutanix_subnet like above.


resource "nutanix_subnet" "bla" {
  name = "bla-subnet"

  cluster_uuid = local.cluster.metadata.uuid

  vlan_id = 123
  subnet_type = "VLAN"

  prefix_length = 24
  default_gateway_ip = "192.168.14.1"
  subnet_ip = "192.168.14.0"
  ip_config_pool_list_ranges = ["192.168.14.200 192.168.14.250"]

  dhcp_domain_name_server_list = ["8.8.8.8"]

}

resource "nutanix_virtual_machine" "test2" {
  count = 1
  name = "testlinux${count.index + 1}"
  cluster_uuid = local.cluster.metadata.uuid

  num_vcpus_per_socket = 8
  num_sockets          = local.hosts[count.index].num_cpu_sockets
  memory_size_mib      = 4096

  lifecycle {
    ignore_changes = [
      guest_customization_cloud_init_user_data,
      disk_list.0.disk_size_bytes,
      disk_list.0.data_source_reference,
      disk_list.0.device_properties,
      disk_list.1.disk_size_bytes,
      disk_list.1.data_source_reference,
      disk_list.1.device_properties,
      owner_reference,
      nutanix_guest_tools,
      project_reference,
    ]
  }

  disk_list {
    # This makes a clone of the qcow2 image and uses it as the base for the root disk
    data_source_reference = {
      kind = "image"
      uuid = resource.nutanix_image.debian_cloud.id
    }
    # and this then triggers an extendfs after boot
    disk_size_mib   = 10000

    device_properties {
      device_type = "DISK"
      disk_address = {
        device_index = 1
        adapter_type = "SCSI"
      }
    }
  }

 # for nutanix_guest_tools
 disk_list {
   disk_size_bytes = 0

   data_source_reference = {}

   device_properties {
     device_type = "CDROM"
     disk_address = {
       device_index = "1"
       adapter_type = "SATA"
     }
   }
 }

  boot_type = "SECURE_BOOT"
  machine_type = "Q35"
  hardware_clock_timezone = "UTC"

  nic_list {
    subnet_uuid = nutanix_subnet.bla.id
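    # type = "ASSIGNED" requests this fixed address from the managed subnet's IPAM,
    # so the guest receives it from Nutanix DHCP on first boot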
    ip_endpoint_list {
      ip = "192.168.14.100"
      type = "ASSIGNED"
    }
  }

  guest_customization_cloud_init_user_data = base64encode(<<CLOUDINIT
#cloud-config
hostname: linux${count.index + 1}
ssh_pwauth: false
package_update: true
package_upgrade: true
timezone: CET
locale: C.UTF-8

users:
- default
- name: bla
  sudo: ALL=(ALL) NOPASSWD:ALL
  shell: /bin/bash
  passwd: $1$SaltSalt$YhgRYajLPrYevs14poKBQ0
  lock_passwd: false
  ssh_authorized_keys:
%{ for s in local.cluster.authorized_public_key_list ~}
  - ${s.key}
%{ endfor ~}
CLOUDINIT
)
}
ktinthecloud commented 11 months ago

Thanks @davhdavh. I tested the "alternative way" and it works for me, but if you don't deploy to an IPAM/DHCP VLAN then the packages listed in the cloud-config do not get installed (a possible workaround is sketched after the config). See my main.tf below:

terraform {
  required_providers {
    nutanix = {
      source  = "nutanix/nutanix"
      version = "1.9.2"
    }
  }
}

data "nutanix_cluster" "cluster" {
  name = var.cluster_name
}

data "nutanix_subnet" "subnet" {
  subnet_name = var.subnet_name

}

provider "nutanix" {
  username     = var.username
  password     = var.password
  endpoint     = var.endpoint
  insecure     = true
}

resource "nutanix_virtual_machine" "vm1" {
  name                 = var.vm_name
  cluster_uuid         = data.nutanix_cluster.cluster.id
  num_vcpus_per_socket = "2"
  num_sockets          = "1"
  memory_size_mib      = 4096
  boot_type            = "UEFI"

  disk_list {
    data_source_reference = {
      kind = "image"
      uuid = "<image_uuid_#>"
    }
  }

  nic_list {
    subnet_uuid = data.nutanix_subnet.subnet.id
  }

  guest_customization_cloud_init_user_data = base64encode(<<CLOUDINIT
#cloud-config
hostname: testubuntu2004
package_update: true
package_upgrade: true
timezone: America/Los_Angeles

power_state:
  mode: reboot
  delay: now
  message: Reboot after init

users:
- default
- name: user
  sudo: ALL=(ALL) NOPASSWD:ALL
  shell: /bin/bash
chpasswd: 
  list: |
    user:password
  expire: false
ssh_pwauth: true

packages:
- nginx
- nmap

apt:
  primary:
  - arches: [default]
    search_dns: true

write_files:
- path: /etc/netplan/50-cloud-init.yaml
  permissions: '0600'
  content: |
    network:
      version: 2
      ethernets:
        e:
          match:
            name: e*
          set-name: ens3
          addresses:
          - x.x.x.x/24
          routes:
          - to: default
            via: x.x.x.x
          nameservers:
            search: [domain.com]
            addresses:
            - x.x.x.x
            - x.x.x.x

CLOUDINIT
)
}
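A possible workaround for the missing packages on a VLAN without IPAM/DHCP (an untested sketch, not something verified in this thread): drop package_update, package_upgrade and packages from the cloud-config and install from runcmd instead, since the runcmd script runs in cloud-init's final stage, after netplan apply has brought the static address up:

runcmd:
- netplan apply
- [apt-get, update]
- [apt-get, install, -y, nginx, nmap]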