I'll check it out. Please add your Terraform configuration and reproduction steps. Thanks. 😄
So what I did is the following: I created a template from the CentOS cloud image using Ansible:

ansible-playbook image-upload.yml

The content of that playbook is:
---
- name: oVirt image template
  hosts: localhost
  connection: local
  gather_facts: false

  vars_files:
    - passwords.yml

  vars:
    engine_url: https://127.0.0.1/ovirt-engine/api
    engine_user: admin@internal
    engine_cafile: /etc/pki/ovirt-engine/ca.pem

    qcow_url: https://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud.qcow2
    template_cluster: MYCluster
    template_name: centos7_20180720
    template_operating_system: rhel_7x64
    template_memory: 4GiB
    template_type: server
    template_cpu: 4
    template_disk_size: 10GiB
    template_nics:
      - { name: nic1, profile_name: rhevm, interface: virtio }

  roles:
    - oVirt.image-template
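For completeness, passwords.yml only carries the engine credential that the role reads through vars_files; a minimal sketch, assuming the engine_password variable name conventionally used by the oVirt Ansible roles (ideally the file is encrypted with ansible-vault):

---
# passwords.yml - engine admin password consumed via vars_files above
engine_password: "MySecretEnginePassword"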
After the template was created, I used it for the next step. My .tf file:
variable "ovirt_url" {}
variable "ovirt_username" {}
variable "ovirt_pass" {}
provider "ovirt" {
username = "${var.ovirt_username}"
url = "${var.ovirt_url}"
password = "${var.ovirt_pass}"
}
data "ovirt_datacenters" "DC" {
name_regex = "MYDC"
}
data "ovirt_storagedomains" "sd" {
name_regex = "MYDC_DS"
search = {
criteria = "external_status = ok and datacenter = ${data.ovirt_datacenters.DC.datacenters.0.name}"
# max = 1
case_sensitive = false
}
}
resource "ovirt_disk" "my_disk_1" {
name = "my_disk_1"
alias = "my_disk_1"
size = 23687091200
format = "cow"
storage_domain_id = "${data.ovirt_storagedomains.sd.storagedomains.0.id}"
sparse = true
}
resource "ovirt_disk_attachment" "my_diskattachment_1" {
disk_id = "${ovirt_disk.my_disk_1.id}"
vm_id = "${ovirt_vm.my_vm_1.id}"
bootable = false
interface = "virtio"
}
data "ovirt_networks" "net01" {
name_regex = "MYNET"
search = {
criteria = "datacenter = ${data.ovirt_datacenters.DC.datacenters.0.name} and name = MYNET"
max = 1
case_sensitive = false
}
}
data "ovirt_networks" "net02" {
name_regex = "NET2"
search = {
criteria = "datacenter = ${data.ovirt_datacenters.DC.datacenters.0.name} and name = NET2"
max = 1
case_sensitive = false
}
}
resource "ovirt_vm" "my_vm_1" {
name = "my_vm_1"
cluster = "MYCluster"
authorized_ssh_key = "${file(pathexpand("~/.ssh/id_rsa.pub"))}"
network_interface {
network = "${data.ovirt_networks.net01.networks.0.name}"
label = "eth0"
boot_proto = "static"
ip_address = "10.11.22.250"
gateway = "10.11.23.254"
subnet_mask = "255.255.254.0"
}
network_interface {
network = "${data.ovirt_networks.net02.networks.0.name}"
label = "eth1"
boot_proto = "static"
ip_address = "10.10.11.231"
gateway = "10.10.11.254"
subnet_mask = "255.255.255.0"
}
template = "centos7_20180720"
}
Then I ran terraform apply:
terraform apply -auto-approve

data.ovirt_datacenters.DC: Refreshing state...
data.ovirt_networks.net01: Refreshing state...
data.ovirt_networks.net02: Refreshing state...
data.ovirt_storagedomains.sd: Refreshing state...
ovirt_disk.my_disk_1: Creating...
  alias:             "" => "my_disk_1"
  format:            "" => "cow"
  name:              "" => "my_disk_1"
  size:              "" => "23687091200"
  sparse:            "" => "true"
  storage_domain_id: "" => "40164660-8dbd-11e8-995c-74d02b9d5600"
ovirt_vm.my_vm_1: Creating...
  authorized_ssh_key:              "" => "ssh-rsa AAAAB3NzaC1yc2\n"
  cluster:                         "" => "MYCluster"
  cores:                           "" => "1"
  name:                            "" => "my_vm_1"
  network_interface.#:             "0" => "2"
  network_interface.0.boot_proto:  "" => "static"
  network_interface.0.gateway:     "" => "10.11.22.254"
  network_interface.0.ip_address:  "" => "10.11.23.250"
  network_interface.0.label:       "" => "eth0"
  network_interface.0.network:     "" => "MYNET"
  network_interface.0.on_boot:     "" => "true"
  network_interface.0.subnet_mask: "" => "255.255.254.0"
  network_interface.1.boot_proto:  "" => "static"
  network_interface.1.gateway:     "" => "10.10.11.254"
  network_interface.1.ip_address:  "" => "10.10.11.231"
  network_interface.1.label:       "" => "eth1"
  network_interface.1.network:     "" => "NET2"
  network_interface.1.on_boot:     "" => "true"
  network_interface.1.subnet_mask: "" => "255.255.255.0"
  sockets:                         "" => "1"
  template:                        "" => "centos7_20180720"
  threads:                         "" => "1"
ovirt_disk.my_disk_1: Creation complete after 1s (ID: dd1a0776-e0c9-4245-a664-01adfe4e7740)
ovirt_vm.my_vm_1: Still creating... (10s elapsed)
ovirt_vm.my_vm_1: Creation complete after 10s (ID: 838146cc-0064-4b2b-bb5a-e471aa62bd53)
ovirt_disk_attachment.my_diskattachment_1: Creating...
  active:               "" => "true"
  bootable:             "" => "false"
  disk_id:              "" => "dd1a0776-e0c9-4245-a664-01adfe4e7740"
  interface:            "" => "virtio"
  read_only:            "" => "false"
  use_scsi_reservation: "" => "false"
  vm_id:                "" => "838146cc-0064-4b2b-bb5a-e471aa62bd53"

Error: Error applying plan:

1 error(s) occurred:

* ovirt_disk_attachment.my_diskattachment_1: 1 error(s) occurred:

* ovirt_disk_attachment.my_diskattachment_1: Fault reason is "Operation Failed". Fault detail is "[Cannot attach Virtual Disk because the VM is in Image Locked status.]". HTTP response code is "409". HTTP response message is "409 Conflict".

Terraform does not automatically rollback in the face of errors.
Instead, your Terraform state file has been partially updated with
any resources that successfully completed. Please address the error
above and apply again to incrementally change your infrastructure.

make: *** [Makefile:3: apply] Error 1
steki@pc:~/k8s-dxc>
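While the template disk is still being copied, the locked state can be confirmed directly against the REST API, for example (VM ID taken from the output above, credentials are placeholders):

curl -ks -u 'admin@internal:password' \
  https://127.0.0.1/ovirt-engine/api/vms/838146cc-0064-4b2b-bb5a-e471aa62bd53

The response contains <status>image_locked</status> until the copy finishes, which is exactly the window in which the disk attachment fails with 409 Conflict.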
Here is an ASCII cast of the race condition. I have also "cleaned up" my Terraform file, so I can share it easily now :)
variable "ovirt_url" {}
variable "ovirt_username" {}
variable "ovirt_pass" {}
variable "dc_name_regex" {}
variable "dc_ds_name_regex" {}
variable "cl_name_regex" {}
variable "vm_net01_name" {}
variable "vm_net01_ip" {}
variable "vm_net01_gw" {}
variable "vm_net01_nm" {}
variable "vm_net02_name" {}
variable "vm_net02_ip" {}
variable "vm_net02_gw" {}
variable "vm_net02_nm" {}
variable "vm_name" {}
variable "vm_template" {}
variable "vm_disk_name" {}
variable "vm_disk_size" {}
provider "ovirt" {
username = "${var.ovirt_username}"
url = "${var.ovirt_url}"
password = "${var.ovirt_pass}"
}
data "ovirt_datacenters" "DC" {
name_regex = "${var.dc_name_regex}"
}
data "ovirt_storagedomains" "sd" {
name_regex = "${var.dc_ds_name_regex}"
search = {
criteria = "external_status = ok and datacenter = ${data.ovirt_datacenters.DC.datacenters.0.name}"
case_sensitive = false
}
}
resource "ovirt_disk" "additional_disk" {
name = "${var.vm_disk_name}"
alias = "${var.vm_disk_name}"
size = "${var.vm_disk_size}"
format = "cow"
storage_domain_id = "${data.ovirt_storagedomains.sd.storagedomains.0.id}"
sparse = true
}
resource "ovirt_disk_attachment" "my_diskattachment_1" {
disk_id = "${ovirt_disk.additional_disk.id}"
vm_id = "${ovirt_vm.vm01.id}"
bootable = false
interface = "virtio"
}
data "ovirt_networks" "net01" {
name_regex = "${var.vm_net01_name}"
search = {
criteria = "datacenter = ${data.ovirt_datacenters.DC.datacenters.0.name} and name = ${var.vm_net01_name}"
max = 1
case_sensitive = false
}
}
data "ovirt_networks" "net02" {
name_regex = "${var.vm_net02_name}"
search = {
criteria = "datacenter = ${data.ovirt_datacenters.DC.datacenters.0.name} and name = ${var.vm_net02_name}"
max = 1
case_sensitive = false
}
}
resource "ovirt_vm" "vm01" {
name = "${var.vm_name}"
cluster = "${var.cl_name_regex}"
authorized_ssh_key = "${file(pathexpand("~/.ssh/id_rsa.pub"))}"
network_interface {
network = "${data.ovirt_networks.net01.networks.0.name}"
label = "eth0"
boot_proto = "static"
ip_address = "${var.vm_net01_ip}"
gateway = "${var.vm_net01_gw}"
subnet_mask = "${var.vm_net01_nm}"
}
network_interface {
network = "${data.ovirt_networks.net02.networks.0.name}"
label = "eth1"
boot_proto = "static"
ip_address = "${var.vm_net02_ip}"
gateway = "${var.vm_net02_gw}"
subnet_mask = "${var.vm_net02_nm}"
}
template = "${var.vm_template}"
}
output "diskattachment_id" {
value = "${ovirt_disk_attachment.my_diskattachment_1.id}"
}
output "vm_id" {
value = "${ovirt_vm.vm01.id}"
}
output "network_id" {
value = "${data.ovirt_datacenters.DC.0.id}"
}
output "vm_additional_disk" {
value = "${ovirt_disk.additional_disk.id}"
}
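The variables are then supplied through a terraform.tfvars file (or -var flags); a sketch using the placeholder values from the first example:

ovirt_url        = "https://127.0.0.1/ovirt-engine/api"
ovirt_username   = "admin@internal"
ovirt_pass       = "secret"
dc_name_regex    = "MYDC"
dc_ds_name_regex = "MYDC_DS"
cl_name_regex    = "MYCluster"
vm_net01_name    = "MYNET"
vm_net01_ip      = "10.11.22.250"
vm_net01_gw      = "10.11.23.254"
vm_net01_nm      = "255.255.254.0"
vm_net02_name    = "NET2"
vm_net02_ip      = "10.10.11.231"
vm_net02_gw      = "10.10.11.254"
vm_net02_nm      = "255.255.255.0"
vm_name          = "my_vm_1"
vm_template      = "centos7_20180720"
vm_disk_name     = "my_disk_1"
vm_disk_size     = "23687091200"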
This is not the typical usage of oVirt. In oVirt you cannot create a VM from a template (the template already has a disk) and add an additional disk at the same time. You can create the VM first, and then update it to add the additional disk. If you insist on this usage, you need a scheme that checks the status of the VM and only adds the additional disk once the VM's status is OK.
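A minimal sketch of such a check in Go (the language the provider is written in); getVMStatus here is a hypothetical helper standing in for a GET /ovirt-engine/api/vms/{id} call, not the provider's actual API:

package ovirtwait

import (
	"fmt"
	"time"
)

// getVMStatus is a hypothetical helper that would fetch the VM's current
// status string (e.g. "image_locked", "down", "up") from the oVirt API.
func getVMStatus(vmID string) (string, error) {
	// ... GET /ovirt-engine/api/vms/{vmID} and parse <status> ...
	return "down", nil
}

// waitForVMUnlocked polls until the VM leaves image_locked, so that a
// subsequent disk attachment is no longer rejected with 409 Conflict.
func waitForVMUnlocked(vmID string, timeout time.Duration) error {
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		status, err := getVMStatus(vmID)
		if err != nil {
			return err
		}
		if status != "image_locked" {
			return nil // safe to attach the additional disk now
		}
		time.Sleep(5 * time.Second)
	}
	return fmt.Errorf("vm %s still image_locked after %s", vmID, timeout)
}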
@bmanojlovic There is currently no retry mechanism when attaching a new disk to a VM. I will work on it soon.
Hi @bmanojlovic, the PR #40 may fix this issue. Please let me know your result. Thanks.
As mentioned in #40, the issue now is that I do not receive the VM template name from the API, so some other racy thing is going on. I will continue to investigate.
When trying to create a VM from a template, it is possible to trigger a race condition if the machine is not yet fully deployed. API trace follows: