Open Dawidro opened 2 years ago
@Dawidro Did you fix your reported issue? There is also the following issue https://github.com/dmacvicar/terraform-provider-libvirt/issues/840 from https://github.com/Dr4s1l, but there is only the mentioned reason of "User permission issue".
I reproduced your steps and your error. I have the following versions:
dmacvicar/terraform-provider-libvirt@0.6.11
terraform@1.0.8
I tried it with a simpler configuration, my libvirt.tf
looks like this:
terraform {
required_version = ">= 1.0.0"
required_providers {
libvirt = {
source = "dmacvicar/libvirt"
version = "0.6.11"
}
}
}
provider "libvirt" {
uri = "qemu:///system"
}
#provider "libvirt" {
# alias = "server2"
# uri = "qemu+ssh://root@192.168.100.10/system"
#}
resource "libvirt_volume" "centos7-qcow2" {
name = "centos7.qcow2"
pool = "vdisk"
source = "https://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud.qcow2"
#source = "./CentOS-7-x86_64-GenericCloud.qcow2"
format = "qcow2"
}
# Define KVM domain to create
resource "libvirt_domain" "db1" {
name = "db1"
memory = "1024"
vcpu = 1
network_interface {
network_name = "default"
}
disk {
volume_id = "${libvirt_volume.centos7-qcow2.id}"
}
console {
type = "pty"
target_type = "serial"
target_port = "0"
}
graphics {
type = "spice"
listen_type = "address"
autoport = true
}
}
Also I have .terraform/providers/registry.terraform.io/dmacvicar/libvirt/0.6.11/linux_amd64/terraform-provider-libvirt
.
I also created a storage pool vdisk
, which is configured like this:
<pool type='dir'>
<name>vdisk</name>
<uuid>957def39-9993-4a3f-b288-9d66a3a07c34</uuid>
<capacity unit='bytes'>134145380352</capacity>
<allocation unit='bytes'>17943326720</allocation>
<available unit='bytes'>116202053632</available>
<source>
</source>
<target>
<path>/mnt/pool</path>
<permissions>
<mode>0777</mode>
<owner>0</owner>
<group>0</group>
</permissions>
</target>
</pool>
The storage pool vdisk
is active:
$ virsh pool-list
Name State Autostart
-----------------------------
vdisk active yes
$ terraform init
$ terraform apply
will result in
libvirt_volume.centos7-qcow2: Creating...
╷
│ Error: can't find storage pool 'vdisk'
│
│ with libvirt_volume.centos7-qcow2,
│ on libvirt.tf line 20, in resource "libvirt_volume" "centos7-qcow2":
│ 20: resource "libvirt_volume" "centos7-qcow2" {
@dmacvicar I'm having sort of the same problem. I'm trying to use terraform-provider-libvirt
to create a VM with a disk on an existing storage pool, and if that's possible, I'm not very clear on how to do that.
Is it possible?
Description of Issue/Question
Storage pool is defined with:
$ virsh pool-define-as --name distro-pool --type dir --target $HOME/terraform/volumes
Then started with: $ virsh pool-start distro-pool
$ virsh pool-list shows it is active
Name State Autostart default active yes distro-pool active yes gnome-boxes active yes
virsh pool-info distro-poolName: distro-pool UUID: 7c0fdfa9-ee05-4e13-a746-9ff3801cec5e State: running Persistent: yes Autostart: yes Capacity: 427.10 GiB Allocation: 21.19 GiB Available: 405.91 GiB
Planning completed without errors, but upon "terraform apply -auto-approve" Terraform complains that it can't find "distro-pool"` # libvirt_domain.domain-distro[0] will be created
resource "libvirt_domain" "domain-distro" {
arch = (known after apply)
cloudinit = (known after apply)
disk = [
emulator = (known after apply)
fw_cfg_name = "opt/com.coreos/config"
id = (known after apply)
machine = (known after apply)
memory = 4096
name = "arch"
qemu_agent = false
running = true
vcpu = 2
console {
console {
network_interface {
libvirt_domain.domain-distro[1] will be created
resource "libvirt_domain" "domain-distro" {
arch = (known after apply)
cloudinit = (known after apply)
disk = [
emulator = (known after apply)
fw_cfg_name = "opt/com.coreos/config"
id = (known after apply)
machine = (known after apply)
memory = 4096
name = "debian1"
qemu_agent = false
running = true
vcpu = 2
console {
console {
network_interface {
libvirt_domain.domain-distro[2] will be created
resource "libvirt_domain" "domain-distro" {
arch = (known after apply)
cloudinit = (known after apply)
disk = [
emulator = (known after apply)
fw_cfg_name = "opt/com.coreos/config"
id = (known after apply)
machine = (known after apply)
memory = 4096
name = "debian2"
qemu_agent = false
running = true
vcpu = 2
console {
console {
network_interface {
libvirt_domain.domain-distro[3] will be created
resource "libvirt_domain" "domain-distro" {
arch = (known after apply)
cloudinit = (known after apply)
disk = [
emulator = (known after apply)
fw_cfg_name = "opt/com.coreos/config"
id = (known after apply)
machine = (known after apply)
memory = 4096
name = "debian3"
qemu_agent = false
running = true
vcpu = 2
console {
console {
network_interface {
libvirt_volume.distro-qcow2[0] will be created
resource "libvirt_volume" "distro-qcow2" {
libvirt_volume.distro-qcow2[1] will be created
resource "libvirt_volume" "distro-qcow2" {
libvirt_volume.distro-qcow2[2] will be created
resource "libvirt_volume" "distro-qcow2" {
libvirt_volume.distro-qcow2[3] will be created
Plan: 12 to add, 0 to change, 0 to destroy. libvirt_cloudinit_disk.commoninit[3]: Creating... libvirt_volume.distro-qcow2[0]: Creating... libvirt_cloudinit_disk.commoninit[1]: Creating... libvirt_cloudinit_disk.commoninit[0]: Creating... libvirt_cloudinit_disk.commoninit[2]: Creating... libvirt_volume.distro-qcow2[3]: Creating... libvirt_volume.distro-qcow2[1]: Creating... libvirt_volume.distro-qcow2[2]: Creating... ╷ │ Error: can't find storage pool 'distro-pool' │ │ with libvirt_volume.distro-qcow2[0], │ on main.tf line 14, in resource "libvirt_volume" "distro-qcow2": │ 14: resource "libvirt_volume" "distro-qcow2" { │ ╵ ╷ │ Error: can't find storage pool 'distro-pool' │ │ with libvirt_volume.distro-qcow2[1], │ on main.tf line 14, in resource "libvirt_volume" "distro-qcow2": │ 14: resource "libvirt_volume" "distro-qcow2" { │ ╵ ╷ │ Error: can't find storage pool 'distro-pool' │ │ with libvirt_volume.distro-qcow2[3], │ on main.tf line 14, in resource "libvirt_volume" "distro-qcow2": │ 14: resource "libvirt_volume" "distro-qcow2" { │ ╵ ╷ │ Error: can't find storage pool 'distro-pool' │ │ with libvirt_volume.distro-qcow2[2], │ on main.tf line 14, in resource "libvirt_volume" "distro-qcow2": │ 14: resource "libvirt_volume" "distro-qcow2" { │ ╵ ╷ │ Error: can't find storage pool 'distro-pool' │ │ with libvirt_cloudinit_disk.commoninit[2], │ on main.tf line 22, in resource "libvirt_cloudinit_disk" "commoninit": │ 22: resource "libvirt_cloudinit_disk" "commoninit" { │ ╵ ╷ │ Error: can't find storage pool 'distro-pool' │ │ with libvirt_cloudinit_disk.commoninit[1], │ on main.tf line 22, in resource "libvirt_cloudinit_disk" "commoninit": │ 22: resource "libvirt_cloudinit_disk" "commoninit" { │ ╵ ╷ │ Error: can't find storage pool 'distro-pool' │ │ with libvirt_cloudinit_disk.commoninit[3], │ on main.tf line 22, in resource "libvirt_cloudinit_disk" "commoninit": │ 22: resource "libvirt_cloudinit_disk" "commoninit" { │ ╵ ╷ │ Error: can't find storage pool 'distro-pool' │ │ 
with libvirt_cloudinit_disk.commoninit[0], │ on main.tf line 22, in resource "libvirt_cloudinit_disk" "commoninit": │ 22: resource "libvirt_cloudinit_disk" "commoninit" { │ ╵ `
Setup
GNU nano 5.6.1 main.tf
` terraform { required_version = ">= 0.13" required_providers { libvirt = { source = "dmacvicar/libvirt" } } }
provider "libvirt" { uri = "qemu:///system" }
resource "libvirt_volume" "distro-qcow2" { count = var.hosts name = "${var.distros[count.index]}.qcow2" pool = "distro-pool" source = "${path.module}/sources/${var.distros[count.index]}.qcow2" format = "qcow2" }
resource "libvirt_cloudinit_disk" "commoninit" { count = var.hosts name = "commoninit-${var.vm_names[count.index]}.iso" pool = "distro-pool" user_data = templatefile("${path.module}/templates/user_data.tpl", { host_name = var.distros[count.index] auth_key = file("${path.module}/ssh/id_rsa.pub") })
network_config = templatefile("${path.module}/templates/network_config.tpl", { interface = var.interface ip_addr = var.ips[count.index] mac_addr = var.macs[count.index] }) }
resource "libvirt_domain" "domain-distro" { count = var.hosts name = var.distros[count.index] memory = var.memory vcpu = var.vcpu
cloudinit = element(libvirt_cloudinit_disk.commoninit.*.id, count.index)
network_interface { network_name = "default" addresses = [var.ips[count.index]] mac = var.macs[count.index] }
console { type = "pty" target_port = "0" target_type = "serial" }
console { type = "pty" target_port = "1" target_type = "virtio" }
disk { volume_id = element(libvirt_volume.distro-qcow2.*.id, count.index) } }
`
Steps to Reproduce Issue
terraform apply
Additional information:
Do you have SELinux or Apparmor/Firewall enabled? Some special configuration? Have you tried to reproduce the issue without them enabled? No