Closed chn-lee-yumi closed 2 years ago
Does restarting the instance fix it?
Nope. It doesn't work.
root@intel-compute-stick:~# lxc config get test1 limits.cpu
2
root@intel-compute-stick:~# lxc restart test1
root@intel-compute-stick:~# lxc exec test1 nproc
4
Ok, can you show lxc exec test1 -- cat /proc/cpuinfo, lxc exec test1 -- cat /proc/mounts and lxc exec test1 -- cat /sys/fs/cgroup/cpuset.cpus?
Here's the result:
root@intel-compute-stick:~# lxc exec test1 -- cat /proc/cpuinfo
processor : 0
vendor_id : GenuineIntel
cpu family : 6
model : 55
model name : Intel(R) Atom(TM) CPU Z3735F @ 1.33GHz
stepping : 8
microcode : 0x838
cpu MHz : 1666.000
cache size : 1024 KB
physical id : 0
siblings : 4
core id : 0
cpu cores : 4
apicid : 0
initial apicid : 0
fpu : yes
fpu_exception : yes
cpuid level : 11
wp : yes
flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx rdtscp lm constant_tsc arch_perfmon pebs bts rep_good nopl xtopology tsc_reliable nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq dtes64 monitor ds_cpl vmx est tm2 ssse3 cx16 xtpr pdcm sse4_1 sse4_2 movbe popcnt tsc_deadline_timer aes rdrand lahf_lm 3dnowprefetch epb pti ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid tsc_adjust smep erms dtherm ida arat md_clear
vmx flags : vnmi preemption_timer invvpid ept_x_only flexpriority tsc_offset vtpr mtf vapic ept vpid unrestricted_guest
bugs : cpu_meltdown spectre_v1 spectre_v2 mds msbds_only
bogomips : 2666.66
clflush size : 64
cache_alignment : 64
address sizes : 36 bits physical, 48 bits virtual
power management:
processor : 1
vendor_id : GenuineIntel
cpu family : 6
model : 55
model name : Intel(R) Atom(TM) CPU Z3735F @ 1.33GHz
stepping : 8
microcode : 0x838
cpu MHz : 499.800
cache size : 1024 KB
physical id : 0
siblings : 4
core id : 1
cpu cores : 4
apicid : 2
initial apicid : 2
fpu : yes
fpu_exception : yes
cpuid level : 11
wp : yes
flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx rdtscp lm constant_tsc arch_perfmon pebs bts rep_good nopl xtopology tsc_reliable nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq dtes64 monitor ds_cpl vmx est tm2 ssse3 cx16 xtpr pdcm sse4_1 sse4_2 movbe popcnt tsc_deadline_timer aes rdrand lahf_lm 3dnowprefetch epb pti ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid tsc_adjust smep erms dtherm ida arat md_clear
vmx flags : vnmi preemption_timer invvpid ept_x_only flexpriority tsc_offset vtpr mtf vapic ept vpid unrestricted_guest
bugs : cpu_meltdown spectre_v1 spectre_v2 mds msbds_only
bogomips : 2666.66
clflush size : 64
cache_alignment : 64
address sizes : 36 bits physical, 48 bits virtual
power management:
processor : 2
vendor_id : GenuineIntel
cpu family : 6
model : 55
model name : Intel(R) Atom(TM) CPU Z3735F @ 1.33GHz
stepping : 8
microcode : 0x838
cpu MHz : 1249.500
cache size : 1024 KB
physical id : 0
siblings : 4
core id : 2
cpu cores : 4
apicid : 4
initial apicid : 4
fpu : yes
fpu_exception : yes
cpuid level : 11
wp : yes
flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx rdtscp lm constant_tsc arch_perfmon pebs bts rep_good nopl xtopology tsc_reliable nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq dtes64 monitor ds_cpl vmx est tm2 ssse3 cx16 xtpr pdcm sse4_1 sse4_2 movbe popcnt tsc_deadline_timer aes rdrand lahf_lm 3dnowprefetch epb pti ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid tsc_adjust smep erms dtherm ida arat md_clear
vmx flags : vnmi preemption_timer invvpid ept_x_only flexpriority tsc_offset vtpr mtf vapic ept vpid unrestricted_guest
bugs : cpu_meltdown spectre_v1 spectre_v2 mds msbds_only
bogomips : 2666.66
clflush size : 64
cache_alignment : 64
address sizes : 36 bits physical, 48 bits virtual
power management:
processor : 3
vendor_id : GenuineIntel
cpu family : 6
model : 55
model name : Intel(R) Atom(TM) CPU Z3735F @ 1.33GHz
stepping : 8
microcode : 0x838
cpu MHz : 1332.086
cache size : 1024 KB
physical id : 0
siblings : 4
core id : 3
cpu cores : 4
apicid : 6
initial apicid : 6
fpu : yes
fpu_exception : yes
cpuid level : 11
wp : yes
flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx rdtscp lm constant_tsc arch_perfmon pebs bts rep_good nopl xtopology tsc_reliable nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq dtes64 monitor ds_cpl vmx est tm2 ssse3 cx16 xtpr pdcm sse4_1 sse4_2 movbe popcnt tsc_deadline_timer aes rdrand lahf_lm 3dnowprefetch epb pti ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid tsc_adjust smep erms dtherm ida arat md_clear
vmx flags : vnmi preemption_timer invvpid ept_x_only flexpriority tsc_offset vtpr mtf vapic ept vpid unrestricted_guest
bugs : cpu_meltdown spectre_v1 spectre_v2 mds msbds_only
bogomips : 2666.66
clflush size : 64
cache_alignment : 64
address sizes : 36 bits physical, 48 bits virtual
power management:
root@intel-compute-stick:~# lxc exec test1 -- cat /proc/mounts
/dev/mmcblk1p2 / btrfs rw,relatime,idmapped,ssd,space_cache=v2,user_subvol_rm_allowed,subvolid=263,subvol=/mnt/lxd_storage/containers/test1 0 0
none /dev tmpfs rw,relatime,size=492k,mode=755,uid=1000000,gid=1000000,inode64 0 0
proc /proc proc rw,nosuid,nodev,noexec,relatime 0 0
sysfs /sys sysfs rw,relatime 0 0
udev /dev/fuse devtmpfs rw,nosuid,relatime,size=909140k,nr_inodes=227285,mode=755,inode64 0 0
udev /dev/net/tun devtmpfs rw,nosuid,relatime,size=909140k,nr_inodes=227285,mode=755,inode64 0 0
binfmt_misc /proc/sys/fs/binfmt_misc binfmt_misc rw,nosuid,nodev,noexec,relatime 0 0
efivarfs /sys/firmware/efi/efivars efivarfs rw,nosuid,nodev,noexec,relatime 0 0
fusectl /sys/fs/fuse/connections fusectl rw,nosuid,nodev,noexec,relatime 0 0
pstore /sys/fs/pstore pstore rw,nosuid,nodev,noexec,relatime 0 0
configfs /sys/kernel/config configfs rw,nosuid,nodev,noexec,relatime 0 0
debugfs /sys/kernel/debug debugfs rw,nosuid,nodev,noexec,relatime 0 0
securityfs /sys/kernel/security securityfs rw,nosuid,nodev,noexec,relatime 0 0
tracefs /sys/kernel/tracing tracefs rw,nosuid,nodev,noexec,relatime 0 0
mqueue /dev/mqueue mqueue rw,nosuid,nodev,noexec,relatime 0 0
tmpfs /dev/lxd tmpfs rw,relatime,size=100k,mode=755,inode64 0 0
tmpfs /dev/.lxd-mounts tmpfs rw,relatime,size=100k,mode=711,inode64 0 0
none /sys/fs/cgroup cgroup2 rw,nosuid,nodev,noexec,relatime 0 0
lxcfs /proc/cpuinfo fuse.lxcfs rw,nosuid,nodev,relatime,user_id=0,group_id=0,allow_other 0 0
lxcfs /proc/diskstats fuse.lxcfs rw,nosuid,nodev,relatime,user_id=0,group_id=0,allow_other 0 0
lxcfs /proc/loadavg fuse.lxcfs rw,nosuid,nodev,relatime,user_id=0,group_id=0,allow_other 0 0
lxcfs /proc/meminfo fuse.lxcfs rw,nosuid,nodev,relatime,user_id=0,group_id=0,allow_other 0 0
lxcfs /proc/slabinfo fuse.lxcfs rw,nosuid,nodev,relatime,user_id=0,group_id=0,allow_other 0 0
lxcfs /proc/stat fuse.lxcfs rw,nosuid,nodev,relatime,user_id=0,group_id=0,allow_other 0 0
lxcfs /proc/swaps fuse.lxcfs rw,nosuid,nodev,relatime,user_id=0,group_id=0,allow_other 0 0
lxcfs /proc/uptime fuse.lxcfs rw,nosuid,nodev,relatime,user_id=0,group_id=0,allow_other 0 0
lxcfs /sys/devices/system/cpu fuse.lxcfs rw,nosuid,nodev,relatime,user_id=0,group_id=0,allow_other 0 0
udev /dev/full devtmpfs rw,nosuid,relatime,size=909140k,nr_inodes=227285,mode=755,inode64 0 0
udev /dev/null devtmpfs rw,nosuid,relatime,size=909140k,nr_inodes=227285,mode=755,inode64 0 0
udev /dev/random devtmpfs rw,nosuid,relatime,size=909140k,nr_inodes=227285,mode=755,inode64 0 0
udev /dev/tty devtmpfs rw,nosuid,relatime,size=909140k,nr_inodes=227285,mode=755,inode64 0 0
udev /dev/urandom devtmpfs rw,nosuid,relatime,size=909140k,nr_inodes=227285,mode=755,inode64 0 0
udev /dev/zero devtmpfs rw,nosuid,relatime,size=909140k,nr_inodes=227285,mode=755,inode64 0 0
devpts /dev/pts devpts rw,nosuid,noexec,relatime,gid=1000005,mode=620,ptmxmode=666,max=1024 0 0
devpts /dev/ptmx devpts rw,nosuid,noexec,relatime,gid=1000005,mode=620,ptmxmode=666,max=1024 0 0
devpts /dev/console devpts rw,nosuid,noexec,relatime,gid=1000005,mode=620,ptmxmode=666,max=1024 0 0
none /proc/sys/kernel/random/boot_id tmpfs ro,nosuid,nodev,noexec,relatime,size=492k,mode=755,uid=1000000,gid=1000000,inode64 0 0
tmpfs /dev/shm tmpfs rw,nosuid,nodev,uid=1000000,gid=1000000,inode64 0 0
tmpfs /run tmpfs rw,nosuid,nodev,size=386384k,nr_inodes=819200,mode=755,uid=1000000,gid=1000000,inode64 0 0
tmpfs /run/lock tmpfs rw,nosuid,nodev,noexec,relatime,size=5120k,uid=1000000,gid=1000000,inode64 0 0
root@intel-compute-stick:~# lxc exec test1 -- cat /sys/fs/cgroup/cpuset.cpus
The last command returned an empty line.
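A couple of related checks that might narrow this down (just a sketch; the lxc.payload.test1 path assumes the usual LXD cgroup naming):
lxc exec test1 -- cat /sys/fs/cgroup/cpuset.cpus.effective   # the CPU set the container is actually allowed to use
cat /sys/fs/cgroup/lxc.payload.test1/cpuset.cpus             # on the host: the value LXD should have written for the limit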
Here is some additional info:
As I mentioned before, I am running a cluster with two ARM devices and two x86 devices. I tried creating instances on different hosts. Interestingly, the cpu limits worked on one of the x86 devices. These two x86 devices' operating systems were installed from the same ISO, so that's strange.
The working device is an off-brand HTPC with an Atom Z8350. The non-working device is an Intel Compute Stick with an Atom Z3735F.
Hmm, odd. Can you show lxc exec test1 -- cat /proc/self/cgroup and also cat /var/snap/lxd/common/lxd/logs/test1/lxc.conf?
Here's the output:
root@intel-compute-stick:~# lxc exec test1 -- cat /proc/self/cgroup
0::/.lxc
root@intel-compute-stick:~# cat /var/snap/lxd/common/lxd/logs/test1/lxc.conf
lxc.log.file = /var/snap/lxd/common/lxd/logs/test1/lxc.log
lxc.log.level = warn
lxc.console.buffer.size = auto
lxc.console.size = auto
lxc.console.logfile = /var/snap/lxd/common/lxd/logs/test1/console.log
lxc.mount.auto = proc:rw sys:rw cgroup:rw:force
lxc.autodev = 1
lxc.pty.max = 1024
lxc.mount.entry = /dev/fuse dev/fuse none bind,create=file,optional 0 0
lxc.mount.entry = /dev/net/tun dev/net/tun none bind,create=file,optional 0 0
lxc.mount.entry = /proc/sys/fs/binfmt_misc proc/sys/fs/binfmt_misc none rbind,create=dir,optional 0 0
lxc.mount.entry = /sys/firmware/efi/efivars sys/firmware/efi/efivars none rbind,create=dir,optional 0 0
lxc.mount.entry = /sys/fs/fuse/connections sys/fs/fuse/connections none rbind,create=dir,optional 0 0
lxc.mount.entry = /sys/fs/pstore sys/fs/pstore none rbind,create=dir,optional 0 0
lxc.mount.entry = /sys/kernel/config sys/kernel/config none rbind,create=dir,optional 0 0
lxc.mount.entry = /sys/kernel/debug sys/kernel/debug none rbind,create=dir,optional 0 0
lxc.mount.entry = /sys/kernel/security sys/kernel/security none rbind,create=dir,optional 0 0
lxc.mount.entry = /sys/kernel/tracing sys/kernel/tracing none rbind,create=dir,optional 0 0
lxc.mount.entry = /dev/mqueue dev/mqueue none rbind,create=dir,optional 0 0
lxc.include = /snap/lxd/current/lxc/config//common.conf.d/
lxc.arch = linux64
lxc.hook.version = 1
lxc.hook.pre-start = /proc/1315/exe callhook /var/snap/lxd/common/lxd "default" "test1" start
lxc.hook.stop = /snap/lxd/current/bin/lxd callhook /var/snap/lxd/common/lxd "default" "test1" stopns
lxc.hook.post-stop = /snap/lxd/current/bin/lxd callhook /var/snap/lxd/common/lxd "default" "test1" stop
lxc.tty.max = 0
lxc.uts.name = test1
lxc.mount.entry = /var/snap/lxd/common/lxd/devlxd dev/lxd none bind,create=dir 0 0
lxc.apparmor.profile = lxd-test1_</var/snap/lxd/common/lxd>//&:lxd-test1_<var-snap-lxd-common-lxd>:
lxc.seccomp.profile = /var/snap/lxd/common/lxd/security/seccomp/test1
lxc.idmap = u 0 1000000 1000000000
lxc.idmap = g 0 1000000 1000000000
lxc.mount.auto = shmounts:/var/snap/lxd/common/lxd/shmounts/test1:/dev/.lxd-mounts
lxc.net.0.type = phys
lxc.net.0.name = eth0
lxc.net.0.flags = up
lxc.net.0.link = vethfa56f4bb
lxc.rootfs.path = dir:/var/snap/lxd/common/lxd/storage-pools/local/containers/test1/rootfs
lxc.rootfs.options = idmap=container
And that's on a container which had a limits.cpu set prior to starting?
It's very odd because that lxc.conf doesn't have any cgroup keys set in it.
Can you do lxc restart test1 --force and then show the content of lxc.conf again?
Also, can you show lxc warning list?
I figured out the key to this issue: after installing LXD, the host needs to be rebooted.
The reason the cpu limits work on my HTPC is that it is unstable and often hangs, so it has already been rebooted. The other devices haven't been rebooted since installing LXD, so the cpu limits didn't work on them.
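A quick way to see whether this is the cpuset controller only becoming available after a reboot (a sketch, not verified on the affected hosts):
cat /sys/fs/cgroup/cgroup.controllers      # on the host: is "cpuset" listed at all?
cat /sys/fs/cgroup/cgroup.subtree_control  # is it delegated to child cgroups (and so to LXD)?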
Thanks for your help. I still have another problem with swap; I will open a new issue.
And that's on a container which had a limits.cpu set prior to starting?
I start the container using lxc launch first and then set the limits with lxc config set $name limits.cpu $core.
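For what it's worth, the limit can also be passed at launch time so the container starts with it already applied (a sketch; the image alias is just an example):
lxc launch ubuntu:22.04 test1 -c limits.cpu=2
lxc exec test1 -- nproc   # should report 2 once the cpuset controller is available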
Can you do lxc restart test1 --force and then show the content of lxc.conf again?
The output below was captured while limits.cpu was working. I notice that after restarting the container, the lxc.conf shows the cgroup2.memory keys, but limits.memory and limits.cpu work both before and after the restart.
Here's the diff:
root@intel-compute-stick:~# cat /var/snap/lxd/common/lxd/logs/test1/lxc.conf > 1.txt
root@intel-compute-stick:~# lxc restart test1 --force
root@intel-compute-stick:~# cat /var/snap/lxd/common/lxd/logs/test1/lxc.conf > 2.txt
root@intel-compute-stick:~# diff 1.txt 2.txt
32a33,35
> lxc.cgroup2.memory.max = 512000000
> lxc.cgroup2.memory.swap.max = 0
> lxc.cgroup2.memory.low = 460800000
37c40
< lxc.net.0.link = veth5b762610
---
> lxc.net.0.link = veth6b8cfd7b
Also, can you show lxc warning list?
root@intel-compute-stick:~# lxc warning list
+--------------------------------------+------------------------------------------------------+--------+----------+-------+---------+-----------+-----------------------------+
| UUID | TYPE | STATUS | SEVERITY | COUNT | PROJECT | LOCATION | LAST SEEN |
+--------------------------------------+------------------------------------------------------+--------+----------+-------+---------+-----------+-----------------------------+
| 7f731b49-fa88-405c-820c-55286f0d1a81 | Couldn't find the CGroup network priority controller | NEW | LOW | 1 | | 10.0.0.15 | Oct 6, 2022 at 2:04am (UTC) |
+--------------------------------------+------------------------------------------------------+--------+----------+-------+---------+-----------+-----------------------------+
| 72db5497-859f-447e-b824-496feb4b43e6 | Couldn't find the CGroup hugetlb controller | NEW | LOW | 1 | | 10.0.0.15 | Oct 6, 2022 at 2:04am (UTC) |
+--------------------------------------+------------------------------------------------------+--------+----------+-------+---------+-----------+-----------------------------+
| 637c43f7-a98b-4f92-9638-270c9790de2a | Offline cluster member | NEW | LOW | 5445 | | 10.0.0.15 | Oct 7, 2022 at 3:12am (UTC) |
+--------------------------------------+------------------------------------------------------+--------+----------+-------+---------+-----------+-----------------------------+
| 2927619d-1279-4346-a15b-f2a967106fab | Instance type not operational | NEW | LOW | 1 | | 10.0.0.15 | Oct 6, 2022 at 2:04am (UTC) |
+--------------------------------------+------------------------------------------------------+--------+----------+-------+---------+-----------+-----------------------------+
| aeb9364c-c8c4-4af3-8e0b-156edf6f4496 | Couldn't find the CGroup network priority controller | NEW | LOW | 4 | | 10.0.0.18 | Oct 6, 2022 at 8:36am (UTC) |
+--------------------------------------+------------------------------------------------------+--------+----------+-------+---------+-----------+-----------------------------+
| f60f5ae8-c8fe-4b38-8805-b871d9e74a8d | Couldn't find the CGroup network priority controller | NEW | LOW | 4 | | 10.0.0.17 | Oct 7, 2022 at 3:08am (UTC) |
+--------------------------------------+------------------------------------------------------+--------+----------+-------+---------+-----------+-----------------------------+
So, by the way, is it possible to enable the cpu limit without a restart?
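One thing I plan to try (not verified; it assumes the only problem is that the cpuset controller hasn't been enabled in the cgroup v2 tree yet):
echo "+cpuset" > /sys/fs/cgroup/cgroup.subtree_control   # enable the controller for child cgroups on the host
lxc restart test1
lxc exec test1 -- nproc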
Required information
Issue description
The command lxc config set xxx limits.cpu is not working. I have noticed there is an existing issue https://github.com/lxc/lxd/issues/8896, and I can set the cpu limit through a command like echo 2-3 > /sys/fs/cgroup/lxc.payload.test1/cpuset.cpus. So I wonder: is this a bug in LXD? I am using an LXD cluster:
Steps to reproduce
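Roughly (the image alias here is an example; the one I actually used may differ):
lxc launch ubuntu:22.04 test1
lxc config set test1 limits.cpu 2
lxc exec test1 -- nproc   # still reports the full host CPU count instead of 2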
Information to attach
Any relevant kernel output (dmesg): nothing relevant
Container log (lxc info NAME --show-log):
Resources:
  Processes: 30
  CPU usage:
    CPU usage (in seconds): 167
  Memory usage:
    Memory (current): 127.44MiB
    Swap (current): 35.23MiB
  Network usage:
    eth0:
      Type: broadcast
      State: UP
      Host interface: vetha000606
      MAC address: 00:16:3e:f6:47:33
      MTU: 1400
      Bytes received: 13.06MB
      Bytes sent: 114.82MB
      Packets received: 47986
      Packets sent: 11242
      IP addresses:
        inet:  10.0.6.6/26 (global)
        inet6: fe80::216:3eff:fef6:4733/64 (link)
    lo:
      Type: loopback
      State: UP
      MTU: 65536
      Bytes received: 2.72kB
      Bytes sent: 2.72kB
      Packets received: 24
      Packets sent: 24
      IP addresses:
        inet:  127.0.0.1/8 (local)
        inet6: ::1/128 (local)
Log:
lxc test1 20221006021051.220 WARN conf - ../src/src/lxc/conf.c:lxc_map_ids:3592 - newuidmap binary is missing
lxc test1 20221006021051.221 WARN conf - ../src/src/lxc/conf.c:lxc_map_ids:3598 - newgidmap binary is missing
lxc test1 20221006021051.225 WARN conf - ../src/src/lxc/conf.c:lxc_map_ids:3592 - newuidmap binary is missing
lxc test1 20221006021051.225 WARN conf - ../src/src/lxc/conf.c:lxc_map_ids:3598 - newgidmap binary is missing
Container configuration:
architecture: x86_64
config:
  cloud-init.network-config: |
    version: 1
    config:
      - type: nameserver
        address: 114.114.114.114
  cloud-init.user-data: |
    #cloud-config
    packages: ['openssh-server']
    apt:
      primary:
    users:
Output of the daemon with --debug (or use lxc monitor while reproducing the issue):
location: 10.0.0.17 metadata: context: fingerprint: da084e52cadb5a294119ee0a913a3714522cfe5dcf47f3812cd2fc375e4edabf subject: CN=root@orangepizero,O=linuxcontainers.org level: debug message: Matched trusted cert timestamp: "2022-10-06T02:57:27.804069328Z" type: logging
location: 10.0.0.17 metadata: context: raftMembers: '[{{1 10.0.0.15:8443 voter} 10.0.0.15} {{2 10.0.0.16:8443 voter} 10.0.0.16} {{3 10.0.0.18:8443 spare} 10.0.0.18} {{4 10.0.0.17:8443 voter} 10.0.0.17}]' level: debug message: Replace current raft nodes timestamp: "2022-10-06T02:57:27.806430336Z" type: logging
location: 10.0.0.15 metadata: context: err: 'Failed to send heartbeat request: Put "https://10.0.0.18:8443/internal/database": context deadline exceeded (Client.Timeout exceeded while awaiting headers)' remote: 10.0.0.18:8443 level: warning message: Failed heartbeat timestamp: "2022-10-06T10:57:28.59878342+08:00" type: logging
location: 10.0.0.17 metadata: context: ip: '@' method: GET protocol: unix url: /1.0 username: root level: debug message: Handling API request timestamp: "2022-10-06T02:57:31.979611783Z" type: logging
location: 10.0.0.17 metadata: context: ip: '@' method: GET protocol: unix url: /1.0/instances/test1 username: root level: debug message: Handling API request timestamp: "2022-10-06T02:57:31.99040012Z" type: logging
location: 10.0.0.17 metadata: context: ip: '@' method: GET protocol: unix url: /1.0/events username: root level: debug message: Handling API request timestamp: "2022-10-06T02:57:32.118770953Z" type: logging
location: 10.0.0.17 metadata: context: ip: '@' method: PUT protocol: unix url: /1.0/instances/test1 username: root level: debug message: Handling API request timestamp: "2022-10-06T02:57:32.122284915Z" type: logging
location: 10.0.0.17 metadata: context: id: cae678c8-0e5a-49ab-b966-938706d6aef4 local: /var/snap/lxd/common/lxd/unix.socket remote: '@' level: debug message: Event listener server handler started timestamp: "2022-10-06T02:57:32.140826359Z" type: logging
location: 10.0.0.17 metadata: context: class: task description: Updating instance operation: 3eab680f-2784-48c4-8568-1d36e0456ea0 project: default level: debug message: New operation timestamp: "2022-10-06T02:57:32.324175107Z" type: logging
location: 10.0.0.17 metadata: context: class: task description: Updating instance operation: 3eab680f-2784-48c4-8568-1d36e0456ea0 project: default level: debug message: Started operation timestamp: "2022-10-06T02:57:32.324620336Z" type: logging
location: 10.0.0.17 metadata: class: task created_at: "2022-10-06T02:57:32.274166249Z" description: Updating instance err: "" id: 3eab680f-2784-48c4-8568-1d36e0456ea0 location: 10.0.0.17 may_cancel: false metadata: null resources: containers:
location: 10.0.0.17 metadata: class: task created_at: "2022-10-06T02:57:32.274166249Z" description: Updating instance err: "" id: 3eab680f-2784-48c4-8568-1d36e0456ea0 location: 10.0.0.17 may_cancel: false metadata: null resources: containers:
location: 10.0.0.17 metadata: context: action: update instance: test1 project: default reusable: "false" level: debug message: Instance operation lock created timestamp: "2022-10-06T02:57:32.325991409Z" type: logging
location: 10.0.0.17 metadata: context: ip: '@' method: GET protocol: unix url: /1.0/operations/3eab680f-2784-48c4-8568-1d36e0456ea0 username: root level: debug message: Handling API request timestamp: "2022-10-06T02:57:32.329003899Z" type: logging
location: 10.0.0.17 metadata: context: instance: test1 project: default level: debug message: UpdateInstanceBackupFile started timestamp: "2022-10-06T02:57:32.680359687Z" type: logging
location: 10.0.0.17 metadata: context: driver: btrfs pool: local refCount: "1" volName: test1 level: debug message: Skipping unmount as in use timestamp: "2022-10-06T02:57:32.82391784Z" type: logging
location: 10.0.0.17 metadata: context: action: update err: instance: test1 project: default reusable: "false" level: debug message: Instance operation lock finished timestamp: "2022-10-06T02:57:32.825506097Z" type: logging
location: 10.0.0.17 metadata: class: task created_at: "2022-10-06T02:57:32.274166249Z" description: Updating instance err: "" id: 3eab680f-2784-48c4-8568-1d36e0456ea0 location: 10.0.0.17 may_cancel: false metadata: null resources: containers:
location: 10.0.0.17 metadata: context: class: task description: Updating instance operation: 3eab680f-2784-48c4-8568-1d36e0456ea0 project: default level: debug message: Success for operation timestamp: "2022-10-06T02:57:32.825763337Z" type: logging
location: 10.0.0.17 metadata: action: instance-updated source: /1.0/instances/test1 project: default timestamp: "2022-10-06T02:57:32.825321852Z" type: lifecycle
location: 10.0.0.17 metadata: context: instance: test1 project: default level: debug message: UpdateInstanceBackupFile finished timestamp: "2022-10-06T02:57:32.825128872Z" type: logging
location: 10.0.0.17 metadata: context: listener: cae678c8-0e5a-49ab-b966-938706d6aef4 local: /var/snap/lxd/common/lxd/unix.socket remote: '@' level: debug message: Event listener server handler stopped timestamp: "2022-10-06T02:57:32.833234761Z" type: logging
location: 10.0.0.17 metadata: context: fingerprint: da084e52cadb5a294119ee0a913a3714522cfe5dcf47f3812cd2fc375e4edabf subject: CN=root@orangepizero,O=linuxcontainers.org level: debug message: Matched trusted cert timestamp: "2022-10-06T02:57:34.938564523Z" type: logging
location: 10.0.0.17 metadata: context: raftMembers: '[{{4 10.0.0.17:8443 voter} 10.0.0.17} {{1 10.0.0.15:8443 voter} 10.0.0.15} {{2 10.0.0.16:8443 voter} 10.0.0.16} {{3 10.0.0.18:8443 spare} 10.0.0.18}]' level: debug message: Replace current raft nodes timestamp: "2022-10-06T02:57:34.941327009Z" type: logging
location: 10.0.0.17 metadata: context: attempt: "0" err: 'Delete "operations": database is locked' level: debug message: Database error, retrying timestamp: "2022-10-06T02:57:37.847404316Z" type: logging
location: 10.0.0.15 metadata: context: err: 'Failed to send heartbeat request: Put "https://10.0.0.18:8443/internal/database": context deadline exceeded (Client.Timeout exceeded while awaiting headers)' remote: 10.0.0.18:8443 level: warning message: Failed heartbeat timestamp: "2022-10-06T10:57:38.423984104+08:00" type: logging
location: 10.0.0.17 metadata: context: fingerprint: da084e52cadb5a294119ee0a913a3714522cfe5dcf47f3812cd2fc375e4edabf subject: CN=root@orangepizero,O=linuxcontainers.org level: debug message: Matched trusted cert timestamp: "2022-10-06T02:57:47.709804613Z" type: logging
location: 10.0.0.17 metadata: context: raftMembers: '[{{1 10.0.0.15:8443 voter} 10.0.0.15} {{2 10.0.0.16:8443 voter} 10.0.0.16} {{3 10.0.0.18:8443 spare} 10.0.0.18} {{4 10.0.0.17:8443 voter} 10.0.0.17}]' level: debug message: Replace current raft nodes timestamp: "2022-10-06T02:57:47.712208005Z" type: logging
location: 10.0.0.15 metadata: context: err: 'Failed to send heartbeat request: Put "https://10.0.0.18:8443/internal/database": context deadline exceeded (Client.Timeout exceeded while awaiting headers)' remote: 10.0.0.18:8443 level: warning message: Failed heartbeat timestamp: "2022-10-06T10:57:50.53197956+08:00" type: logging
location: 10.0.0.17 metadata: context: fingerprint: da084e52cadb5a294119ee0a913a3714522cfe5dcf47f3812cd2fc375e4edabf subject: CN=root@orangepizero,O=linuxcontainers.org level: debug message: Matched trusted cert timestamp: "2022-10-06T02:57:53.445618544Z" type: logging
location: 10.0.0.17 metadata: context: raftMembers: '[{{2 10.0.0.16:8443 voter} 10.0.0.16} {{3 10.0.0.18:8443 spare} 10.0.0.18} {{4 10.0.0.17:8443 voter} 10.0.0.17} {{1 10.0.0.15:8443 voter} 10.0.0.15}]' level: debug message: Replace current raft nodes timestamp: "2022-10-06T02:57:53.448173025Z" type: logging
location: 10.0.0.15 metadata: context: err: 'Failed to send heartbeat request: Put "https://10.0.0.18:8443/internal/database": context deadline exceeded (Client.Timeout exceeded while awaiting headers)' remote: 10.0.0.18:8443 level: warning message: Failed heartbeat timestamp: "2022-10-06T10:57:59.077516382+08:00" type: logging