Closed: DeftaSebastian closed this issue 1 month ago.
Hello @DeftaSebastian, thank you for the detailed report of this issue.
We have seen the warning for the systemd cgroups in the past too. It is on our TODO list to find the cause and address it, but so far it has not seemed to affect urunc execution. Would it be possible for you to post the containerd config file?
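In case it helps, the file usually lives at /etc/containerd/config.toml, and containerd can also print the merged configuration it is actually running with:

```sh
# Print the effective configuration containerd is running with
containerd config dump

# Or view the on-disk file (default location; your path may differ)
cat /etc/containerd/config.toml
```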
From the logs it seems that urunc reaches the point of executing the VMM (Solo5) to boot the unikernel. However, something did not go well with the VMM boot itself. Would it be possible for you to try out a few more images? For instance, you can try:
You can also try the end-to-end tests of urunc with make test_ctr or make test_nerdctl in the top directory of the cloned urunc repository. It may be better to start with the ctr tests, since there are far fewer of them than the nerdctl ones.
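For reference, running them could look like this (a sketch; it assumes a fresh clone and that the tests need root to talk to containerd):

```sh
# Clone the repository and run the smaller ctr suite first
git clone https://github.com/nubificus/urunc.git
cd urunc
sudo make test_ctr

# If those pass, try the larger nerdctl suite
sudo make test_nerdctl
```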
Also, are you using a VM to test urunc? If so, the issue may be related to nested virtualization. Could you verify that nested virtualization is enabled?
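On a KVM host, one common way to check is the nested parameter of the KVM module (a sketch; which module is loaded depends on the CPU vendor):

```sh
# On the host: nested virtualization flag of the KVM module
cat /sys/module/kvm_intel/parameters/nested   # Intel; expects Y (or 1 on older kernels)
cat /sys/module/kvm_amd/parameters/nested     # AMD; expects 1 (or Y)

# Inside the guest: /dev/kvm must be present for the VMM to start
ls -l /dev/kvm
```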
Thank you for answering so fast. Here is my containerd/config.toml:

disabled_plugins = []
imports = []
oom_score = 0
plugin_dir = ""
required_plugins = []
root = "/var/lib/containerd"
state = "/run/containerd"
temp = ""
version = 2
[cgroup]
path = ""
[debug]
address = ""
format = ""
gid = 0
level = ""
uid = 0
[grpc]
address = "/run/containerd/containerd.sock"
gid = 0
max_recv_message_size = 16777216
max_send_message_size = 16777216
tcp_address = ""
tcp_tls_ca = ""
tcp_tls_cert = ""
tcp_tls_key = ""
uid = 0
[metrics]
address = ""
grpc_histogram = false
[plugins]
[plugins."io.containerd.gc.v1.scheduler"]
deletion_threshold = 0
mutation_threshold = 100
pause_threshold = 0.02
schedule_delay = "0s"
startup_delay = "100ms"
[plugins."io.containerd.grpc.v1.cri"]
cdi_spec_dirs = ["/etc/cdi", "/var/run/cdi"]
device_ownership_from_security_context = false
disable_apparmor = false
disable_cgroup = false
disable_hugetlb_controller = true
disable_proc_mount = false
disable_tcp_service = true
drain_exec_sync_io_timeout = "0s"
enable_cdi = false
enable_selinux = false
enable_tls_streaming = false
enable_unprivileged_icmp = false
enable_unprivileged_ports = false
ignore_deprecation_warnings = []
ignore_image_defined_volumes = false
image_pull_progress_timeout = "5m0s"
image_pull_with_sync_fs = false
max_concurrent_downloads = 3
max_container_log_line_size = 16384
netns_mounts_under_state_dir = false
restrict_oom_score_adj = false
sandbox_image = "registry.k8s.io/pause:3.8"
selinux_category_range = 1024
stats_collect_period = 10
stream_idle_timeout = "4h0m0s"
stream_server_address = "127.0.0.1"
stream_server_port = "0"
systemd_cgroup = false
tolerate_missing_hugetlb_controller = true
unset_seccomp_profile = ""
[plugins."io.containerd.grpc.v1.cri".cni]
bin_dir = "/opt/cni/bin"
conf_dir = "/etc/cni/net.d"
conf_template = ""
ip_pref = ""
max_conf_num = 1
setup_serially = false
[plugins."io.containerd.grpc.v1.cri".containerd]
default_runtime_name = "runc"
disable_snapshot_annotations = true
discard_unpacked_layers = false
ignore_blockio_not_enabled_errors = false
ignore_rdt_not_enabled_errors = false
no_pivot = false
snapshotter = "overlayfs"
[plugins."io.containerd.grpc.v1.cri".containerd.default_runtime]
base_runtime_spec = ""
cni_conf_dir = ""
cni_max_conf_num = 0
container_annotations = []
pod_annotations = []
privileged_without_host_devices = false
privileged_without_host_devices_all_devices_allowed = false
runtime_engine = ""
runtime_path = ""
runtime_root = ""
runtime_type = ""
sandbox_mode = ""
snapshotter = ""
[plugins."io.containerd.grpc.v1.cri".containerd.default_runtime.options]
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes]
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc]
base_runtime_spec = ""
cni_conf_dir = ""
cni_max_conf_num = 0
container_annotations = []
pod_annotations = []
privileged_without_host_devices = false
privileged_without_host_devices_all_devices_allowed = false
runtime_engine = ""
runtime_path = ""
runtime_root = ""
runtime_type = "io.containerd.runc.v2"
sandbox_mode = "podsandbox"
snapshotter = ""
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
BinaryName = ""
CriuImagePath = ""
CriuPath = ""
CriuWorkPath = ""
IoGid = 0
IoUid = 0
NoNewKeyring = false
NoPivotRoot = false
Root = ""
ShimCgroup = ""
SystemdCgroup = false
[plugins."io.containerd.grpc.v1.cri".containerd.untrusted_workload_runtime]
base_runtime_spec = ""
cni_conf_dir = ""
cni_max_conf_num = 0
container_annotations = []
pod_annotations = []
privileged_without_host_devices = false
privileged_without_host_devices_all_devices_allowed = false
runtime_engine = ""
runtime_path = ""
runtime_root = ""
runtime_type = ""
sandbox_mode = ""
snapshotter = ""
[plugins."io.containerd.grpc.v1.cri".containerd.untrusted_workload_runtime.options]
[plugins."io.containerd.grpc.v1.cri".image_decryption]
key_model = "node"
[plugins."io.containerd.grpc.v1.cri".registry]
config_path = ""
[plugins."io.containerd.grpc.v1.cri".registry.auths]
[plugins."io.containerd.grpc.v1.cri".registry.configs]
[plugins."io.containerd.grpc.v1.cri".registry.headers]
[plugins."io.containerd.grpc.v1.cri".registry.mirrors]
[plugins."io.containerd.grpc.v1.cri".x509_key_pair_streaming]
tls_cert_file = ""
tls_key_file = ""
[plugins."io.containerd.internal.v1.opt"] path = "/opt/containerd"
[plugins."io.containerd.internal.v1.restart"] interval = "10s"
[plugins."io.containerd.internal.v1.tracing"]
[plugins."io.containerd.metadata.v1.bolt"] content_sharing_policy = "shared"
[plugins."io.containerd.monitor.v1.cgroups"] no_prometheus = false
[plugins."io.containerd.nri.v1.nri"] disable = true disable_connections = false plugin_config_path = "/etc/nri/conf.d" plugin_path = "/opt/nri/plugins" plugin_registration_timeout = "5s" plugin_request_timeout = "2s" socket_path = "/var/run/nri/nri.sock"
[plugins."io.containerd.runtime.v1.linux"] no_shim = false runtime = "runc" runtime_root = "" shim = "containerd-shim" shim_debug = false
[plugins."io.containerd.runtime.v2.task"] platforms = ["linux/amd64"] sched_core = false
[plugins."io.containerd.service.v1.diff-service"] default = ["walking"]
[plugins."io.containerd.service.v1.tasks-service"] blockio_config_file = "" rdt_config_file = ""
[plugins."io.containerd.snapshotter.v1.aufs"] root_path = ""
[plugins."io.containerd.snapshotter.v1.blockfile"] fs_type = "" mount_options = [] root_path = "" scratch_file = ""
[plugins."io.containerd.snapshotter.v1.btrfs"] root_path = ""
[plugins."io.containerd.snapshotter.v1.native"] root_path = ""
[plugins."io.containerd.snapshotter.v1.overlayfs"] mount_options = [] root_path = "" sync_remove = false upperdir_label = false
[plugins."io.containerd.snapshotter.v1.zfs"] root_path = ""
[plugins."io.containerd.tracing.processor.v1.otlp"]
[plugins."io.containerd.transfer.v1.local"] config_path = "" max_concurrent_downloads = 3 max_concurrent_uploaded_layers = 3
[[plugins."io.containerd.transfer.v1.local".unpack_config]]
differ = ""
platform = "linux/amd64"
snapshotter = "overlayfs"
[proxy_plugins]
[stream_processors]
[stream_processors."io.containerd.ocicrypt.decoder.v1.tar"] accepts = ["application/vnd.oci.image.layer.v1.tar+encrypted"] args = ["--decryption-keys-path", "/etc/containerd/ocicrypt/keys"] env = ["OCICRYPT_KEYPROVIDER_CONFIG=/etc/containerd/ocicrypt/ocicrypt_keyprovider.conf"] path = "ctd-decoder" returns = "application/vnd.oci.image.layer.v1.tar"
[stream_processors."io.containerd.ocicrypt.decoder.v1.tar.gzip"] accepts = ["application/vnd.oci.image.layer.v1.tar+gzip+encrypted"] args = ["--decryption-keys-path", "/etc/containerd/ocicrypt/keys"] env = ["OCICRYPT_KEYPROVIDER_CONFIG=/etc/containerd/ocicrypt/ocicrypt_keyprovider.conf"] path = "ctd-decoder" returns = "application/vnd.oci.image.layer.v1.tar+gzip"
[timeouts] "io.containerd.timeout.bolt.open" = "0s" "io.containerd.timeout.metrics.shimstats" = "2s" "io.containerd.timeout.shim.cleanup" = "5s" "io.containerd.timeout.shim.load" = "5s" "io.containerd.timeout.shim.shutdown" = "3s" "io.containerd.timeout.task.state" = "2s"
[ttrpc] address = "" gid = 0 uid = 0
[plugins."io.containerd.snapshotter.v1.devmapper"] pool_name = "containerd-pool" root_path = "/var/lib/containerd/io.containerd.snapshotter.v1.devmapper" base_image_size = "10GB" discard_blocks = true fs_type = "ext2" [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.urunc] runtime_type = "io.containerd.urunc.v2" container_annotations = ["com.urunc.unikernel."] pod_annotations = ["com.urunc.unikernel."] snapshotter = "devmapper"
The two images that you sent seem to be working well.
The tests for test_ctr all pass:

Testing ctr
=== RUN   TestCtrHvtRumprun
--- PASS: TestCtrHvtRumprun (7.40s)
=== RUN   TestCtrSptRumprun
--- PASS: TestCtrSptRumprun (1.93s)
=== RUN   TestCtrQemuUnikraftNginx
--- PASS: TestCtrQemuUnikraftNginx (0.59s)
=== RUN   TestCtrFCUnikraftNginx
--- PASS: TestCtrFCUnikraftNginx (2.51s)
PASS
ok      github.com/nubificus/urunc/tests/ctr    12.427s
While the nerdctl tests seem to be having some trouble:

Testing nerdctl
=== RUN   TestNerdctlHvtRumprunHello
    nerdctl_test.go:23: exit status 159: Error executing rumprun unikernel with solo5-hvt using nerdctl: time="2024-08-19T13:41:57Z" level=warning msg="cannot set cgroup manager to \"systemd\" for runtime \"io.containerd.urunc.v2\""
--- FAIL: TestNerdctlHvtRumprunHello (0.36s)
=== RUN   TestNerdctlHvtRumprunRedis
    nerdctl_test.go:35: Failed to start unikernel container: - exit status 1
--- FAIL: TestNerdctlHvtRumprunRedis (0.12s)
=== RUN   TestNerdctlHvtSeccompOn
    nerdctl_test.go:44: Failed to start unikernel container: - exit status 1
--- FAIL: TestNerdctlHvtSeccompOn (0.07s)
=== RUN   TestNerdctlHvtSeccompOff
    nerdctl_test.go:53: Failed to start unikernel container: - exit status 1
--- FAIL: TestNerdctlHvtSeccompOff (0.07s)
=== RUN   TestNerdctlSptRumprunRedis
--- PASS: TestNerdctlSptRumprunRedis (14.68s)
=== RUN   TestNerdctlQemuUnikraftRedis
--- PASS: TestNerdctlQemuUnikraftRedis (14.43s)
=== RUN   TestNerdctlQemuUnikraftNginx
--- PASS: TestNerdctlQemuUnikraftNginx (14.39s)
=== RUN   TestNerdctlQemuSeccompOn
--- PASS: TestNerdctlQemuSeccompOn (6.40s)
=== RUN   TestNerdctlQemuSeccompOff
--- PASS: TestNerdctlQemuSeccompOff (6.39s)
=== RUN   TestNerdctlFCUnikraftNginx
--- FAIL: TestNerdctlFCUnikraftNginx (12.18s)
panic: interface conversion: interface {} is nil, not map[string]interface {} [recovered]
        panic: interface conversion: interface {} is nil, not map[string]interface {}

goroutine 90 [running]:
testing.tRunner.func1.2({0x5b48a0, 0xc000160a50})
        /usr/local/go/src/testing/testing.go:1526 +0x24e
testing.tRunner.func1()
        /usr/local/go/src/testing/testing.go:1529 +0x39f
panic({0x5b48a0, 0xc000160a50})
        /usr/local/go/src/runtime/panic.go:884 +0x213
github.com/nubificus/urunc/tests/nerdctl.findUnikernelIP({0xc0000e05f0?, 0x33?})
        /home/ubuntu/orchid/urunc/tests/nerdctl/nerdctl_test.go:264 +0x2e9
github.com/nubificus/urunc/tests/nerdctl.nerdctlTest({0x5e9b9f?, 0x4e4f65?}, {0x5f2c4e?, 0x0?}, 0x0?)
        /home/ubuntu/orchid/urunc/tests/nerdctl/nerdctl_test.go:188 +0xa6
github.com/nubificus/urunc/tests/nerdctl.TestNerdctlFCUnikraftNginx(0xc000087520)
        /home/ubuntu/orchid/urunc/tests/nerdctl/nerdctl_test.go:105 +0x38
testing.tRunner(0xc000087520, 0x5f53b0)
        /usr/local/go/src/testing/testing.go:1576 +0x10b
created by testing.(*T).Run
        /usr/local/go/src/testing/testing.go:1629 +0x3ea
FAIL    github.com/nubificus/urunc/tests/nerdctl        69.097s
FAIL
make: [Makefile:54: test_nerdctl] Error 1 (ignored)
And for the last question: yes, I am using a virtual machine with nested virtualization enabled. I would also like to mention that I tried it on a physical machine as well, with the same result.
Hello @DeftaSebastian, with the help of @ananos we managed to reproduce the issue you mentioned. It seems that on newer versions of Ubuntu (e.g. 22.04) some additional system calls are required to execute the Solo5-hvt VMM. Because of our seccomp filter, their use was blocked and the VMM was therefore receiving a SIGSYS.
We identified the additional required system calls and created a new branch seccomp_ubuntu_2204 with the updated seccomp filter. Would it be possible for you to try out the new branch? Alternatively, you can disable seccomp when executing hvt unikernels by passing the --security-opt seccomp=unconfined command line option to nerdctl.
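For example (the rebuild/install step is an assumption about your setup; repeat whatever procedure you used to install urunc originally):

```sh
# Try the branch with the updated seccomp filter
cd urunc
git fetch origin
git checkout seccomp_ubuntu_2204
# ...rebuild and reinstall urunc here, as in your original installation (assumption)

# Or, on any branch, run the hvt unikernel with seccomp disabled
sudo nerdctl run --rm -ti --snapshotter devmapper \
  --runtime io.containerd.urunc.v2 \
  --security-opt seccomp=unconfined \
  harbor.nbfc.io/nubificus/urunc/redis-hvt-rump:latest unikernel
```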
Regarding the panic errors when testing Firecracker: is Firecracker installed? You can find instructions to install Firecracker in the urunc docs, but please use a version of Firecracker earlier than v1.8.0, since Unikraft seems to have issues with that version.
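You can verify which version is picked up with:

```sh
firecracker --version
```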
After switching to the new branch the command "sudo nerdctl --debug run --rm -ti --snapshotter devmapper --runtime io.containerd.urunc.v2 harbor.nbfc.io/nubificus/urunc/redis-hvt-rump:latest unikernel" started working properly. The warning did not go away, but it works. It also worked on the previous branch with the --security-opt seccomp=unconfined option.
Regarding the panic errors: I did not have Firecracker installed, but even after installing it some tests still do not pass:

Testing nerdctl
=== RUN   TestNerdctlHvtRumprunHello
--- PASS: TestNerdctlHvtRumprunHello (1.20s)
=== RUN   TestNerdctlHvtRumprunRedis
    nerdctl_test.go:35: Failed to start unikernel container: - exit status 1
--- FAIL: TestNerdctlHvtRumprunRedis (0.18s)
=== RUN   TestNerdctlHvtSeccompOn
    nerdctl_test.go:44: Failed to start unikernel container: - exit status 1
--- FAIL: TestNerdctlHvtSeccompOn (0.09s)
=== RUN   TestNerdctlHvtSeccompOff
    nerdctl_test.go:53: Failed to start unikernel container: - exit status 1
--- FAIL: TestNerdctlHvtSeccompOff (0.09s)
=== RUN   TestNerdctlSptRumprunRedis
--- PASS: TestNerdctlSptRumprunRedis (14.69s)
=== RUN   TestNerdctlQemuUnikraftRedis
--- PASS: TestNerdctlQemuUnikraftRedis (14.42s)
=== RUN   TestNerdctlQemuUnikraftNginx
--- PASS: TestNerdctlQemuUnikraftNginx (14.41s)
=== RUN   TestNerdctlQemuSeccompOn
--- PASS: TestNerdctlQemuSeccompOn (6.44s)
=== RUN   TestNerdctlQemuSeccompOff
--- PASS: TestNerdctlQemuSeccompOff (6.53s)
=== RUN   TestNerdctlFCUnikraftNginx
    nerdctl_test.go:107: Failed to start unikernel container: - exit status 1
--- FAIL: TestNerdctlFCUnikraftNginx (0.04s)
=== RUN   TestNerdctlFCSeccompOn
    nerdctl_test.go:116: Failed to start unikernel container: - exit status 1
--- FAIL: TestNerdctlFCSeccompOn (0.12s)
=== RUN   TestNerdctlFCSeccompOff
    nerdctl_test.go:125: Failed to start unikernel container: - exit status 1
--- FAIL: TestNerdctlFCSeccompOff (0.09s)
FAIL
FAIL    github.com/nubificus/urunc/tests/nerdctl        58.307s
FAIL
make: [Makefile:54: test_nerdctl] Error 1 (ignored)
The installed Firecracker version is v1.7.0.
Some of the tests might fail because of "uncleaned" containers left over from previous tests. For example, the hvt tests that fail use the exact same image as the one in your nerdctl command, harbor.nbfc.io/nubificus/urunc/redis-hvt-rump:latest. Similarly, in the case of Firecracker, we use the harbor.nbfc.io/nubificus/urunc/nginx-fc-unik image. You can try to execute it manually outside the tests and see if it works.
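For instance, along the lines of the redis command from your report (the :latest tag and the trailing unikernel argument are assumptions carried over from that command):

```sh
sudo nerdctl run --rm -ti --snapshotter devmapper \
  --runtime io.containerd.urunc.v2 \
  harbor.nbfc.io/nubificus/urunc/nginx-fc-unik:latest unikernel
```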
To check for "uncleaned" containers you can use the nerdctl ps -a command. You might notice that the containers it lists have specific names. Our tests use those fixed names for their containers, so if a container with the same name already exists, starting a new one will fail. After stopping and removing all the containers shown by this command, the tests will hopefully succeed. The cause of the "uncleaned" containers was the panic during testing: the panic occurred before the cleanup step, so the containers were never removed.
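Concretely, something like the following (the bulk removal assumes there are no other containers you want to keep):

```sh
# List all containers, including stopped ones
sudo nerdctl ps -a

# Remove a specific leftover container
sudo nerdctl rm -f <container-name>

# Or remove every container at once
sudo nerdctl rm -f $(sudo nerdctl ps -aq)
```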
We are also working on refactoring our e2e testing and hopefully all these issues will get resolved.
Removing the containers made the tests pass. Thank you for your help!
Seems to be resolved; please re-open if you encounter any additional issues. Will close when the relevant PR is merged.
Description
On a clean machine, following the urunc install guide and then running the redis unikernel image results in the warning: cannot set cgroup manager to "systemd" for runtime "io.containerd.urunc.v2". After the warning, no unikernel runs.
Logs
These are the logs after running sudo nerdctl --debug run --rm -ti --snapshotter devmapper --runtime io.containerd.urunc.v2 harbor.nbfc.io/nubificus/urunc/redis-hvt-rump:latest unikernel:

DEBU[0000] verifying process skipped
DEBU[0000] generated log driver: binary:///usr/local/bin/nerdctl?_NERDCTL_INTERNAL_LOGGING=%2Fvar%2Flib%2Fnerdctl%2F1935db59
DEBU[0000] detected 127.0.0.53 nameserver, assuming systemd-resolved, so using resolv.conf: /run/systemd/resolve/resolv.conf
WARN[0000] cannot set cgroup manager to "systemd" for runtime "io.containerd.urunc.v2"
DEBU[0000] remote introspection plugin filters filters="[type==io.containerd.snapshotter.v1, id==devmapper]"
And these are the containerd logs (sudo journalctl -u containerd):

Aug 19 10:57:49 sebi2-vm containerd[31919]: time="2024-08-19T10:57:49.543839525Z" level=info msg="loading plugin \"io.containerd.event.v1.publisher\"..." runtime=io.containerd.urunc.v>
Aug 19 10:57:49 sebi2-vm containerd[31919]: time="2024-08-19T10:57:49.544208142Z" level=info msg="loading plugin \"io.containerd.internal.v1.shutdown\"..." runtime=io.containerd.urunc>
Aug 19 10:57:49 sebi2-vm containerd[31919]: time="2024-08-19T10:57:49.544296194Z" level=info msg="loading plugin \"io.containerd.ttrpc.v1.task\"..." runtime=io.containerd.urunc.v2 typ>
Aug 19 10:57:49 sebi2-vm containerd[31919]: time="2024-08-19T10:57:49.544543448Z" level=info msg="starting signal loop" namespace=default path=/run/containerd/io.containerd.runtime.v2>
Aug 19 10:57:49 sebi2-vm urunc[32198]: {"args":["urunc","--root","/run/containerd/runc/default","--log","/run/containerd/io.containerd.runtime.v2.task/default/6e9f2c4f77edad249d10f0d6>
Aug 19 10:57:49 sebi2-vm urunc[32198]: {"level":"error","msg":"handleQueueProxy","time":"2024-08-19T10:57:49Z"}
Aug 19 10:57:49 sebi2-vm urunc[32198]: {"hypervisor":"","initrd":"","level":"info","msg":"urunc annotations","subsystem":"unikontainers","time":"2024-08-19T10:57:49Z","unikernelBinary>
Aug 19 10:57:49 sebi2-vm urunc[32198]: {"hypervisor":"aHZ0","initrd":"","level":"info","msg":"urunc.json annotations","subsystem":"unikontainers","time":"2024-08-19T10:57:49Z","uniker>
Aug 19 10:57:49 sebi2-vm urunc[32198]: {"level":"info","msg":"This is a bima container! Proceeding...","time":"2024-08-19T10:57:49Z"}
Aug 19 10:57:49 sebi2-vm urunc[32198]: {"hypervisor":"","initrd":"","level":"info","msg":"urunc annotations","subsystem":"unikontainers","time":"2024-08-19T10:57:49Z","unikernelBinary>
Aug 19 10:57:49 sebi2-vm urunc[32198]: {"hypervisor":"aHZ0","initrd":"","level":"info","msg":"urunc.json annotations","subsystem":"unikontainers","time":"2024-08-19T10:57:49Z","uniker>
Aug 19 10:57:49 sebi2-vm /usr/local/bin/urunc[32206]: {"args":["/usr/local/bin/urunc","--root","/run/containerd/runc/default","--log","/run/containerd/io.containerd.runtime.v2.task/de>
Aug 19 10:57:49 sebi2-vm /usr/local/bin/urunc[32206]: {"hypervisor":"","initrd":"","level":"info","msg":"urunc annotations","subsystem":"unikontainers","time":"2024-08-19T10:57:49Z",">
Aug 19 10:57:49 sebi2-vm /usr/local/bin/urunc[32206]: {"hypervisor":"aHZ0","initrd":"","level":"info","msg":"urunc.json annotations","subsystem":"unikontainers","time":"2024-08-19T10:>
Aug 19 10:57:49 sebi2-vm /usr/local/bin/urunc[32206]: {"level":"info","msg":"This is a bima container! Proceeding...","time":"2024-08-19T10:57:49Z"}
Aug 19 10:57:49 sebi2-vm urunc[32198]: {"args":["/usr/local/bin/nerdctl","--snapshotter=devmapper","internal","oci-hook","createRuntime"],"cmd":"/usr/local/bin/nerdctl --snapshotter=d>
Aug 19 10:57:49 sebi2-vm urunc[32276]: {"args":["urunc","--root","/run/containerd/runc/default","--log","/run/containerd/io.containerd.runtime.v2.task/default/6e9f2c4f77edad249d10f0d6>
Aug 19 10:57:49 sebi2-vm urunc[32276]: {"hypervisor":"","initrd":"","level":"info","msg":"urunc annotations","subsystem":"unikontainers","time":"2024-08-19T10:57:49Z","unikernelBinary>
Aug 19 10:57:49 sebi2-vm urunc[32276]: {"hypervisor":"aHZ0","initrd":"","level":"info","msg":"urunc.json annotations","subsystem":"unikontainers","time":"2024-08-19T10:57:49Z","uniker>
Aug 19 10:57:49 sebi2-vm urunc[32276]: {"level":"info","msg":"This is a bima container! Proceeding...","time":"2024-08-19T10:57:49Z"}
Aug 19 10:57:49 sebi2-vm /usr/local/bin/urunc[32206]: {"level":"info","msg":"calling vmm execve","subsystem":"unikontainers","time":"2024-08-19T10:57:49Z"}
Aug 19 10:57:49 sebi2-vm /usr/local/bin/urunc[32206]: {"level":"info","msg":"Loaded seccomp filters","subsystem":"hypervisors","time":"2024-08-19T10:57:49Z"}
Aug 19 10:57:49 sebi2-vm /usr/local/bin/urunc[32206]: {"hvt command":"/usr/local/bin/solo5-hvt --mem=512 --net:tap=tap0_urunc --block:rootfs=/dev/dm-2 /run/containerd/io.containerd.ru>
Aug 19 10:57:49 sebi2-vm urunc[32300]: {"args":["urunc","--root","/run/containerd/runc/default","--log","/run/containerd/io.containerd.runtime.v2.task/default/6e9f2c4f77edad249d10f0d6>
Aug 19 10:57:49 sebi2-vm urunc[32300]: {"hypervisor":"","initrd":"","level":"info","msg":"urunc annotations","subsystem":"unikontainers","time":"2024-08-19T10:57:49Z","unikernelBinary>
Aug 19 10:57:49 sebi2-vm urunc[32300]: {"hypervisor":"aHZ0","initrd":"","level":"info","msg":"urunc.json annotations","subsystem":"unikontainers","time":"2024-08-19T10:57:49Z","uniker>
Aug 19 10:57:49 sebi2-vm urunc[32300]: {"level":"info","msg":"This is a bima container! Proceeding...","time":"2024-08-19T10:57:49Z"}
Aug 19 10:57:49 sebi2-vm containerd[31919]: time="2024-08-19T10:57:49.731956390Z" level=info msg="shim disconnected" id=6e9f2c4f77edad249d10f0d631aac437d6e7c853cb57b319a7dde34875d1c4b>
Aug 19 10:57:49 sebi2-vm containerd[31919]: time="2024-08-19T10:57:49.732117273Z" level=warning msg="cleaning up after shim disconnected" id=6e9f2c4f77edad249d10f0d631aac437d6e7c853cb>
Aug 19 10:57:49 sebi2-vm containerd[31919]: time="2024-08-19T10:57:49.732182624Z" level=info msg="cleaning up dead shim" namespace=default
Aug 19 10:57:49 sebi2-vm containerd[31919]: time="2024-08-19T10:57:49.732350157Z" level=error msg="failed to delete" cmd="/usr/local/bin/containerd-shim-urunc-v2 -namespace default -a>
Aug 19 10:57:49 sebi2-vm containerd[31919]: time="2024-08-19T10:57:49.732454719Z" level=warning msg="failed to clean up after shim disconnected" error=": fork/exec /usr/local/bin/cont>
System info
Steps to reproduce
Follow the steps at docs/Installation.md.
Command: sudo nerdctl run --rm -ti --snapshotter devmapper --runtime io.containerd.urunc.v2 harbor.nbfc.io/nubificus/urunc/redis-hvt-rump:latest unikernel