ouch. /cc @dagrh to find the relevant folks..
@amorenoz, thanks for the report!
@amorenoz I've provided a simple patch that fixes the issue. As a longer-term measure, we should also validate every path-type option in the config file so that kata-runtime fails gracefully on any missing path instead of crashing. But that's material for a different PR.
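For illustration, here is a minimal sketch (in Go) of the kind of check that longer-term measure implies. The package, helper name, and error wording are hypothetical, not the actual patch; the idea is simply to `os.Stat` each configured path up front and return a descriptive error instead of crashing later:

```go
package config

import (
	"fmt"
	"os"
)

// checkPathExists is a hypothetical helper sketching the long-term fix:
// validate each path-type config option (virtio_fs_daemon, hypervisor
// path, kernel, initrd, ...) when the configuration is loaded, so a
// missing binary surfaces as a clear error rather than a crash deeper
// in the runtime.
func checkPathExists(option, path string) error {
	if path == "" {
		return fmt.Errorf("config option %q is empty", option)
	}
	info, err := os.Stat(path)
	if err != nil {
		// Covers both "file does not exist" and permission problems.
		return fmt.Errorf("config option %q: cannot access %q: %w", option, path, err)
	}
	if info.IsDir() {
		return fmt.Errorf("config option %q: %q is a directory, expected a file", option, path)
	}
	return nil
}
```

Called as, say, `checkPathExists("virtio_fs_daemon", "/usr/libexec/virtiofsd")` during config loading, this would turn the crash reported below into the graceful error the report asks for.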
# Description of problem
If `/usr/libexec/virtiofsd` is not present on the system, kata-runtime crashes.
# Expected result
kata-runtime should report the error and exit gracefully.
# Actual result
kata-runtime crashes:
`kata-collect-data.sh` details:
# Meta details Running `kata-collect-data.sh` version `1.10.0 (commit 11233123ea2ff2aad72f6ebdbca2f6d195a093a0-dirty)` at `2020-04-03.11:58:41.845601458+0000`. --- Runtime is `/usr/bin/kata-runtime`. # `kata-env` Output of "`/usr/bin/kata-runtime kata-env`": ```toml [Meta] Version = "1.0.23" [Runtime] Debug = false Trace = false DisableGuestSeccomp = true DisableNewNetNs = false SandboxCgroupOnly = false Path = "/usr/bin/kata-runtime" [Runtime.Version] Semver = "1.10.0" Commit = "11233123ea2ff2aad72f6ebdbca2f6d195a093a0-dirty" OCI = "1.0.1-dev" [Runtime.Config] Path = "/usr/share/kata-containers/defaults/configuration.toml" [Hypervisor] MachineType = "q35" Version = "QEMU emulator version 2.12.0 (qemu-kvm-2.12.0-99.module+el8.2.0+5827+8c39933c)\nCopyright (c) 2003-2017 Fabrice Bellard and the QEMU Project developers" Path = "/usr/libexec/qemu-kvm" BlockDeviceDriver = "virtio-scsi" EntropySource = "/dev/urandom" Msize9p = 8192 MemorySlots = 10 Debug = false UseVSock = true SharedFS = "virtio-fs" [Image] Path = "" [Kernel] Path = "/usr/lib/modules/4.18.0-176.el8.x86_64/vmlinuz" Parameters = "systemd.unified_cgroup_hierarchy=0 agent.debug_console" [Initrd] Path = "/var/cache/kata-containers/osbuilder-images/4.18.0-176.el8.x86_64/fedora-kata-4.18.0-176.el8.x86_64.initrd" [Proxy] Type = "noProxy" Version = "" Path = "" Debug = false [Shim] Type = "kataShim" Version = "kata-shim version 1.10.0" Path = "/usr/libexec/kata-containers/kata-shim" Debug = false [Agent] Type = "kata" Debug = false Trace = false TraceMode = "" TraceType = "" [Host] Kernel = "4.18.0-176.el8.x86_64" Architecture = "amd64" VMContainerCapable = true SupportVSocks = true [Host.Distro] Name = "Red Hat Enterprise Linux CoreOS" Version = "4.3" [Host.CPU] Vendor = "GenuineIntel" Model = "Intel(R) Xeon(R) CPU @ 2.30GHz" [Netmon] Version = "kata-netmon version 1.10.0" Path = "/usr/libexec/kata-containers/kata-netmon" Debug = false Enable = false ``` --- # Runtime config files ## Runtime default config files ``` /etc/kata-containers/configuration.toml /usr/share/kata-containers/defaults/configuration.toml ``` ## Runtime config file contents Config file `/etc/kata-containers/configuration.toml` not found Config file `/usr/share/defaults/kata-containers/configuration.toml` not found Output of "`cat "/usr/share/kata-containers/defaults/configuration.toml"`": ```toml # Copyright (c) 2017-2019 Intel Corporation # # SPDX-License-Identifier: Apache-2.0 # # XXX: WARNING: this file is auto-generated. # XXX: # XXX: Source file: "cli/config/configuration-qemu.toml.in" # XXX: Project: # XXX: Name: Kata Containers # XXX: Type: kata [hypervisor.qemu] path = "/usr/libexec/qemu-kvm" kernel = "/var/cache/kata-containers/vmlinuz.container" initrd = "/var/cache/kata-containers/kata-containers-initrd.img" #image = "/var/cache/kata-containers/kata-containers.img" machine_type = "q35" # Optional space-separated list of options to pass to the guest kernel. # For example, use `kernel_params = "vsyscall=emulate"` if you are having # trouble running pre-2.15 glibc. # # WARNING: - any parameter specified here will take priority over the default # parameter value of the same name used to start the virtual machine. # Do not set values here unless you understand the impact of doing so as you # may stop the virtual machine from booting. # To see the list of default parameters, enable hypervisor debug, create a # container and look for 'default-kernel-parameters' log entries. 
kernel_params = "systemd.unified_cgroup_hierarchy=0 agent.debug_console" # Path to the firmware. # If you want that qemu uses the default firmware leave this option empty firmware = "" # Machine accelerators # comma-separated list of machine accelerators to pass to the hypervisor. # For example, `machine_accelerators = "nosmm,nosmbus,nosata,nopit,static-prt,nofw"` machine_accelerators="" # Default number of vCPUs per SB/VM: # unspecified or 0 --> will be set to 1 # < 0 --> will be set to the actual number of physical cores # > 0 <= number of physical cores --> will be set to the specified number # > number of physical cores --> will be set to the actual number of physical cores default_vcpus = 1 # Default maximum number of vCPUs per SB/VM: # unspecified or == 0 --> will be set to the actual number of physical cores or to the maximum number # of vCPUs supported by KVM if that number is exceeded # > 0 <= number of physical cores --> will be set to the specified number # > number of physical cores --> will be set to the actual number of physical cores or to the maximum number # of vCPUs supported by KVM if that number is exceeded # WARNING: Depending of the architecture, the maximum number of vCPUs supported by KVM is used when # the actual number of physical cores is greater than it. # WARNING: Be aware that this value impacts the virtual machine's memory footprint and CPU # the hotplug functionality. For example, `default_maxvcpus = 240` specifies that until 240 vCPUs # can be added to a SB/VM, but the memory footprint will be big. Another example, with # `default_maxvcpus = 8` the memory footprint will be small, but 8 will be the maximum number of # vCPUs supported by the SB/VM. In general, we recommend that you do not edit this variable, # unless you know what are you doing. default_maxvcpus = 0 # Bridges can be used to hot plug devices. # Limitations: # * Currently only pci bridges are supported # * Until 30 devices per bridge can be hot plugged. # * Until 5 PCI bridges can be cold plugged per VM. # This limitation could be a bug in qemu or in the kernel # Default number of bridges per SB/VM: # unspecified or 0 --> will be set to 1 # > 1 <= 5 --> will be set to the specified number # > 5 --> will be set to 5 default_bridges = 1 # Default memory size in MiB for SB/VM. # If unspecified then it will be set 2048 MiB. default_memory = 2048 # # Default memory slots per SB/VM. # If unspecified then it will be set 10. # This is will determine the times that memory will be hotadded to sandbox/VM. #memory_slots = 10 # The size in MiB will be plused to max memory of hypervisor. # It is the memory address space for the NVDIMM devie. # If set block storage driver (block_device_driver) to "nvdimm", # should set memory_offset to the size of block device. # Default 0 #memory_offset = 0 # Disable block device from being used for a container's rootfs. # In case of a storage driver like devicemapper where a container's # root file system is backed by a block device, the block device is passed # directly to the hypervisor for performance reasons. # This flag prevents the block device from being passed to the hypervisor, # 9pfs is used instead to pass the rootfs. disable_block_device_use = false # Shared file system type: # - virtio-9p (default) # - virtio-fs shared_fs = "virtio-fs" # Path to vhost-user-fs daemon. 
virtio_fs_daemon = "/usr/libexec/virtiofsd" # Default size of DAX cache in MiB virtio_fs_cache_size = 0 # Extra args for virtiofsd daemon # # Format example: # ["-o", "arg1=xxx,arg2", "-o", "hello world", "--arg3=yyy"] # # see `virtiofsd -h` for possible options. virtio_fs_extra_args = [] # Cache mode: # # - none # Metadata, data, and pathname lookup are not cached in guest. They are # always fetched from host and any changes are immediately pushed to host. # # - auto # Metadata and pathname lookup cache expires after a configured amount of # time (default is 1 second). Data is cached while the file is open (close # to open consistency). # # - always # Metadata, data, and pathname lookup are cached in guest and never expire. virtio_fs_cache = "always" # Block storage driver to be used for the hypervisor in case the container # rootfs is backed by a block device. This is virtio-scsi, virtio-blk # or nvdimm. block_device_driver = "virtio-scsi" # Specifies cache-related options will be set to block devices or not. # Default false #block_device_cache_set = true # Specifies cache-related options for block devices. # Denotes whether use of O_DIRECT (bypass the host page cache) is enabled. # Default false #block_device_cache_direct = true # Specifies cache-related options for block devices. # Denotes whether flush requests for the device are ignored. # Default false #block_device_cache_noflush = true # Enable iothreads (data-plane) to be used. This causes IO to be # handled in a separate IO thread. This is currently only implemented # for SCSI. # enable_iothreads = false # Enable pre allocation of VM RAM, default false # Enabling this will result in lower container density # as all of the memory will be allocated and locked # This is useful when you want to reserve all the memory # upfront or in the cases where you want memory latencies # to be very predictable # Default false #enable_mem_prealloc = true # Enable huge pages for VM RAM, default false # Enabling this will result in the VM memory # being allocated using huge pages. # This is useful when you want to use vhost-user network # stacks within the container. This will automatically # result in memory pre allocation #enable_hugepages = true # Enable file based guest memory support. The default is an empty string which # will disable this feature. In the case of virtio-fs, this is enabled # automatically and '/dev/shm' is used as the backing folder. # This option will be ignored if VM templating is enabled. #file_mem_backend = "" # Enable swap of vm memory. Default false. # The behaviour is undefined if mem_prealloc is also set to true #enable_swap = true # This option changes the default hypervisor and kernel parameters # to enable debug output where available. This extra output is added # to the proxy logs, but only when proxy debug is also enabled. # # Default false #enable_debug = true # Disable the customizations done in the runtime when it detects # that it is running on top a VMM. This will result in the runtime # behaving as it would when running on bare metal. # #disable_nesting_checks = true # This is the msize used for 9p shares. It is the number of bytes # used for 9p packet payload. #msize_9p = 8192 # If true and vsocks are supported, use vsocks to communicate directly # with the agent and no proxy is started, otherwise use unix # sockets and start a proxy to communicate with the agent. # Default false use_vsock = true # VFIO devices are hotplugged on a bridge by default. # Enable hotplugging on root bus. 
This may be required for devices with # a large PCI bar, as this is a current limitation with hotplugging on # a bridge. This value is valid for "pc" machine type. # Default false #hotplug_vfio_on_root_bus = true # If vhost-net backend for virtio-net is not desired, set to true. Default is false, which trades off # security (vhost-net runs ring0) for network I/O performance. #disable_vhost_net = true # # Default entropy source. # The path to a host source of entropy (including a real hardware RNG) # /dev/urandom and /dev/random are two main options. # Be aware that /dev/random is a blocking source of entropy. If the host # runs out of entropy, the VMs boot time will increase leading to get startup # timeouts. # The source of entropy /dev/urandom is non-blocking and provides a # generally acceptable source of entropy. It should work well for pretty much # all practical purposes. #entropy_source= "/dev/urandom" # Path to OCI hook binaries in the *guest rootfs*. # This does not affect host-side hooks which must instead be added to # the OCI spec passed to the runtime. # # You can create a rootfs with hooks by customizing the osbuilder scripts: # https://github.com/kata-containers/osbuilder # # Hooks must be stored in a subdirectory of guest_hook_path according to their # hook type, i.e. "guest_hook_path/{prestart,postart,poststop}". # The agent will scan these directories for executable files and add them, in # lexicographical order, to the lifecycle of the guest container. # Hooks are executed in the runtime namespace of the guest. See the official documentation: # https://github.com/opencontainers/runtime-spec/blob/v1.0.1/config.md#posix-platform-hooks # Warnings will be logged if any error is encountered will scanning for hooks, # but it will not abort container execution. #guest_hook_path = "/usr/share/oci/hooks" [factory] # VM templating support. Once enabled, new VMs are created from template # using vm cloning. They will share the same initial kernel, initramfs and # agent memory by mapping it readonly. It helps speeding up new container # creation and saves a lot of memory if there are many kata containers running # on the same host. # # When disabled, new VMs are created from scratch. # # Note: Requires "initrd=" to be set ("image=" is not supported). # # Default false #enable_template = true # Specifies the path of template. # # Default "/run/vc/vm/template" #template_path = "/run/vc/vm/template" # The number of caches of VMCache: # unspecified or == 0 --> VMCache is disabled # > 0 --> will be set to the specified number # # VMCache is a function that creates VMs as caches before using it. # It helps speed up new container creation. # The function consists of a server and some clients communicating # through Unix socket. The protocol is gRPC in protocols/cache/cache.proto. # The VMCache server will create some VMs and cache them by factory cache. # It will convert the VM to gRPC format and transport it when gets # requestion from clients. # Factory grpccache is the VMCache client. It will request gRPC format # VM and convert it back to a VM. If VMCache function is enabled, # kata-runtime will request VM from factory grpccache when it creates # a new sandbox. # # Default 0 #vm_cache_number = 0 # Specify the address of the Unix socket that is used by VMCache. 
# # Default /var/run/kata-containers/cache.sock #vm_cache_endpoint = "/var/run/kata-containers/cache.sock" [proxy.kata] path = "/usr/libexec/kata-containers/kata-proxy" # If enabled, proxy messages will be sent to the system log # (default: disabled) #enable_debug = true [shim.kata] path = "/usr/libexec/kata-containers/kata-shim" # If enabled, shim messages will be sent to the system log # (default: disabled) #enable_debug = true # If enabled, the shim will create opentracing.io traces and spans. # (See https://www.jaegertracing.io/docs/getting-started). # # Note: By default, the shim runs in a separate network namespace. Therefore, # to allow it to send trace details to the Jaeger agent running on the host, # it is necessary to set 'disable_new_netns=true' so that it runs in the host # network namespace. # # (default: disabled) #enable_tracing = true [agent.kata] # If enabled, make the agent display debug-level messages. # (default: disabled) #enable_debug = true # Enable agent tracing. # # If enabled, the default trace mode is "dynamic" and the # default trace type is "isolated". The trace mode and type are set # explicity with the `trace_type=` and `trace_mode=` options. # # Notes: # # - Tracing is ONLY enabled when `enable_tracing` is set: explicitly # setting `trace_mode=` and/or `trace_type=` without setting `enable_tracing` # will NOT activate agent tracing. # # - See https://github.com/kata-containers/agent/blob/master/TRACING.md for # full details. # # (default: disabled) #enable_tracing = true # #trace_mode = "dynamic" #trace_type = "isolated" # Comma separated list of kernel modules and their parameters. # These modules will be loaded in the guest kernel using modprobe(8). # The following example can be used to load two kernel modules with parameters # - kernel_modules=["e1000e InterruptThrottleRate=3000,3000,3000 EEE=1", "i915 enable_ppgtt=0"] # The first word is considered as the module name and the rest as its parameters. # Container will not be started when: # * A kernel module is specified and the modprobe command is not installed in the guest # or it fails loading the module. # * The module is not available in the guest or it doesn't met the guest kernel # requirements, like architecture and version. # kernel_modules=[] [netmon] # If enabled, the network monitoring process gets started when the # sandbox is created. This allows for the detection of some additional # network being added to the existing network namespace, after the # sandbox has been created. # (default: disabled) #enable_netmon = true # Specify the path to the netmon binary. path = "/usr/libexec/kata-containers/kata-netmon" # If enabled, netmon messages will be sent to the system log # (default: disabled) #enable_debug = true [runtime] # If enabled, the runtime will log additional debug messages to the # system log # (default: disabled) #enable_debug = true # # Internetworking model # Determines how the VM should be connected to the # the container network interface # Options: # # - macvtap # Used when the Container network interface can be bridged using # macvtap. # # - none # Used when customize network. Only creates a tap device. No veth pair. # # - tcfilter # Uses tc filter rules to redirect traffic from the network interface # provided by plugin to a tap interface connected to the VM. # internetworking_model="tcfilter" # disable guest seccomp # Determines whether container seccomp profiles are passed to the virtual # machine and applied by the kata agent. 
If set to true, seccomp is not applied # within the guest # (default: true) disable_guest_seccomp=true # If enabled, the runtime will create opentracing.io traces and spans. # (See https://www.jaegertracing.io/docs/getting-started). # (default: disabled) #enable_tracing = true # If enabled, the runtime will not create a network namespace for shim and hypervisor processes. # This option may have some potential impacts to your host. It should only be used when you know what you're doing. # `disable_new_netns` conflicts with `enable_netmon` # `disable_new_netns` conflicts with `internetworking_model=tcfilter` and `internetworking_model=macvtap`. It works only # with `internetworking_model=none`. The tap device will be in the host network namespace and can connect to a bridge # (like OVS) directly. # If you are using docker, `disable_new_netns` only works with `docker run --net=none` # (default: false) #disable_new_netns = true # if enabled, the runtime will add all the kata processes inside one dedicated cgroup. # The container cgroups in the host are not created, just one single cgroup per sandbox. # The sandbox cgroup is not constrained by the runtime # The runtime caller is free to restrict or collect cgroup stats of the overall Kata sandbox. # The sandbox cgroup path is the parent cgroup of a container with the PodSandbox annotation. # See: https://godoc.org/github.com/kata-containers/runtime/virtcontainers#ContainerType sandbox_cgroup_only=false # Enabled experimental feature list, format: ["a", "b"]. # Experimental features are features not stable enough for production, # They may break compatibility, and are prepared for a big version bump. # Supported experimental features: # 1. "newstore": new persist storage driver which breaks backward compatibility, # expected to move out of experimental in 2.0.0. 
# (default: []) experimental=[] ``` --- # KSM throttler ## version Output of "`/usr/lib/systemd/system/kata-ksm-throttler.service --version`": ``` /usr/bin/kata-collect-data.sh: line 178: /usr/lib/systemd/system/kata-ksm-throttler.service: Permission denied ``` ## systemd service # Image details No image --- # Initrd details gzip: /var/cache/kata-containers/osbuilder-images/4.18.0-176.el8.x86_64/fedora-kata-4.18.0-176.el8.x86_64.initrd: not in gzip format ```yaml unknown ``` --- # Logfiles ## Runtime logs Recent runtime problems found in system journal: ``` time="2020-04-03T11:47:28.806611075Z" level=warning msg="load sandbox devices failed" arch=amd64 command=create container=c26d814b4877265d91678f4cf7ff9c31a5148e62566d6a4cdb055319a95f0542 error="open /run/vc/sbs/c26d814b4877265d91678f4cf7ff9c31a5148e62566d6a4cdb055319a95f0542/devices.json: no such file or directory" name=kata-runtime pid=922097 sandbox=c26d814b4877265d91678f4cf7ff9c31a5148e62566d6a4cdb055319a95f0542 sandboxid=c26d814b4877265d91678f4cf7ff9c31a5148e62566d6a4cdb055319a95f0542 source=virtcontainers subsystem=sandbox time="2020-04-03T11:47:28.985015808Z" level=warning msg="Failed to get container, force will not fail: Container ID (c26d814b4877265d91678f4cf7ff9c31a5148e62566d6a4cdb055319a95f0542) does not exist" arch=amd64 command=delete container=c26d814b4877265d91678f4cf7ff9c31a5148e62566d6a4cdb055319a95f0542 name=kata-runtime pid=922126 source=runtime time="2020-04-03T11:47:31.847915806Z" level=warning msg="load sandbox devices failed" arch=amd64 command=create container=36174a3aac4ddb2ef43863ec2f6ea4e0ef4bd0b93abee9c43b5d21e46cb3d4f7 error="open /run/vc/sbs/36174a3aac4ddb2ef43863ec2f6ea4e0ef4bd0b93abee9c43b5d21e46cb3d4f7/devices.json: no such file or directory" name=kata-runtime pid=922305 sandbox=36174a3aac4ddb2ef43863ec2f6ea4e0ef4bd0b93abee9c43b5d21e46cb3d4f7 sandboxid=36174a3aac4ddb2ef43863ec2f6ea4e0ef4bd0b93abee9c43b5d21e46cb3d4f7 source=virtcontainers subsystem=sandbox time="2020-04-03T11:47:31.998627209Z" level=warning msg="Failed to get container, force will not fail: Container ID (36174a3aac4ddb2ef43863ec2f6ea4e0ef4bd0b93abee9c43b5d21e46cb3d4f7) does not exist" arch=amd64 command=delete container=36174a3aac4ddb2ef43863ec2f6ea4e0ef4bd0b93abee9c43b5d21e46cb3d4f7 name=kata-runtime pid=922315 source=runtime time="2020-04-03T11:47:36.761891724Z" level=warning msg="load sandbox devices failed" arch=amd64 command=create container=0432595135d3836c7642214174dfda589c63b7238d3b78b48ac3860be92d77d7 error="open /run/vc/sbs/0432595135d3836c7642214174dfda589c63b7238d3b78b48ac3860be92d77d7/devices.json: no such file or directory" name=kata-runtime pid=922556 sandbox=0432595135d3836c7642214174dfda589c63b7238d3b78b48ac3860be92d77d7 sandboxid=0432595135d3836c7642214174dfda589c63b7238d3b78b48ac3860be92d77d7 source=virtcontainers subsystem=sandbox time="2020-04-03T11:47:36.912676534Z" level=warning msg="Failed to get container, force will not fail: Container ID (0432595135d3836c7642214174dfda589c63b7238d3b78b48ac3860be92d77d7) does not exist" arch=amd64 command=delete container=0432595135d3836c7642214174dfda589c63b7238d3b78b48ac3860be92d77d7 name=kata-runtime pid=922573 source=runtime time="2020-04-03T11:47:40.320461074Z" level=warning msg="load sandbox devices failed" arch=amd64 command=create container=faab212d54fc4c00f084fb705cd2b82c60eaef16621d89beeebb4b45602abdd8 error="open /run/vc/sbs/faab212d54fc4c00f084fb705cd2b82c60eaef16621d89beeebb4b45602abdd8/devices.json: no such file or directory" name=kata-runtime pid=922863 
sandbox=faab212d54fc4c00f084fb705cd2b82c60eaef16621d89beeebb4b45602abdd8 sandboxid=faab212d54fc4c00f084fb705cd2b82c60eaef16621d89beeebb4b45602abdd8 source=virtcontainers subsystem=sandbox time="2020-04-03T11:47:40.404639122Z" level=warning msg="Failed to get container, force will not fail: Container ID (faab212d54fc4c00f084fb705cd2b82c60eaef16621d89beeebb4b45602abdd8) does not exist" arch=amd64 command=delete container=faab212d54fc4c00f084fb705cd2b82c60eaef16621d89beeebb4b45602abdd8 name=kata-runtime pid=922874 source=runtime time="2020-04-03T11:47:44.938942442Z" level=warning msg="load sandbox devices failed" arch=amd64 command=create container=05cd90a6abdff35b324839f1617fa1e07142c13cd832a4b154862224f697d091 error="open /run/vc/sbs/05cd90a6abdff35b324839f1617fa1e07142c13cd832a4b154862224f697d091/devices.json: no such file or directory" name=kata-runtime pid=923197 sandbox=05cd90a6abdff35b324839f1617fa1e07142c13cd832a4b154862224f697d091 sandboxid=05cd90a6abdff35b324839f1617fa1e07142c13cd832a4b154862224f697d091 source=virtcontainers subsystem=sandbox time="2020-04-03T11:47:45.046663835Z" level=warning msg="Failed to get container, force will not fail: Container ID (05cd90a6abdff35b324839f1617fa1e07142c13cd832a4b154862224f697d091) does not exist" arch=amd64 command=delete container=05cd90a6abdff35b324839f1617fa1e07142c13cd832a4b154862224f697d091 name=kata-runtime pid=923207 source=runtime time="2020-04-03T11:47:48.198963434Z" level=warning msg="load sandbox devices failed" arch=amd64 command=create container=955af23432b5622f754c04ee0f35c87ee1d0568e9c583dd296ca2eb8ea479fee error="open /run/vc/sbs/955af23432b5622f754c04ee0f35c87ee1d0568e9c583dd296ca2eb8ea479fee/devices.json: no such file or directory" name=kata-runtime pid=923473 sandbox=955af23432b5622f754c04ee0f35c87ee1d0568e9c583dd296ca2eb8ea479fee sandboxid=955af23432b5622f754c04ee0f35c87ee1d0568e9c583dd296ca2eb8ea479fee source=virtcontainers subsystem=sandbox time="2020-04-03T11:47:48.307976894Z" level=warning msg="Failed to get container, force will not fail: Container ID (955af23432b5622f754c04ee0f35c87ee1d0568e9c583dd296ca2eb8ea479fee) does not exist" arch=amd64 command=delete container=955af23432b5622f754c04ee0f35c87ee1d0568e9c583dd296ca2eb8ea479fee name=kata-runtime pid=923498 source=runtime time="2020-04-03T11:47:51.685449304Z" level=warning msg="load sandbox devices failed" arch=amd64 command=create container=858fb518a8497d7efede906c24f908c3ebfa3449048843e86f682948fa38467c error="open /run/vc/sbs/858fb518a8497d7efede906c24f908c3ebfa3449048843e86f682948fa38467c/devices.json: no such file or directory" name=kata-runtime pid=926740 sandbox=858fb518a8497d7efede906c24f908c3ebfa3449048843e86f682948fa38467c sandboxid=858fb518a8497d7efede906c24f908c3ebfa3449048843e86f682948fa38467c source=virtcontainers subsystem=sandbox time="2020-04-03T11:47:51.797028356Z" level=warning msg="Failed to get container, force will not fail: Container ID (858fb518a8497d7efede906c24f908c3ebfa3449048843e86f682948fa38467c) does not exist" arch=amd64 command=delete container=858fb518a8497d7efede906c24f908c3ebfa3449048843e86f682948fa38467c name=kata-runtime pid=926751 source=runtime time="2020-04-03T11:47:55.060428325Z" level=warning msg="load sandbox devices failed" arch=amd64 command=create container=b275662ffe5dc369cbe8f818c7681b955cbdc2f7e0b57e9c2b0c733871ea3e2d error="open /run/vc/sbs/b275662ffe5dc369cbe8f818c7681b955cbdc2f7e0b57e9c2b0c733871ea3e2d/devices.json: no such file or directory" name=kata-runtime pid=926955 
sandbox=b275662ffe5dc369cbe8f818c7681b955cbdc2f7e0b57e9c2b0c733871ea3e2d sandboxid=b275662ffe5dc369cbe8f818c7681b955cbdc2f7e0b57e9c2b0c733871ea3e2d source=virtcontainers subsystem=sandbox time="2020-04-03T11:47:55.161352936Z" level=warning msg="Failed to get container, force will not fail: Container ID (b275662ffe5dc369cbe8f818c7681b955cbdc2f7e0b57e9c2b0c733871ea3e2d) does not exist" arch=amd64 command=delete container=b275662ffe5dc369cbe8f818c7681b955cbdc2f7e0b57e9c2b0c733871ea3e2d name=kata-runtime pid=926966 source=runtime time="2020-04-03T11:47:58.693276535Z" level=warning msg="load sandbox devices failed" arch=amd64 command=create container=8ef561e1f48e2fb04848dba18096c06758170c40cafe153bbf17236f7b4b47a1 error="open /run/vc/sbs/8ef561e1f48e2fb04848dba18096c06758170c40cafe153bbf17236f7b4b47a1/devices.json: no such file or directory" name=kata-runtime pid=927240 sandbox=8ef561e1f48e2fb04848dba18096c06758170c40cafe153bbf17236f7b4b47a1 sandboxid=8ef561e1f48e2fb04848dba18096c06758170c40cafe153bbf17236f7b4b47a1 source=virtcontainers subsystem=sandbox time="2020-04-03T11:47:58.836479003Z" level=warning msg="Failed to get container, force will not fail: Container ID (8ef561e1f48e2fb04848dba18096c06758170c40cafe153bbf17236f7b4b47a1) does not exist" arch=amd64 command=delete container=8ef561e1f48e2fb04848dba18096c06758170c40cafe153bbf17236f7b4b47a1 name=kata-runtime pid=927260 source=runtime time="2020-04-03T11:48:01.859442226Z" level=warning msg="load sandbox devices failed" arch=amd64 command=create container=cd0191d3720301bd134057ba8bd2148e6f501d60047cbd4953fd5a6fa7eef884 error="open /run/vc/sbs/cd0191d3720301bd134057ba8bd2148e6f501d60047cbd4953fd5a6fa7eef884/devices.json: no such file or directory" name=kata-runtime pid=927486 sandbox=cd0191d3720301bd134057ba8bd2148e6f501d60047cbd4953fd5a6fa7eef884 sandboxid=cd0191d3720301bd134057ba8bd2148e6f501d60047cbd4953fd5a6fa7eef884 source=virtcontainers subsystem=sandbox time="2020-04-03T11:48:01.965178843Z" level=warning msg="Failed to get container, force will not fail: Container ID (cd0191d3720301bd134057ba8bd2148e6f501d60047cbd4953fd5a6fa7eef884) does not exist" arch=amd64 command=delete container=cd0191d3720301bd134057ba8bd2148e6f501d60047cbd4953fd5a6fa7eef884 name=kata-runtime pid=927497 source=runtime time="2020-04-03T11:48:05.735120982Z" level=warning msg="load sandbox devices failed" arch=amd64 command=create container=97814dba0957cd1b097c9ac34b7ae21270ec79499117592b51db818e83d2abf9 error="open /run/vc/sbs/97814dba0957cd1b097c9ac34b7ae21270ec79499117592b51db818e83d2abf9/devices.json: no such file or directory" name=kata-runtime pid=927736 sandbox=97814dba0957cd1b097c9ac34b7ae21270ec79499117592b51db818e83d2abf9 sandboxid=97814dba0957cd1b097c9ac34b7ae21270ec79499117592b51db818e83d2abf9 source=virtcontainers subsystem=sandbox time="2020-04-03T11:48:05.882254187Z" level=warning msg="Failed to get container, force will not fail: Container ID (97814dba0957cd1b097c9ac34b7ae21270ec79499117592b51db818e83d2abf9) does not exist" arch=amd64 command=delete container=97814dba0957cd1b097c9ac34b7ae21270ec79499117592b51db818e83d2abf9 name=kata-runtime pid=927749 source=runtime time="2020-04-03T11:48:10.126366931Z" level=warning msg="load sandbox devices failed" arch=amd64 command=create container=7698961424b6eb07423a66380812bd3247761294d4d192c52ed73e0413b7557c error="open /run/vc/sbs/7698961424b6eb07423a66380812bd3247761294d4d192c52ed73e0413b7557c/devices.json: no such file or directory" name=kata-runtime pid=928059 
sandbox=7698961424b6eb07423a66380812bd3247761294d4d192c52ed73e0413b7557c sandboxid=7698961424b6eb07423a66380812bd3247761294d4d192c52ed73e0413b7557c source=virtcontainers subsystem=sandbox time="2020-04-03T11:48:10.252311178Z" level=warning msg="Failed to get container, force will not fail: Container ID (7698961424b6eb07423a66380812bd3247761294d4d192c52ed73e0413b7557c) does not exist" arch=amd64 command=delete container=7698961424b6eb07423a66380812bd3247761294d4d192c52ed73e0413b7557c name=kata-runtime pid=928070 source=runtime time="2020-04-03T11:48:13.316426619Z" level=warning msg="load sandbox devices failed" arch=amd64 command=create container=f8817cd924ab20af38ac69147797f0c23c7de5f4e9012212d1d3877bebac2ee5 error="open /run/vc/sbs/f8817cd924ab20af38ac69147797f0c23c7de5f4e9012212d1d3877bebac2ee5/devices.json: no such file or directory" name=kata-runtime pid=928205 sandbox=f8817cd924ab20af38ac69147797f0c23c7de5f4e9012212d1d3877bebac2ee5 sandboxid=f8817cd924ab20af38ac69147797f0c23c7de5f4e9012212d1d3877bebac2ee5 source=virtcontainers subsystem=sandbox time="2020-04-03T11:48:13.44487229Z" level=warning msg="Failed to get container, force will not fail: Container ID (f8817cd924ab20af38ac69147797f0c23c7de5f4e9012212d1d3877bebac2ee5) does not exist" arch=amd64 command=delete container=f8817cd924ab20af38ac69147797f0c23c7de5f4e9012212d1d3877bebac2ee5 name=kata-runtime pid=928215 source=runtime time="2020-04-03T11:48:16.774211309Z" level=warning msg="load sandbox devices failed" arch=amd64 command=create container=b6f2dfd75e5b906ee2295b9beb9155facab4b8f9c4fe74ed3cfdad093c795e1e error="open /run/vc/sbs/b6f2dfd75e5b906ee2295b9beb9155facab4b8f9c4fe74ed3cfdad093c795e1e/devices.json: no such file or directory" name=kata-runtime pid=928440 sandbox=b6f2dfd75e5b906ee2295b9beb9155facab4b8f9c4fe74ed3cfdad093c795e1e sandboxid=b6f2dfd75e5b906ee2295b9beb9155facab4b8f9c4fe74ed3cfdad093c795e1e source=virtcontainers subsystem=sandbox time="2020-04-03T11:48:16.910834481Z" level=warning msg="Failed to get container, force will not fail: Container ID (b6f2dfd75e5b906ee2295b9beb9155facab4b8f9c4fe74ed3cfdad093c795e1e) does not exist" arch=amd64 command=delete container=b6f2dfd75e5b906ee2295b9beb9155facab4b8f9c4fe74ed3cfdad093c795e1e name=kata-runtime pid=928463 source=runtime time="2020-04-03T11:48:20.762568878Z" level=warning msg="load sandbox devices failed" arch=amd64 command=create container=fd37b5079f8245cdfc02926193d305258c149c47de38574a638c0e791c354d10 error="open /run/vc/sbs/fd37b5079f8245cdfc02926193d305258c149c47de38574a638c0e791c354d10/devices.json: no such file or directory" name=kata-runtime pid=928752 sandbox=fd37b5079f8245cdfc02926193d305258c149c47de38574a638c0e791c354d10 sandboxid=fd37b5079f8245cdfc02926193d305258c149c47de38574a638c0e791c354d10 source=virtcontainers subsystem=sandbox time="2020-04-03T11:48:20.854674344Z" level=warning msg="Failed to get container, force will not fail: Container ID (fd37b5079f8245cdfc02926193d305258c149c47de38574a638c0e791c354d10) does not exist" arch=amd64 command=delete container=fd37b5079f8245cdfc02926193d305258c149c47de38574a638c0e791c354d10 name=kata-runtime pid=928763 source=runtime time="2020-04-03T11:48:24.593389456Z" level=warning msg="load sandbox devices failed" arch=amd64 command=create container=7977be81b4f949a31dad8661ad8b444f72018e55d94b69684a093ebd42ccc5ea error="open /run/vc/sbs/7977be81b4f949a31dad8661ad8b444f72018e55d94b69684a093ebd42ccc5ea/devices.json: no such file or directory" name=kata-runtime pid=932064 
sandbox=7977be81b4f949a31dad8661ad8b444f72018e55d94b69684a093ebd42ccc5ea sandboxid=7977be81b4f949a31dad8661ad8b444f72018e55d94b69684a093ebd42ccc5ea source=virtcontainers subsystem=sandbox time="2020-04-03T11:48:24.686639182Z" level=warning msg="Failed to get container, force will not fail: Container ID (7977be81b4f949a31dad8661ad8b444f72018e55d94b69684a093ebd42ccc5ea) does not exist" arch=amd64 command=delete container=7977be81b4f949a31dad8661ad8b444f72018e55d94b69684a093ebd42ccc5ea name=kata-runtime pid=932075 source=runtime time="2020-04-03T11:48:28.541063896Z" level=warning msg="load sandbox devices failed" arch=amd64 command=create container=0d942561506baf1f5779d606256476adbe76829f41938141742a946fc228a171 error="open /run/vc/sbs/0d942561506baf1f5779d606256476adbe76829f41938141742a946fc228a171/devices.json: no such file or directory" name=kata-runtime pid=932407 sandbox=0d942561506baf1f5779d606256476adbe76829f41938141742a946fc228a171 sandboxid=0d942561506baf1f5779d606256476adbe76829f41938141742a946fc228a171 source=virtcontainers subsystem=sandbox time="2020-04-03T11:48:28.712369816Z" level=warning msg="Failed to get container, force will not fail: Container ID (0d942561506baf1f5779d606256476adbe76829f41938141742a946fc228a171) does not exist" arch=amd64 command=delete container=0d942561506baf1f5779d606256476adbe76829f41938141742a946fc228a171 name=kata-runtime pid=932438 source=runtime time="2020-04-03T11:54:11.255341014Z" level=warning msg="load sandbox devices failed" arch=amd64 command=create container=98fcd87206289464e532483a74787050765e6fa423ec39d91c17559310cbcfbe error="open /run/vc/sbs/98fcd87206289464e532483a74787050765e6fa423ec39d91c17559310cbcfbe/devices.json: no such file or directory" name=kata-runtime pid=971416 sandbox=98fcd87206289464e532483a74787050765e6fa423ec39d91c17559310cbcfbe sandboxid=98fcd87206289464e532483a74787050765e6fa423ec39d91c17559310cbcfbe source=virtcontainers subsystem=sandbox time="2020-04-03T11:54:11.35037438Z" level=warning msg="Failed to get container, force will not fail: Container ID (98fcd87206289464e532483a74787050765e6fa423ec39d91c17559310cbcfbe) does not exist" arch=amd64 command=delete container=98fcd87206289464e532483a74787050765e6fa423ec39d91c17559310cbcfbe name=kata-runtime pid=971427 source=runtime time="2020-04-03T11:54:14.184743686Z" level=warning msg="load sandbox devices failed" arch=amd64 command=create container=204618e4a29871ded61a0764950f78915ace2d0103c1a6661e2be98df1185059 error="open /run/vc/sbs/204618e4a29871ded61a0764950f78915ace2d0103c1a6661e2be98df1185059/devices.json: no such file or directory" name=kata-runtime pid=971590 sandbox=204618e4a29871ded61a0764950f78915ace2d0103c1a6661e2be98df1185059 sandboxid=204618e4a29871ded61a0764950f78915ace2d0103c1a6661e2be98df1185059 source=virtcontainers subsystem=sandbox time="2020-04-03T11:54:14.297145844Z" level=warning msg="Failed to get container, force will not fail: Container ID (204618e4a29871ded61a0764950f78915ace2d0103c1a6661e2be98df1185059) does not exist" arch=amd64 command=delete container=204618e4a29871ded61a0764950f78915ace2d0103c1a6661e2be98df1185059 name=kata-runtime pid=971602 source=runtime time="2020-04-03T11:54:17.658947017Z" level=warning msg="load sandbox devices failed" arch=amd64 command=create container=1d7df92c76064584d01a1430fcf20526ea940a2a60742cf2d2ea45f3822256d5 error="open /run/vc/sbs/1d7df92c76064584d01a1430fcf20526ea940a2a60742cf2d2ea45f3822256d5/devices.json: no such file or directory" name=kata-runtime pid=972461 
sandbox=1d7df92c76064584d01a1430fcf20526ea940a2a60742cf2d2ea45f3822256d5 sandboxid=1d7df92c76064584d01a1430fcf20526ea940a2a60742cf2d2ea45f3822256d5 source=virtcontainers subsystem=sandbox time="2020-04-03T11:54:17.789798209Z" level=warning msg="Failed to get container, force will not fail: Container ID (1d7df92c76064584d01a1430fcf20526ea940a2a60742cf2d2ea45f3822256d5) does not exist" arch=amd64 command=delete container=1d7df92c76064584d01a1430fcf20526ea940a2a60742cf2d2ea45f3822256d5 name=kata-runtime pid=972639 source=runtime time="2020-04-03T11:54:21.466114296Z" level=warning msg="load sandbox devices failed" arch=amd64 command=create container=ed205574344ef9ed042a5147afc9157ed94924ec02b84675ba37125d6b913823 error="open /run/vc/sbs/ed205574344ef9ed042a5147afc9157ed94924ec02b84675ba37125d6b913823/devices.json: no such file or directory" name=kata-runtime pid=975225 sandbox=ed205574344ef9ed042a5147afc9157ed94924ec02b84675ba37125d6b913823 sandboxid=ed205574344ef9ed042a5147afc9157ed94924ec02b84675ba37125d6b913823 source=virtcontainers subsystem=sandbox time="2020-04-03T11:54:21.586692148Z" level=warning msg="Failed to get container, force will not fail: Container ID (ed205574344ef9ed042a5147afc9157ed94924ec02b84675ba37125d6b913823) does not exist" arch=amd64 command=delete container=ed205574344ef9ed042a5147afc9157ed94924ec02b84675ba37125d6b913823 name=kata-runtime pid=975236 source=runtime time="2020-04-03T11:54:26.059045294Z" level=warning msg="load sandbox devices failed" arch=amd64 command=create container=b9921e5bb3328678a5abd4f18d06f444c2e00e35bde66635e365a7e82e5e02b8 error="open /run/vc/sbs/b9921e5bb3328678a5abd4f18d06f444c2e00e35bde66635e365a7e82e5e02b8/devices.json: no such file or directory" name=kata-runtime pid=975478 sandbox=b9921e5bb3328678a5abd4f18d06f444c2e00e35bde66635e365a7e82e5e02b8 sandboxid=b9921e5bb3328678a5abd4f18d06f444c2e00e35bde66635e365a7e82e5e02b8 source=virtcontainers subsystem=sandbox time="2020-04-03T11:54:26.15099803Z" level=warning msg="Failed to get container, force will not fail: Container ID (b9921e5bb3328678a5abd4f18d06f444c2e00e35bde66635e365a7e82e5e02b8) does not exist" arch=amd64 command=delete container=b9921e5bb3328678a5abd4f18d06f444c2e00e35bde66635e365a7e82e5e02b8 name=kata-runtime pid=975489 source=runtime time="2020-04-03T11:54:30.144305306Z" level=warning msg="load sandbox devices failed" arch=amd64 command=create container=789d638d20bdf76408b1b079c788404dfa72875414ab4875d2f9c2e8ed645037 error="open /run/vc/sbs/789d638d20bdf76408b1b079c788404dfa72875414ab4875d2f9c2e8ed645037/devices.json: no such file or directory" name=kata-runtime pid=975795 sandbox=789d638d20bdf76408b1b079c788404dfa72875414ab4875d2f9c2e8ed645037 sandboxid=789d638d20bdf76408b1b079c788404dfa72875414ab4875d2f9c2e8ed645037 source=virtcontainers subsystem=sandbox time="2020-04-03T11:54:30.247665047Z" level=warning msg="Failed to get container, force will not fail: Container ID (789d638d20bdf76408b1b079c788404dfa72875414ab4875d2f9c2e8ed645037) does not exist" arch=amd64 command=delete container=789d638d20bdf76408b1b079c788404dfa72875414ab4875d2f9c2e8ed645037 name=kata-runtime pid=975807 source=runtime time="2020-04-03T11:54:33.478566927Z" level=warning msg="load sandbox devices failed" arch=amd64 command=create container=926eb6b6132c8a62d3e31e250954c3aae9eba16b87d6687ee8f3fa4dd2e92e5d error="open /run/vc/sbs/926eb6b6132c8a62d3e31e250954c3aae9eba16b87d6687ee8f3fa4dd2e92e5d/devices.json: no such file or directory" name=kata-runtime pid=975977 
sandbox=926eb6b6132c8a62d3e31e250954c3aae9eba16b87d6687ee8f3fa4dd2e92e5d sandboxid=926eb6b6132c8a62d3e31e250954c3aae9eba16b87d6687ee8f3fa4dd2e92e5d source=virtcontainers subsystem=sandbox time="2020-04-03T11:54:33.590296086Z" level=warning msg="Failed to get container, force will not fail: Container ID (926eb6b6132c8a62d3e31e250954c3aae9eba16b87d6687ee8f3fa4dd2e92e5d) does not exist" arch=amd64 command=delete container=926eb6b6132c8a62d3e31e250954c3aae9eba16b87d6687ee8f3fa4dd2e92e5d name=kata-runtime pid=975988 source=runtime time="2020-04-03T11:54:38.599704513Z" level=warning msg="load sandbox devices failed" arch=amd64 command=create container=7b7150a9ecc89a6a92e09b384c879e6058855bf3716db5af79f44b72d6dd266c error="open /run/vc/sbs/7b7150a9ecc89a6a92e09b384c879e6058855bf3716db5af79f44b72d6dd266c/devices.json: no such file or directory" name=kata-runtime pid=976404 sandbox=7b7150a9ecc89a6a92e09b384c879e6058855bf3716db5af79f44b72d6dd266c sandboxid=7b7150a9ecc89a6a92e09b384c879e6058855bf3716db5af79f44b72d6dd266c source=virtcontainers subsystem=sandbox time="2020-04-03T11:54:38.758155034Z" level=warning msg="Failed to get container, force will not fail: Container ID (7b7150a9ecc89a6a92e09b384c879e6058855bf3716db5af79f44b72d6dd266c) does not exist" arch=amd64 command=delete container=7b7150a9ecc89a6a92e09b384c879e6058855bf3716db5af79f44b72d6dd266c name=kata-runtime pid=976426 source=runtime ``` ## Proxy logs No recent proxy problems found in system journal. ## Shim logs No recent shim problems found in system journal. ## Throttler logs No recent throttler problems found in system journal. --- # Container manager details No `docker` Have `kubectl` ## Kubernetes Output of "`kubectl version`": ``` Client Version: version.Info{Major:"", Minor:"", GitVersion:"v0.0.0-master+$Format:%h$", GitCommit:"$Format:%H$", GitTreeState:"", BuildDate:"1970-01-01T00:00:00Z", GoVersion:"go1.12.12", Compiler:"gc", Platform:"linux/amd64"} ``` Output of "`kubectl config view`": ``` apiVersion: v1 clusters: [] contexts: [] current-context: "" kind: Config preferences: {} users: [] ``` Output of "`systemctl show kubelet`": ``` Type=notify Restart=always NotifyAccess=main RestartUSec=10s TimeoutStartUSec=1min 30s TimeoutStopUSec=1min 30s RuntimeMaxUSec=infinity WatchdogUSec=0 WatchdogTimestamp=Fri 2020-04-03 09:41:25 UTC WatchdogTimestampMonotonic=15431924 PermissionsStartOnly=no RootDirectoryStartOnly=no RemainAfterExit=no GuessMainPID=yes MainPID=1421 ControlPID=0 FileDescriptorStoreMax=0 NFileDescriptorStore=0 StatusErrno=0 Result=success UID=[not set] GID=[not set] NRestarts=0 ExecMainStartTimestamp=Fri 2020-04-03 09:41:24 UTC ExecMainStartTimestampMonotonic=13926144 ExecMainExitTimestampMonotonic=0 ExecMainPID=1421 ExecMainCode=0 ExecMainStatus=0 ExecStartPre={ path=/bin/mkdir ; argv[]=/bin/mkdir --parents /etc/kubernetes/manifests ; ignore_errors=no ; start_time=[Fri 2020-04-03 09:41:24 UTC] ; stop_time=[Fri 2020-04-03 09:41:24 UTC] ; pid=1417 ; code=exited ; status=0 } ExecStartPre={ path=/bin/rm ; argv[]=/bin/rm -f /var/lib/kubelet/cpu_manager_state ; ignore_errors=no ; start_time=[Fri 2020-04-03 09:41:24 UTC] ; stop_time=[Fri 2020-04-03 09:41:24 UTC] ; pid=1419 ; code=exited ; status=0 } ExecStart={ path=/usr/bin/hyperkube ; argv[]=/usr/bin/hyperkube kubelet --config=/etc/kubernetes/kubelet.conf --bootstrap-kubeconfig=/etc/kubernetes/kubeconfig --kubeconfig=/var/lib/kubelet/kubeconfig --container-runtime=remote --container-runtime-endpoint=/var/run/crio/crio.sock 
--node-labels=node-role.kubernetes.io/worker,node.openshift.io/os_id=${ID} --minimum-container-ttl-duration=6m0s --volume-plugin-dir=/etc/kubernetes/kubelet-plugins/volume/exec --cloud-provider=gce --v=3 ; ignore_errors=no ; start_time=[Fri 2020-04-03 09:41:24 UTC] ; stop_time=[n/a] ; pid=1421 ; code=(null) ; status=0/0 } Slice=system.slice ControlGroup=/system.slice/kubelet.service MemoryCurrent=127049728 CPUUsageNSec=500842912550 EffectiveCPUs= EffectiveMemoryNodes= TasksCurrent=28 IPIngressBytes=18446744073709551615 IPIngressPackets=18446744073709551615 IPEgressBytes=18446744073709551615 IPEgressPackets=18446744073709551615 Delegate=no CPUAccounting=yes CPUWeight=[not set] StartupCPUWeight=[not set] CPUShares=[not set] StartupCPUShares=[not set] CPUQuotaPerSecUSec=infinity AllowedCPUs= AllowedMemoryNodes= IOAccounting=no IOWeight=[not set] StartupIOWeight=[not set] BlockIOAccounting=yes BlockIOWeight=[not set] StartupBlockIOWeight=[not set] MemoryAccounting=yes MemoryLow=0 MemoryHigh=infinity MemoryMax=infinity MemorySwapMax=infinity MemoryLimit=infinity DevicePolicy=auto TasksAccounting=yes TasksMax=infinity IPAccounting=no EnvironmentFiles=/etc/os-release (ignore_errors=no) EnvironmentFiles=/etc/kubernetes/kubelet-workaround (ignore_errors=yes) EnvironmentFiles=/etc/kubernetes/kubelet-env (ignore_errors=yes) UMask=0022 LimitCPU=infinity LimitCPUSoft=infinity LimitFSIZE=infinity LimitFSIZESoft=infinity LimitDATA=infinity LimitDATASoft=infinity LimitSTACK=infinity LimitSTACKSoft=8388608 LimitCORE=infinity LimitCORESoft=infinity LimitRSS=infinity LimitRSSSoft=infinity LimitNOFILE=262144 LimitNOFILESoft=1024 LimitAS=infinity LimitASSoft=infinity LimitNPROC=59797 LimitNPROCSoft=59797 LimitMEMLOCK=65536 LimitMEMLOCKSoft=65536 LimitLOCKS=infinity LimitLOCKSSoft=infinity LimitSIGPENDING=59797 LimitSIGPENDINGSoft=59797 LimitMSGQUEUE=819200 LimitMSGQUEUESoft=819200 LimitNICE=0 LimitNICESoft=0 LimitRTPRIO=0 LimitRTPRIOSoft=0 LimitRTTIME=infinity LimitRTTIMESoft=infinity OOMScoreAdjust=0 Nice=0 IOSchedulingClass=0 IOSchedulingPriority=0 CPUSchedulingPolicy=0 CPUSchedulingPriority=0 CPUAffinity= NUMAPolicy=n/a NUMAMask= TimerSlackNSec=50000 CPUSchedulingResetOnFork=no NonBlocking=no StandardInput=null StandardInputData= StandardOutput=journal StandardError=inherit TTYReset=no TTYVHangup=no TTYVTDisallocate=no SyslogPriority=30 SyslogLevelPrefix=yes SyslogLevel=6 SyslogFacility=3 LogLevelMax=-1 LogRateLimitIntervalUSec=0 LogRateLimitBurst=0 SecureBits=0 CapabilityBoundingSet=cap_chown cap_dac_override cap_dac_read_search cap_fowner cap_fsetid cap_kill cap_setgid cap_setuid cap_setpcap cap_linux_immutable cap_net_bind_service cap_net_broadcast cap_net_admin cap_net_raw cap_ipc_lock cap_ipc_owner cap_sys_module cap_sys_rawio cap_sys_chroot cap_sys_ptrace cap_sys_pacct cap_sys_admin cap_sys_boot cap_sys_nice cap_sys_resource cap_sys_time cap_sys_tty_config cap_mknod cap_lease cap_audit_write cap_audit_control cap_setfcap cap_mac_override cap_mac_admin cap_syslog cap_wake_alarm cap_block_suspend AmbientCapabilities= DynamicUser=no RemoveIPC=no MountFlags= PrivateTmp=no PrivateDevices=no ProtectKernelTunables=no ProtectKernelModules=no ProtectControlGroups=no PrivateNetwork=no PrivateUsers=no PrivateMounts=no ProtectHome=no ProtectSystem=no SameProcessGroup=no UtmpMode=init IgnoreSIGPIPE=yes NoNewPrivileges=no SystemCallErrorNumber=0 LockPersonality=no RuntimeDirectoryPreserve=no RuntimeDirectoryMode=0755 StateDirectoryMode=0755 CacheDirectoryMode=0755 LogsDirectoryMode=0755 
ConfigurationDirectoryMode=0755 MemoryDenyWriteExecute=no RestrictRealtime=no RestrictSUIDSGID=no RestrictNamespaces=no MountAPIVFS=no KeyringMode=private KillMode=control-group KillSignal=15 SendSIGKILL=yes SendSIGHUP=no Id=kubelet.service Names=kubelet.service Requires=system.slice sysinit.target Wants=rpc-statd.service crio.service network-online.target WantedBy=multi-user.target Conflicts=shutdown.target Before=multi-user.target shutdown.target After=network-online.target systemd-journald.socket machine-config-daemon-host.service system.slice basic.target crio.service machine-config-daemon-firstboot.service sysinit.target Description=Kubernetes Kubelet LoadState=loaded ActiveState=active SubState=running FragmentPath=/etc/systemd/system/kubelet.service DropInPaths=/etc/systemd/system/kubelet.service.d/10-default-env.conf UnitFileState=enabled UnitFilePreset=enabled StateChangeTimestamp=Fri 2020-04-03 09:41:25 UTC StateChangeTimestampMonotonic=15431926 InactiveExitTimestamp=Fri 2020-04-03 09:41:24 UTC InactiveExitTimestampMonotonic=13915328 ActiveEnterTimestamp=Fri 2020-04-03 09:41:25 UTC ActiveEnterTimestampMonotonic=15431926 ActiveExitTimestampMonotonic=0 InactiveEnterTimestampMonotonic=0 CanStart=yes CanStop=yes CanReload=no CanIsolate=no StopWhenUnneeded=no RefuseManualStart=no RefuseManualStop=no AllowIsolate=no DefaultDependencies=yes OnFailureJobMode=replace IgnoreOnIsolate=no NeedDaemonReload=no JobTimeoutUSec=infinity JobRunningTimeoutUSec=infinity JobTimeoutAction=none ConditionResult=yes AssertResult=yes ConditionTimestamp=Fri 2020-04-03 09:41:24 UTC ConditionTimestampMonotonic=13913469 AssertTimestamp=Fri 2020-04-03 09:41:24 UTC AssertTimestampMonotonic=13913469 Transient=no Perpetual=no StartLimitIntervalUSec=10s StartLimitBurst=5 StartLimitAction=none FailureAction=none SuccessAction=none InvocationID=74bc6f1e86924fc49e37ec977667d131 CollectMode=inactive ``` Have `crio` ## crio Output of "`crio --version`": ``` crio version 1.16.4-1.dev.rhaos4.3.git9238eee.el8 ``` Output of "`systemctl show crio`": ``` Type=notify Restart=on-abnormal NotifyAccess=main RestartUSec=100ms TimeoutStartUSec=infinity TimeoutStopUSec=1min 30s RuntimeMaxUSec=infinity WatchdogUSec=0 WatchdogTimestamp=Fri 2020-04-03 09:41:24 UTC WatchdogTimestampMonotonic=13905397 PermissionsStartOnly=no RootDirectoryStartOnly=no RemainAfterExit=no GuessMainPID=yes MainPID=1309 ControlPID=0 FileDescriptorStoreMax=0 NFileDescriptorStore=0 StatusErrno=0 Result=success UID=[not set] GID=[not set] NRestarts=0 ExecMainStartTimestamp=Fri 2020-04-03 09:41:22 UTC ExecMainStartTimestampMonotonic=12514785 ExecMainExitTimestampMonotonic=0 ExecMainPID=1309 ExecMainCode=0 ExecMainStatus=0 ExecStart={ path=/usr/bin/crio ; argv[]=/usr/bin/crio $CRIO_STORAGE_OPTIONS $CRIO_NETWORK_OPTIONS $CRIO_METRICS_OPTIONS ; ignore_errors=no ; start_time=[Fri 2020-04-03 09:41:22 UTC] ; stop_time=[n/a] ; pid=1309 ; code=(null) ; status=0/0 } ExecReload={ path=/bin/kill ; argv[]=/bin/kill -s HUP $MAINPID ; ignore_errors=no ; start_time=[n/a] ; stop_time=[n/a] ; pid=0 ; code=(null) ; status=0/0 } Slice=system.slice ControlGroup=/system.slice/crio.service MemoryCurrent=223531008 CPUUsageNSec=1128660045260 EffectiveCPUs= EffectiveMemoryNodes= TasksCurrent=18 IPIngressBytes=18446744073709551615 IPIngressPackets=18446744073709551615 IPEgressBytes=18446744073709551615 IPEgressPackets=18446744073709551615 Delegate=no CPUAccounting=yes CPUWeight=[not set] StartupCPUWeight=[not set] CPUShares=[not set] StartupCPUShares=[not set] CPUQuotaPerSecUSec=infinity 
AllowedCPUs= AllowedMemoryNodes= IOAccounting=no IOWeight=[not set] StartupIOWeight=[not set] BlockIOAccounting=yes BlockIOWeight=[not set] StartupBlockIOWeight=[not set] MemoryAccounting=yes MemoryLow=0 MemoryHigh=infinity MemoryMax=infinity MemorySwapMax=infinity MemoryLimit=infinity DevicePolicy=auto TasksAccounting=yes TasksMax=infinity IPAccounting=no Environment=GOTRACEBACK=crash EnvironmentFiles=/etc/sysconfig/crio (ignore_errors=yes) EnvironmentFiles=/etc/sysconfig/crio-metrics (ignore_errors=yes) EnvironmentFiles=/etc/sysconfig/crio-network (ignore_errors=yes) EnvironmentFiles=/etc/sysconfig/crio-storage (ignore_errors=yes) UMask=0022 LimitCPU=infinity LimitCPUSoft=infinity LimitFSIZE=infinity LimitFSIZESoft=infinity LimitDATA=infinity LimitDATASoft=infinity LimitSTACK=infinity LimitSTACKSoft=8388608 LimitCORE=infinity LimitCORESoft=infinity LimitRSS=infinity LimitRSSSoft=infinity LimitNOFILE=1048576 LimitNOFILESoft=1048576 LimitAS=infinity LimitASSoft=infinity LimitNPROC=1048576 LimitNPROCSoft=1048576 LimitMEMLOCK=65536 LimitMEMLOCKSoft=65536 LimitLOCKS=infinity LimitLOCKSSoft=infinity LimitSIGPENDING=59797 LimitSIGPENDINGSoft=59797 LimitMSGQUEUE=819200 LimitMSGQUEUESoft=819200 LimitNICE=0 LimitNICESoft=0 LimitRTPRIO=0 LimitRTPRIOSoft=0 LimitRTTIME=infinity LimitRTTIMESoft=infinity OOMScoreAdjust=-999 Nice=0 IOSchedulingClass=0 IOSchedulingPriority=0 CPUSchedulingPolicy=0 CPUSchedulingPriority=0 CPUAffinity= NUMAPolicy=n/a NUMAMask= TimerSlackNSec=50000 CPUSchedulingResetOnFork=no NonBlocking=no StandardInput=null StandardInputData= StandardOutput=journal StandardError=inherit TTYReset=no TTYVHangup=no TTYVTDisallocate=no SyslogPriority=30 SyslogLevelPrefix=yes SyslogLevel=6 SyslogFacility=3 LogLevelMax=-1 LogRateLimitIntervalUSec=0 LogRateLimitBurst=0 SecureBits=0 CapabilityBoundingSet=cap_chown cap_dac_override cap_dac_read_search cap_fowner cap_fsetid cap_kill cap_setgid cap_setuid cap_setpcap cap_linux_immutable cap_net_bind_service cap_net_broadcast cap_net_admin cap_net_raw cap_ipc_lock cap_ipc_owner cap_sys_module cap_sys_rawio cap_sys_chroot cap_sys_ptrace cap_sys_pacct cap_sys_admin cap_sys_boot cap_sys_nice cap_sys_resource cap_sys_time cap_sys_tty_config cap_mknod cap_lease cap_audit_write cap_audit_control cap_setfcap cap_mac_override cap_mac_admin cap_syslog cap_wake_alarm cap_block_suspend AmbientCapabilities= DynamicUser=no RemoveIPC=no MountFlags= PrivateTmp=no PrivateDevices=no ProtectKernelTunables=no ProtectKernelModules=no ProtectControlGroups=no PrivateNetwork=no PrivateUsers=no PrivateMounts=no ProtectHome=no ProtectSystem=no SameProcessGroup=no UtmpMode=init IgnoreSIGPIPE=yes NoNewPrivileges=no SystemCallErrorNumber=0 LockPersonality=no RuntimeDirectoryPreserve=no RuntimeDirectoryMode=0755 StateDirectoryMode=0755 CacheDirectoryMode=0755 LogsDirectoryMode=0755 ConfigurationDirectoryMode=0755 MemoryDenyWriteExecute=no RestrictRealtime=no RestrictSUIDSGID=no RestrictNamespaces=no MountAPIVFS=no KeyringMode=private KillMode=control-group KillSignal=15 SendSIGKILL=yes SendSIGHUP=no Id=crio.service Names=crio.service Requires=sysinit.target system.slice crio-wipe.service WantedBy=kubelet.service Conflicts=shutdown.target Before=shutdown.target kubelet.service After=system.slice crio-wipe.service systemd-journald.socket basic.target network-online.target sysinit.target Documentation=https://github.com/cri-o/cri-o Description=Open Container Initiative Daemon LoadState=loaded ActiveState=active SubState=running FragmentPath=/usr/lib/systemd/system/crio.service 
DropInPaths=/etc/systemd/system/crio.service.d/10-default-env.conf UnitFileState=disabled UnitFilePreset=disabled StateChangeTimestamp=Fri 2020-04-03 09:41:24 UTC StateChangeTimestampMonotonic=13905399 InactiveExitTimestamp=Fri 2020-04-03 09:41:22 UTC InactiveExitTimestampMonotonic=12514849 ActiveEnterTimestamp=Fri 2020-04-03 09:41:24 UTC ActiveEnterTimestampMonotonic=13905399 ActiveExitTimestampMonotonic=0 InactiveEnterTimestampMonotonic=0 CanStart=yes CanStop=yes CanReload=yes CanIsolate=no StopWhenUnneeded=no RefuseManualStart=no RefuseManualStop=no AllowIsolate=no DefaultDependencies=yes OnFailureJobMode=replace IgnoreOnIsolate=no NeedDaemonReload=no JobTimeoutUSec=infinity JobRunningTimeoutUSec=infinity JobTimeoutAction=none ConditionResult=yes AssertResult=yes ConditionTimestamp=Fri 2020-04-03 09:41:22 UTC ConditionTimestampMonotonic=12511088 AssertTimestamp=Fri 2020-04-03 09:41:22 UTC AssertTimestampMonotonic=12511088 Transient=no Perpetual=no StartLimitIntervalUSec=10s StartLimitBurst=5 StartLimitAction=none FailureAction=none SuccessAction=none InvocationID=dc9c0ce408c34b83b0b0f03cdc069004 CollectMode=inactive ``` Output of "`cat /etc/crio/crio.conf`": ``` [crio] # The default log directory where all logs will go unless directly specified by # the kubelet. The log directory specified must be an absolute directory. log_dir = "/var/log/crio/pods" # Location for CRI-O to lay down the version file version_file = "/var/lib/crio/version" # The crio.api table contains settings for the kubelet/gRPC interface. [crio.api] # Path to AF_LOCAL socket on which CRI-O will listen. listen = "/var/run/crio/crio.sock" # Host IP considered as the primary IP to use by CRI-O for things such as host network IP. host_ip = "" # IP address on which the stream server will listen. stream_address = "" # The port on which the stream server will listen. stream_port = "10010" # Enable encrypted TLS transport of the stream server. stream_enable_tls = false # Path to the x509 certificate file used to serve the encrypted stream. This # file can change, and CRI-O will automatically pick up the changes within 5 # minutes. stream_tls_cert = "" # Path to the key file used to serve the encrypted stream. This file can # change and CRI-O will automatically pick up the changes within 5 minutes. stream_tls_key = "" # Path to the x509 CA(s) file used to verify and authenticate client # communication with the encrypted stream. This file can change and CRI-O will # automatically pick up the changes within 5 minutes. stream_tls_ca = "" # Maximum grpc send message size in bytes. If not set or <=0, then CRI-O will default to 16 * 1024 * 1024. grpc_max_send_msg_size = 16777216 # Maximum grpc receive message size. If not set or <= 0, then CRI-O will default to 16 * 1024 * 1024. grpc_max_recv_msg_size = 16777216 # The crio.runtime table contains settings pertaining to the OCI runtime used # and options for how to set up and manage the OCI runtime. [crio.runtime] # default_runtime is the _name_ of the OCI runtime to be used as the default. # The name is matched against the runtimes map below. default_runtime = "runc" # If true, the runtime will not use pivot_root, but instead use MS_MOVE. no_pivot = false # Path to the conmon binary, used for monitoring the OCI runtime. # Will be searched for using $PATH if empty. conmon = "/usr/libexec/crio/conmon" # Cgroup setting for conmon conmon_cgroup = "pod" # Environment variable list for the conmon process, used for passing necessary # environment variables to conmon or the runtime. 
conmon_env = [
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
]
# If true, SELinux will be used for pod separation on the host.
selinux = true
# Path to the seccomp.json profile which is used as the default seccomp profile
# for the runtime. If not specified, then the internal default seccomp profile
# will be used.
seccomp_profile = ""
# Used to change the name of the default AppArmor profile of CRI-O. The default
# profile name is "crio-default-" followed by the version string of CRI-O.
apparmor_profile = "crio-default"
# Cgroup management implementation used for the runtime.
cgroup_manager = "systemd"
# List of default capabilities for containers. If it is empty or commented out,
# only the capabilities defined in the containers json file by the user/kube
# will be added.
default_capabilities = [
"CHOWN",
"DAC_OVERRIDE",
"FSETID",
"FOWNER",
"NET_RAW",
"SETGID",
"SETUID",
"SETPCAP",
"NET_BIND_SERVICE",
"SYS_CHROOT",
"KILL",
]
# List of default sysctls. If it is empty or commented out, only the sysctls
# defined in the container json file by the user/kube will be added.
default_sysctls = [
]
# List of additional devices, specified as
# "<device-on-host>:<device-on-container>:<permissions>", for example: "--device=/dev/sdc:/dev/xvdc:rwm".
# If it is empty or commented out, only the devices
# defined in the container json file by the user/kube will be added.
additional_devices = [
]
# Path to OCI hooks directories for automatically executed hooks.
hooks_dir = [
"/etc/containers/oci/hooks.d",
]
# List of default mounts for each container. **Deprecated:** this option will
# be removed in future versions in favor of default_mounts_file.
default_mounts = [
]
# Maximum number of processes allowed in a container.
pids_limit = 1024
# Maximum size allowed for the container log file. Negative numbers indicate
# that no size limit is imposed. If it is positive, it must be >= 8192 to
# match/exceed conmon's read buffer. The file is truncated and re-opened so the
# limit is never exceeded.
log_size_max = -1
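# For example (illustrative, not set in this config): log_size_max = 16384
# would cap each container log at 16 KiB; 8192 is the smallest positive value
# accepted, matching conmon's read buffer.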
# Whether container output should be logged to journald in addition to the kubernetes log file
log_to_journald = false
# Path to directory in which container exit files are written to by conmon.
container_exits_dir = "/var/run/crio/exits"
# Path to directory for container attach sockets.
container_attach_socket_dir = "/var/run/crio"
# The prefix to use for the source of the bind mounts.
bind_mount_prefix = ""
# If set to true, all containers will run in read-only mode.
read_only = false
# Changes the verbosity of the logs based on the level it is set to. Options
# are fatal, panic, error, warn, info, and debug. This option supports live
# configuration reload.
log_level = "error"
# The UID mappings for the user namespace of each container. A range is
# specified in the form containerUID:HostUID:Size. Multiple ranges must be
# separated by comma.
uid_mappings = ""
# The GID mappings for the user namespace of each container. A range is
# specified in the form containerGID:HostGID:Size. Multiple ranges must be
# separated by comma.
gid_mappings = ""
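# For example (illustrative, not set on this host): a single range mapping
# container ID 0 to host ID 100000 with a size of 65536 would be written as
# uid_mappings = "0:100000:65536"
# gid_mappings = "0:100000:65536"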
manage_network_ns_lifecycle = true
[crio.runtime.runtimes.kata-qemu]
runtime_path="/usr/bin/kata-runtime"
# The minimal amount of time in seconds to wait before issuing a timeout
# regarding the proper termination of the container.
ctr_stop_timeout = 0
# ManageNetworkNSLifecycle determines whether we pin and remove network namespace
# and manage its lifecycle.
manage_network_ns_lifecycle = false
# The "crio.runtime.runtimes" table defines a list of OCI compatible runtimes.
# The runtime to use is picked based on the runtime_handler provided by the CRI.
# If no runtime_handler is provided, the runtime will be picked based on the level
# of trust of the workload. Each entry in the table should follow the format:
#
#[crio.runtime.runtimes.runtime-handler]
# runtime_path = "/path/to/the/executable"
# runtime_type = "oci"
# runtime_root = "/path/to/the/root"
#
# Where:
# - runtime-handler: name used to identify the runtime
# - runtime_path (optional, string): absolute path to the runtime executable in
# the host filesystem. If omitted, the runtime-handler identifier should match
# the runtime executable name, and the runtime executable should be placed
# in $PATH.
# - runtime_type (optional, string): type of runtime, one of: "oci", "vm". If
# omitted, an "oci" runtime is assumed.
# - runtime_root (optional, string): root directory for storage of containers
# state.
[crio.runtime.runtimes.runc]
runtime_path = ""
runtime_type = "oci"
runtime_root = "/run/runc"
# CRI-O reads its configured registries defaults from the system wide
# containers-registries.conf(5) located in /etc/containers/registries.conf. If
# you want to modify just CRI-O, you can change the registries configuration in
# this file. Otherwise, leave insecure_registries and registries commented out to
# use the system's defaults from /etc/containers/registries.conf.
[crio.image]
# Default transport for pulling images from a remote container storage.
default_transport = "docker://"
# The path to a file containing credentials necessary for pulling images from
# secure registries. The file is similar to that of /var/lib/kubelet/config.json
global_auth_file = "/var/lib/kubelet/config.json"
# The image used to instantiate infra containers.
# This option supports live configuration reload.
pause_image = "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d2d6e45c9b45950259c392c8624c81e6d729087e91fb033d9d5adcb09e4abd3c"
# The path to a file containing credentials specific for pulling the pause_image from
# above. The file is similar to that of /var/lib/kubelet/config.json
# This option supports live configuration reload.
pause_image_auth_file = "/var/lib/kubelet/config.json"
# The command to run to have a container stay in the paused state.
# When explicitly set to "", it will fall back to the entrypoint and command
# specified in the pause image. When commented out, it will fall back to the
# default: "/pause". This option supports live configuration reload.
pause_command = "/usr/bin/pod"
# Path to the file which decides what sort of policy we use when deciding
# whether or not to trust an image that we've pulled. It is not recommended that
# this option be used, as the default behavior of using the system-wide default
# policy (i.e., /etc/containers/policy.json) is most often preferred. Please
# refer to containers-policy.json(5) for more details.
signature_policy = ""
# Controls how image volumes are handled. The valid values are mkdir, bind and
# ignore; the latter will ignore volumes entirely.
image_volumes = "mkdir"
# The crio.network table contains settings pertaining to the management of
# CNI plugins.
[crio.network]
# Path to the directory where CNI configuration files are located.
network_dir = "/etc/kubernetes/cni/net.d/"
# Paths to directories where CNI plugin binaries are located.
plugin_dirs = [
"/var/lib/cni/bin",
]
# A necessary configuration for Prometheus based metrics retrieval
[crio.metrics]
# Globally enable or disable metrics support.
enable_metrics = true
# The port on which the metrics server will listen.
metrics_port = 9537
```
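Note that the `kata-qemu` handler in the config above sets only `runtime_path`; per the format documented in the config's own comments, `runtime_type` then defaults to `"oci"`, and the binary could instead be resolved from `$PATH` if the path were omitted. A minimal sketch of a fully spelled-out handler entry (the handler name and paths are hypothetical placeholders, not part of the collected config):

```toml
# Sketch only: a runtime-handler entry following the documented format.
# "my-kata" and both paths below are hypothetical.
[crio.runtime.runtimes.my-kata]
runtime_path = "/usr/bin/kata-runtime"  # absolute path to the runtime binary
runtime_type = "oci"                    # the default when omitted
runtime_root = "/run/my-kata"           # optional root directory for container state
```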
No `containerd`
---
# Packages
No `dpkg`
Have `rpm`
Output of "`rpm -qa|egrep "(cc-oci-runtime|cc-runtime|runv|kata-proxy|kata-runtime|kata-shim|kata-ksm-throttler|kata-containers-image|linux-container|qemu-)"`":
```
ipxe-roms-qemu-20181214-5.git133f4c47.el8.noarch
qemu-kvm-common-2.12.0-99.module+el8.2.0+5827+8c39933c.x86_64
qemu-kvm-block-rbd-2.12.0-99.module+el8.2.0+5827+8c39933c.x86_64
qemu-kvm-core-2.12.0-99.module+el8.2.0+5827+8c39933c.x86_64
kata-runtime-1.10.0-2.fc32.fidencio.202002062140.x86_64
kata-proxy-1.10.0-2.fc32.fidencio.202002062140.x86_64
kata-shim-1.10.0-2.fc32.fidencio.202002062140.x86_64
qemu-kvm-block-curl-2.12.0-99.module+el8.2.0+5827+8c39933c.x86_64
qemu-kvm-block-iscsi-2.12.0-99.module+el8.2.0+5827+8c39933c.x86_64
qemu-kvm-block-ssh-2.12.0-99.module+el8.2.0+5827+8c39933c.x86_64
qemu-img-2.12.0-99.module+el8.2.0+5827+8c39933c.x86_64
qemu-kvm-2.12.0-99.module+el8.2.0+5827+8c39933c.x86_64
kata-ksm-throttler-1.10.0-2.fc32.fidencio.202002062140.x86_64
qemu-kvm-block-gluster-2.12.0-99.module+el8.2.0+5827+8c39933c.x86_64
```
---