kata-containers / tests

Kata Containers tests, CI, and metrics
https://katacontainers.io/
Apache License 2.0
140 stars 196 forks source link

random state failure CentOS #2264

Closed GabyCT closed 3 years ago

GabyCT commented 4 years ago

See http://jenkins.katacontainers.io/job/kata-containers-tests-centos-8-q-35-PR/15/console

Meta details

Running kata-collect-data.sh version 1.10.0-rc0 (commit f1f9414a59209b62005bc55e506052fc087d71d7) at 2020-01-27.19:50:27.218036032+0000.


Runtime is /usr/local/bin/kata-runtime.

kata-env

Output of "/usr/local/bin/kata-runtime kata-env":

[Meta]
  Version = "1.0.23"

[Runtime]
  Debug = true
  Trace = false
  DisableGuestSeccomp = true
  DisableNewNetNs = false
  SandboxCgroupOnly = false
  Path = "/usr/local/bin/kata-runtime"
  [Runtime.Version]
    Semver = "1.10.0-rc0"
    Commit = "f1f9414a59209b62005bc55e506052fc087d71d7"
    OCI = "1.0.1-dev"
  [Runtime.Config]
    Path = "/usr/share/defaults/kata-containers/configuration.toml"

[Hypervisor]
  MachineType = "q35"
  Version = "QEMU emulator version 4.1.1 (kata-static)\nCopyright (c) 2003-2019 Fabrice Bellard and the QEMU Project developers"
  Path = "/usr/bin/qemu-system-x86_64"
  BlockDeviceDriver = "virtio-scsi"
  EntropySource = "/dev/urandom"
  Msize9p = 8192
  MemorySlots = 10
  Debug = true
  UseVSock = false
  SharedFS = "virtio-9p"

[Image]
  Path = "/usr/share/kata-containers/kata-containers-clearlinux-32180-osbuilder-7526f49-agent-f8f6b67.img"

[Kernel]
  Path = "/usr/share/kata-containers/vmlinuz-4.19.86-63"
  Parameters = "systemd.unit=kata-containers.target systemd.mask=systemd-networkd.service systemd.mask=systemd-networkd.socket agent.log=debug agent.log=debug"

[Initrd]
  Path = ""

[Proxy]
  Type = "kataProxy"
  Version = "kata-proxy version 1.10.0-rc0-a101c3da6f36167fbaa6daa72f8caf8b8c5230f0"
  Path = "/usr/libexec/kata-containers/kata-proxy"
  Debug = true

[Shim]
  Type = "kataShim"
  Version = "kata-shim version 1.10.0-rc0-7e0e427efe81cc2ce7e8298178a72183a9811698"
  Path = "/usr/libexec/kata-containers/kata-shim"
  Debug = true

[Agent]
  Type = "kata"
  Debug = true
  Trace = false
  TraceMode = ""
  TraceType = ""

[Host]
  Kernel = "4.18.0-80.11.2.el8_0.x86_64"
  Architecture = "amd64"
  VMContainerCapable = true
  SupportVSocks = true
  [Host.Distro]
    Name = "CentOS Linux"
    Version = "8"
  [Host.CPU]
    Vendor = "GenuineIntel"
    Model = "Intel(R) Xeon(R) Platinum 8171M CPU @ 2.60GHz"

[Netmon]
  Version = "kata-netmon version 1.10.0-rc0"
  Path = "/usr/libexec/kata-containers/kata-netmon"
  Debug = true
  Enable = false

Runtime config files

Runtime default config files

/etc/kata-containers/configuration.toml
/usr/share/defaults/kata-containers/configuration.toml

Runtime config file contents

Config file /etc/kata-containers/configuration.toml not found Output of "cat "/usr/share/defaults/kata-containers/configuration.toml"":

# Copyright (c) 2017-2019 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#

# XXX: WARNING: this file is auto-generated.
# XXX:
# XXX: Source file: "cli/config/configuration-qemu.toml.in"
# XXX: Project:
# XXX:   Name: Kata Containers
# XXX:   Type: kata

[hypervisor.qemu]
path = "/usr/bin/qemu-system-x86_64"
kernel = "/usr/share/kata-containers/vmlinuz.container"
image = "/usr/share/kata-containers/kata-containers.img"
machine_type = "q35"

# Optional space-separated list of options to pass to the guest kernel.
# For example, use `kernel_params = "vsyscall=emulate"` if you are having
# trouble running pre-2.15 glibc.
#
# WARNING: - any parameter specified here will take priority over the default
# parameter value of the same name used to start the virtual machine.
# Do not set values here unless you understand the impact of doing so as you
# may stop the virtual machine from booting.
# To see the list of default parameters, enable hypervisor debug, create a
# container and look for 'default-kernel-parameters' log entries.
kernel_params = " agent.log=debug"

# Path to the firmware.
# If you want qemu to use the default firmware, leave this option empty
firmware = ""

# Machine accelerators
# comma-separated list of machine accelerators to pass to the hypervisor.
# For example, `machine_accelerators = "nosmm,nosmbus,nosata,nopit,static-prt,nofw"`
machine_accelerators=""

# Default number of vCPUs per SB/VM:
# unspecified or 0                --> will be set to 1
# < 0                             --> will be set to the actual number of physical cores
# > 0 <= number of physical cores --> will be set to the specified number
# > number of physical cores      --> will be set to the actual number of physical cores
default_vcpus = 1

# Default maximum number of vCPUs per SB/VM:
# unspecified or == 0             --> will be set to the actual number of physical cores or to the maximum number
#                                     of vCPUs supported by KVM if that number is exceeded
# > 0 <= number of physical cores --> will be set to the specified number
# > number of physical cores      --> will be set to the actual number of physical cores or to the maximum number
#                                     of vCPUs supported by KVM if that number is exceeded
# WARNING: Depending of the architecture, the maximum number of vCPUs supported by KVM is used when
# the actual number of physical cores is greater than it.
# WARNING: Be aware that this value impacts the virtual machine's memory footprint and CPU
# hotplug functionality. For example, `default_maxvcpus = 240` specifies that up to 240 vCPUs
# can be added to a SB/VM, but the memory footprint will be big. Another example, with
# `default_maxvcpus = 8` the memory footprint will be small, but 8 will be the maximum number of
# vCPUs supported by the SB/VM. In general, we recommend that you do not edit this variable,
# unless you know what you are doing.
default_maxvcpus = 0

# Bridges can be used to hot plug devices.
# Limitations:
# * Currently only pci bridges are supported
# * Up to 30 devices per bridge can be hot plugged.
# * Up to 5 PCI bridges can be cold plugged per VM.
#   This limitation could be a bug in qemu or in the kernel
# Default number of bridges per SB/VM:
# unspecified or 0   --> will be set to 1
# > 1 <= 5           --> will be set to the specified number
# > 5                --> will be set to 5
default_bridges = 1

# Default memory size in MiB for SB/VM.
# If unspecified then it will be set 2048 MiB.
default_memory = 2048
#
# Default memory slots per SB/VM.
# If unspecified then it will be set 10.
# This will determine how many times memory can be hotadded to the sandbox/VM.
#memory_slots = 10

# The size in MiB will be added to the max memory of the hypervisor.
# It is the memory address space for the NVDIMM device.
# If set block storage driver (block_device_driver) to "nvdimm",
# should set memory_offset to the size of block device.
# Default 0
#memory_offset = 0

# Specifies virtio-mem will be enabled or not.
# Please note that this option should be used with the command
# "echo 1 > /proc/sys/vm/overcommit_memory".
# Default false
#enable_virtio_mem = true

# Disable block device from being used for a container's rootfs.
# In case of a storage driver like devicemapper where a container's 
# root file system is backed by a block device, the block device is passed
# directly to the hypervisor for performance reasons. 
# This flag prevents the block device from being passed to the hypervisor, 
# 9pfs is used instead to pass the rootfs.
disable_block_device_use = false

# Shared file system type:
#   - virtio-9p (default)
#   - virtio-fs
shared_fs = "virtio-9p"

# Path to vhost-user-fs daemon.
virtio_fs_daemon = "/usr/bin/virtiofsd"

# Default size of DAX cache in MiB
virtio_fs_cache_size = 1024

# Extra args for virtiofsd daemon
#
# Format example:
#   ["-o", "arg1=xxx,arg2", "-o", "hello world", "--arg3=yyy"]
#
# see `virtiofsd -h` for possible options.
virtio_fs_extra_args = []

# Cache mode:
#
#  - none
#    Metadata, data, and pathname lookup are not cached in guest. They are
#    always fetched from host and any changes are immediately pushed to host.
#
#  - auto
#    Metadata and pathname lookup cache expires after a configured amount of
#    time (default is 1 second). Data is cached while the file is open (close
#    to open consistency).
#
#  - always
#    Metadata, data, and pathname lookup are cached in guest and never expire.
virtio_fs_cache = "always"

# Block storage driver to be used for the hypervisor in case the container
# rootfs is backed by a block device. This is virtio-scsi, virtio-blk
# or nvdimm.
block_device_driver = "virtio-scsi"

# Specifies cache-related options will be set to block devices or not.
# Default false
#block_device_cache_set = true

# Specifies cache-related options for block devices.
# Denotes whether use of O_DIRECT (bypass the host page cache) is enabled.
# Default false
#block_device_cache_direct = true

# Specifies cache-related options for block devices.
# Denotes whether flush requests for the device are ignored.
# Default false
#block_device_cache_noflush = true

# Enable iothreads (data-plane) to be used. This causes IO to be
# handled in a separate IO thread. This is currently only implemented
# for SCSI.
#
enable_iothreads = false

# Enable pre allocation of VM RAM, default false
# Enabling this will result in lower container density
# as all of the memory will be allocated and locked
# This is useful when you want to reserve all the memory
# upfront or in the cases where you want memory latencies
# to be very predictable
# Default false
#enable_mem_prealloc = true

# Enable huge pages for VM RAM, default false
# Enabling this will result in the VM memory
# being allocated using huge pages.
# This is useful when you want to use vhost-user network
# stacks within the container. This will automatically 
# result in memory pre allocation
#enable_hugepages = true

# Enable file based guest memory support. The default is an empty string which
# will disable this feature. In the case of virtio-fs, this is enabled
# automatically and '/dev/shm' is used as the backing folder.
# This option will be ignored if VM templating is enabled.
#file_mem_backend = ""

# Enable swap of vm memory. Default false.
# The behaviour is undefined if mem_prealloc is also set to true
#enable_swap = true

# This option changes the default hypervisor and kernel parameters
# to enable debug output where available. This extra output is added
# to the proxy logs, but only when proxy debug is also enabled.
# 
# Default false
enable_debug = true

# Disable the customizations done in the runtime when it detects
# that it is running on top of a VMM. This will result in the runtime
# behaving as it would when running on bare metal.
# 
#disable_nesting_checks = true

# This is the msize used for 9p shares. It is the number of bytes 
# used for 9p packet payload.
#msize_9p = 8192

# If true and vsocks are supported, use vsocks to communicate directly
# with the agent and no proxy is started, otherwise use unix
# sockets and start a proxy to communicate with the agent.
# Default false
#use_vsock = true

# If false and nvdimm is supported, use nvdimm device to plug guest image.
# Otherwise virtio-block device is used.
# Default is false
#disable_image_nvdimm = true

# VFIO devices are hotplugged on a bridge by default. 
# Enable hotplugging on root bus. This may be required for devices with
# a large PCI bar, as this is a current limitation with hotplugging on 
# a bridge. This value is valid for "pc" machine type.
# Default false
#hotplug_vfio_on_root_bus = true

# If vhost-net backend for virtio-net is not desired, set to true. Default is false, which trades off
# security (vhost-net runs ring0) for network I/O performance. 
#disable_vhost_net = true

#
# Default entropy source.
# The path to a host source of entropy (including a real hardware RNG)
# /dev/urandom and /dev/random are two main options.
# Be aware that /dev/random is a blocking source of entropy.  If the host
# runs out of entropy, the VMs boot time will increase leading to get startup
# timeouts.
# The source of entropy /dev/urandom is non-blocking and provides a
# generally acceptable source of entropy. It should work well for pretty much
# all practical purposes.
#entropy_source= "/dev/urandom"

# Path to OCI hook binaries in the *guest rootfs*.
# This does not affect host-side hooks which must instead be added to
# the OCI spec passed to the runtime.
#
# You can create a rootfs with hooks by customizing the osbuilder scripts:
# https://github.com/kata-containers/osbuilder
#
# Hooks must be stored in a subdirectory of guest_hook_path according to their
# hook type, i.e. "guest_hook_path/{prestart,postart,poststop}".
# The agent will scan these directories for executable files and add them, in
# lexicographical order, to the lifecycle of the guest container.
# Hooks are executed in the runtime namespace of the guest. See the official documentation:
# https://github.com/opencontainers/runtime-spec/blob/v1.0.1/config.md#posix-platform-hooks
# Warnings will be logged if any error is encountered while scanning for hooks,
# but it will not abort container execution.
#guest_hook_path = "/usr/share/oci/hooks"

[factory]
# VM templating support. Once enabled, new VMs are created from template
# using vm cloning. They will share the same initial kernel, initramfs and
# agent memory by mapping it readonly. It helps speeding up new container
# creation and saves a lot of memory if there are many kata containers running
# on the same host.
#
# When disabled, new VMs are created from scratch.
#
# Note: Requires "initrd=" to be set ("image=" is not supported).
#
# Default false
#enable_template = true

# Specifies the path of template.
#
# Default "/run/vc/vm/template"
#template_path = "/run/vc/vm/template"

# The number of caches of VMCache:
# unspecified or == 0   --> VMCache is disabled
# > 0                   --> will be set to the specified number
#
# VMCache is a function that creates VMs as caches before using it.
# It helps speed up new container creation.
# The function consists of a server and some clients communicating
# through Unix socket.  The protocol is gRPC in protocols/cache/cache.proto.
# The VMCache server will create some VMs and cache them by factory cache.
# It will convert the VM to gRPC format and transport it when it gets
# requests from clients.
# Factory grpccache is the VMCache client.  It will request gRPC format
# VM and convert it back to a VM.  If VMCache function is enabled,
# kata-runtime will request VM from factory grpccache when it creates
# a new sandbox.
#
# Default 0
#vm_cache_number = 0

# Specify the address of the Unix socket that is used by VMCache.
#
# Default /var/run/kata-containers/cache.sock
#vm_cache_endpoint = "/var/run/kata-containers/cache.sock"

[proxy.kata]
path = "/usr/libexec/kata-containers/kata-proxy"

# If enabled, proxy messages will be sent to the system log
# (default: disabled)
enable_debug = true

[shim.kata]
path = "/usr/libexec/kata-containers/kata-shim"

# If enabled, shim messages will be sent to the system log
# (default: disabled)
enable_debug = true

# If enabled, the shim will create opentracing.io traces and spans.
# (See https://www.jaegertracing.io/docs/getting-started).
#
# Note: By default, the shim runs in a separate network namespace. Therefore,
# to allow it to send trace details to the Jaeger agent running on the host,
# it is necessary to set 'disable_new_netns=true' so that it runs in the host
# network namespace.
#
# (default: disabled)
#enable_tracing = true

[agent.kata]
# If enabled, make the agent display debug-level messages.
# (default: disabled)
enable_debug = true

# Enable agent tracing.
#
# If enabled, the default trace mode is "dynamic" and the
# default trace type is "isolated". The trace mode and type are set
# explicitly with the `trace_type=` and `trace_mode=` options.
#
# Notes:
#
# - Tracing is ONLY enabled when `enable_tracing` is set: explicitly
#   setting `trace_mode=` and/or `trace_type=` without setting `enable_tracing`
#   will NOT activate agent tracing.
#
# - See https://github.com/kata-containers/agent/blob/master/TRACING.md for
#   full details.
#
# (default: disabled)
#enable_tracing = true
#
#trace_mode = "dynamic"
#trace_type = "isolated"

# Comma separated list of kernel modules and their parameters.
# These modules will be loaded in the guest kernel using modprobe(8).
# The following example can be used to load two kernel modules with parameters
#  - kernel_modules=["e1000e InterruptThrottleRate=3000,3000,3000 EEE=1", "i915 enable_ppgtt=0"]
# The first word is considered as the module name and the rest as its parameters.
# Container will not be started when:
#  * A kernel module is specified and the modprobe command is not installed in the guest
#    or it fails loading the module.
#  * The module is not available in the guest or it doesn't meet the guest kernel
#    requirements, like architecture and version.
#
kernel_modules=[]

[netmon]
# If enabled, the network monitoring process gets started when the
# sandbox is created. This allows for the detection of some additional
# network being added to the existing network namespace, after the
# sandbox has been created.
# (default: disabled)
#enable_netmon = true

# Specify the path to the netmon binary.
path = "/usr/libexec/kata-containers/kata-netmon"

# If enabled, netmon messages will be sent to the system log
# (default: disabled)
enable_debug = true

[runtime]
# If enabled, the runtime will log additional debug messages to the
# system log
# (default: disabled)
enable_debug = true
#
# Internetworking model
# Determines how the VM should be connected to the
# the container network interface
# Options:
#
#   - macvtap
#     Used when the Container network interface can be bridged using
#     macvtap.
#
#   - none
#     Used when customizing the network. Only creates a tap device. No veth pair.
#
#   - tcfilter
#     Uses tc filter rules to redirect traffic from the network interface
#     provided by plugin to a tap interface connected to the VM.
#
internetworking_model="tcfilter"

# disable guest seccomp
# Determines whether container seccomp profiles are passed to the virtual
# machine and applied by the kata agent. If set to true, seccomp is not applied
# within the guest
# (default: true)
disable_guest_seccomp=true

# If enabled, the runtime will create opentracing.io traces and spans.
# (See https://www.jaegertracing.io/docs/getting-started).
# (default: disabled)
#enable_tracing = true

# If enabled, the runtime will not create a network namespace for shim and hypervisor processes.
# This option may have some potential impacts to your host. It should only be used when you know what you're doing.
# `disable_new_netns` conflicts with `enable_netmon`
# `disable_new_netns` conflicts with `internetworking_model=tcfilter` and `internetworking_model=macvtap`. It works only
# with `internetworking_model=none`. The tap device will be in the host network namespace and can connect to a bridge
# (like OVS) directly.
# If you are using docker, `disable_new_netns` only works with `docker run --net=none`
# (default: false)
#disable_new_netns = true

# if enabled, the runtime will add all the kata processes inside one dedicated cgroup.
# The container cgroups in the host are not created, just one single cgroup per sandbox.
# The runtime caller is free to restrict or collect cgroup stats of the overall Kata sandbox.
# The sandbox cgroup path is the parent cgroup of a container with the PodSandbox annotation.
# The sandbox cgroup is constrained if there is no container type annotation.
# See: https://godoc.org/github.com/kata-containers/runtime/virtcontainers#ContainerType
sandbox_cgroup_only=false

# Enabled experimental feature list, format: ["a", "b"].
# Experimental features are features not stable enough for production,
# they may break compatibility, and are prepared for a big version bump.
# Supported experimental features:
# (default: [])
experimental=[]

KSM throttler

version

Output of "--version":

/usr/local/bin/kata-collect-data.sh: line 178: --version: command not found

systemd service

Image details

---
osbuilder:
  url: "https://github.com/kata-containers/osbuilder"
  version: "unknown"
rootfs-creation-time: "2020-01-27T19:46:35.585440621+0000Z"
description: "osbuilder rootfs"
file-format-version: "0.0.2"
architecture: "x86_64"
base-distro:
  name: "Clear"
  version: "32180"
  packages:
    default:
      - "chrony"
      - "iptables-bin"
      - "kmod-bin"
      - "libudev0-shim"
      - "systemd"
      - "util-linux-bin"
    extra:

agent:
  url: "https://github.com/kata-containers/agent"
  name: "kata-agent"
  version: "1.10.0-rc0-f8f6b6754809fa2af926d3bbfaaccd6c4f9981bd"
  agent-is-init-daemon: "no"

Initrd details

No initrd


Logfiles

Runtime logs

Recent runtime problems found in system journal:

time="2020-01-27T19:50:05.439375665Z" level=debug msg="Empty capabilities have been passed" arch=amd64 command=run container=pcUeuxwrw9V4GXvoY1Km name=kata-runtime pid=50105 source=virtcontainers subsystem=compatoci
time="2020-01-27T19:50:05.48475916Z" level=debug msg="restore sandbox failed" arch=amd64 command=run container=pcUeuxwrw9V4GXvoY1Km error="open /run/vc/sbs/pcUeuxwrw9V4GXvoY1Km/persist.json: no such file or directory" name=kata-runtime pid=50105 sandbox=pcUeuxwrw9V4GXvoY1Km source=virtcontainers subsystem=sandbox
time="2020-01-27T19:50:05.557940536Z" level=debug arch=amd64 command=run container=pcUeuxwrw9V4GXvoY1Km default-kernel-parameters="tsc=reliable no_timer_check rcupdate.rcu_expedited=1 i8042.direct=1 i8042.dumbkbd=1 i8042.nopnp=1 i8042.noaux=1 noreplace-smp reboot=k console=hvc0 console=hvc1 iommu=off cryptomgr.notests net.ifnames=0 pci=lastbus=0 root=/dev/pmem0p1 rootflags=dax,data=ordered,errors=remount-ro ro rootfstype=ext4 debug systemd.show_status=true systemd.log_level=debug" name=kata-runtime pid=50105 source=virtcontainers subsystem=qemu
time="2020-01-27T19:50:05.692869515Z" level=info msg="sanner return error: read unix @->/run/vc/vm/pcUeuxwrw9V4GXvoY1Km/qmp.sock: use of closed network connection" arch=amd64 command=run container=pcUeuxwrw9V4GXvoY1Km name=kata-runtime pid=50105 source=virtcontainers subsystem=qmp
time="2020-01-27T19:50:07.79243138Z" level=warning msg="sandbox's cgroup won't be updated: cgroup path is empty" arch=amd64 command=run container=pcUeuxwrw9V4GXvoY1Km name=kata-runtime pid=50105 sandbox=pcUeuxwrw9V4GXvoY1Km source=virtcontainers subsystem=sandbox
time="2020-01-27T19:50:07.809529091Z" level=info msg="sanner return error: read unix @->/run/vc/vm/pcUeuxwrw9V4GXvoY1Km/qmp.sock: use of closed network connection" arch=amd64 command=run container=pcUeuxwrw9V4GXvoY1Km name=kata-runtime pid=50105 source=virtcontainers subsystem=qmp
time="2020-01-27T19:50:07.899651878Z" level=warning msg="failed to get sandbox config from old store: open /var/lib/vc/sbs/pcUeuxwrw9V4GXvoY1Km/config.json: no such file or directory" arch=amd64 command=run container=pcUeuxwrw9V4GXvoY1Km name=kata-runtime pid=50105 source=virtcontainers
time="2020-01-27T19:50:07.950850911Z" level=debug msg="Empty capabilities have been passed" arch=amd64 command=run container=pcUeuxwrw9V4GXvoY1Km name=kata-runtime pid=50105 source=virtcontainers subsystem=compatoci
time="2020-01-27T19:50:07.976674179Z" level=warning msg="failed to get sandbox config from old store: open /var/lib/vc/sbs/pcUeuxwrw9V4GXvoY1Km/config.json: no such file or directory" arch=amd64 command=run container=pcUeuxwrw9V4GXvoY1Km name=kata-runtime pid=50105 sandbox=pcUeuxwrw9V4GXvoY1Km source=virtcontainers
time="2020-01-27T19:50:07.998527221Z" level=debug msg="Empty capabilities have been passed" arch=amd64 command=run container=pcUeuxwrw9V4GXvoY1Km name=kata-runtime pid=50105 sandbox=pcUeuxwrw9V4GXvoY1Km source=virtcontainers subsystem=compatoci
time="2020-01-27T19:50:18.198306023Z" level=warning msg="failed to get sandbox config from old store: open /var/lib/vc/sbs/pcUeuxwrw9V4GXvoY1Km/config.json: no such file or directory" arch=amd64 command=state container=pcUeuxwrw9V4GXvoY1Km name=kata-runtime pid=50159 source=virtcontainers
time="2020-01-27T19:50:18.200264836Z" level=debug msg="Empty capabilities have been passed" arch=amd64 command=state container=pcUeuxwrw9V4GXvoY1Km name=kata-runtime pid=50159 source=virtcontainers subsystem=compatoci
time="2020-01-27T19:50:18.253872785Z" level=warning msg="failed to get sandbox config from old store: open /var/lib/vc/sbs/pcUeuxwrw9V4GXvoY1Km/config.json: no such file or directory" arch=amd64 command=list name=kata-runtime pid=50171 source=virtcontainers
time="2020-01-27T19:50:18.255777698Z" level=debug msg="Empty capabilities have been passed" arch=amd64 command=list name=kata-runtime pid=50171 source=virtcontainers subsystem=compatoci
time="2020-01-27T19:50:18.291456431Z" level=warning msg="failed to get sandbox config from old store: open /var/lib/vc/sbs/pcUeuxwrw9V4GXvoY1Km/config.json: no such file or directory" arch=amd64 command=delete container=pcUeuxwrw9V4GXvoY1Km name=kata-runtime pid=50179 source=virtcontainers
time="2020-01-27T19:50:18.293417743Z" level=debug msg="Empty capabilities have been passed" arch=amd64 command=delete container=pcUeuxwrw9V4GXvoY1Km name=kata-runtime pid=50179 source=virtcontainers subsystem=compatoci
time="2020-01-27T19:50:18.293757146Z" level=warning msg="failed to get sandbox config from old store: open /var/lib/vc/sbs/pcUeuxwrw9V4GXvoY1Km/config.json: no such file or directory" arch=amd64 command=delete container=pcUeuxwrw9V4GXvoY1Km name=kata-runtime pid=50179 sandbox=pcUeuxwrw9V4GXvoY1Km source=virtcontainers
time="2020-01-27T19:50:18.29445665Z" level=debug msg="Empty capabilities have been passed" arch=amd64 command=delete container=pcUeuxwrw9V4GXvoY1Km name=kata-runtime pid=50179 sandbox=pcUeuxwrw9V4GXvoY1Km source=virtcontainers subsystem=compatoci
time="2020-01-27T19:50:18.294816453Z" level=warning msg="failed to get sandbox config from old store: open /var/lib/vc/sbs/pcUeuxwrw9V4GXvoY1Km/config.json: no such file or directory" arch=amd64 command=delete container=pcUeuxwrw9V4GXvoY1Km name=kata-runtime pid=50179 sandbox=pcUeuxwrw9V4GXvoY1Km source=virtcontainers
time="2020-01-27T19:50:18.295756459Z" level=debug msg="Empty capabilities have been passed" arch=amd64 command=delete container=pcUeuxwrw9V4GXvoY1Km name=kata-runtime pid=50179 sandbox=pcUeuxwrw9V4GXvoY1Km source=virtcontainers subsystem=compatoci
time="2020-01-27T19:50:18.324289845Z" level=info msg="sanner return error: <nil>" arch=amd64 command=delete container=pcUeuxwrw9V4GXvoY1Km name=kata-runtime pid=50179 sandbox=pcUeuxwrw9V4GXvoY1Km source=virtcontainers subsystem=qmp
time="2020-01-27T19:50:18.328936075Z" level=error msg="Could not read qemu pid file" arch=amd64 command=delete container=pcUeuxwrw9V4GXvoY1Km error="open /run/vc/vm/pcUeuxwrw9V4GXvoY1Km/pid: no such file or directory" name=kata-runtime pid=50179 sandbox=pcUeuxwrw9V4GXvoY1Km source=virtcontainers subsystem=qemu
time="2020-01-27T19:50:18.329185377Z" level=error msg="Could not read qemu pid file" arch=amd64 command=delete container=pcUeuxwrw9V4GXvoY1Km error="open /run/vc/vm/pcUeuxwrw9V4GXvoY1Km/pid: no such file or directory" name=kata-runtime pid=50179 sandbox=pcUeuxwrw9V4GXvoY1Km source=virtcontainers subsystem=qemu
time="2020-01-27T19:50:18.329461079Z" level=warning msg="failed to get sandbox config from old store: open /var/lib/vc/sbs/pcUeuxwrw9V4GXvoY1Km/config.json: no such file or directory" arch=amd64 command=delete container=pcUeuxwrw9V4GXvoY1Km name=kata-runtime pid=50179 sandbox=pcUeuxwrw9V4GXvoY1Km source=virtcontainers
time="2020-01-27T19:50:18.330371485Z" level=debug msg="Empty capabilities have been passed" arch=amd64 command=delete container=pcUeuxwrw9V4GXvoY1Km name=kata-runtime pid=50179 sandbox=pcUeuxwrw9V4GXvoY1Km source=virtcontainers subsystem=compatoci
time="2020-01-27T19:50:18.333603806Z" level=error msg="Could not read qemu pid file" arch=amd64 command=delete container=pcUeuxwrw9V4GXvoY1Km error="open /run/vc/vm/pcUeuxwrw9V4GXvoY1Km/pid: no such file or directory" name=kata-runtime pid=50179 sandbox=pcUeuxwrw9V4GXvoY1Km source=virtcontainers subsystem=qemu
time="2020-01-27T19:50:18.333887207Z" level=warning msg="sandbox cgroups path is empty" arch=amd64 command=delete container=pcUeuxwrw9V4GXvoY1Km name=kata-runtime pid=50179 sandbox=pcUeuxwrw9V4GXvoY1Km source=virtcontainers subsystem=sandbox

Proxy logs

Recent proxy problems found in system journal:

time="2020-01-27T19:50:06.205484851Z" level=info msg="[    0.276647] EXT4-fs (pmem0p1): DAX enabled. Warning: EXPERIMENTAL, use at your own risk\n" name=kata-proxy pid=50125 sandbox=pcUeuxwrw9V4GXvoY1Km source=agent
time="2020-01-27T19:50:06.206293156Z" level=info msg="[    0.278762] EXT4-fs (pmem0p1): mounted filesystem with ordered data mode. Opts: dax,data=ordered,errors=remount-ro\n" name=kata-proxy pid=50125 sandbox=pcUeuxwrw9V4GXvoY1Km source=agent
time="2020-01-27T19:50:07.01045319Z" level=info msg="[    1.082931] systemd[1]: Can't load kernel CGROUP SKB BPF program, BPF firewalling is not supported: Function not implemented\n" name=kata-proxy pid=50125 sandbox=pcUeuxwrw9V4GXvoY1Km source=agent
time="2020-01-27T19:50:07.010702992Z" level=info msg="[    1.083195] systemd[1]: Can't load kernel CGROUP DEVICE BPF program, BPF device control is not supported: Function not implemented\n" name=kata-proxy pid=50125 sandbox=pcUeuxwrw9V4GXvoY1Km source=agent
time="2020-01-27T19:50:07.012102301Z" level=info msg="[    1.084682] systemd[1]: Failed to stat /etc/localtime, ignoring: No such file or directory\n" name=kata-proxy pid=50125 sandbox=pcUeuxwrw9V4GXvoY1Km source=agent
time="2020-01-27T19:50:07.045528519Z" level=info msg="[    1.118059] systemd[1]: unit_file_build_name_map: normal unit file: /usr/lib/systemd/system/systemd-boot-check-no-failures.service\n" name=kata-proxy pid=50125 sandbox=pcUeuxwrw9V4GXvoY1Km source=agent
time="2020-01-27T19:50:07.136507311Z" level=info msg="[    1.209050] systemd-sysctl[40]: Couldn't write '16' to 'kernel/sysrq', ignoring: No such file or directory\n" name=kata-proxy pid=50125 sandbox=pcUeuxwrw9V4GXvoY1Km source=agent
time="2020-01-27T19:50:07.557691552Z" level=info msg="time=\"2020-01-27T19:50:07.546789235Z\" level=debug msg=\"request end\" debug_console=false duration=6.112418ms name=kata-agent pid=42 request=/grpc.AgentService/CreateSandbox resp=\"&Empty{XXX_unrecognized:[],}\" sandbox=pcUeuxwrw9V4GXvoY1Km source=agent\n" name=kata-proxy pid=50125 sandbox=pcUeuxwrw9V4GXvoY1Km source=agent
time="2020-01-27T19:50:07.694441042Z" level=info msg="time=\"2020-01-27T19:50:07.683531654Z\" level=debug msg=\"request end\" debug_console=false duration=73.334544ms name=kata-agent pid=42 request=/grpc.AgentService/CreateContainer resp=\"&Empty{XXX_unrecognized:[],}\" sandbox=pcUeuxwrw9V4GXvoY1Km source=agent\n" name=kata-proxy pid=50125 sandbox=pcUeuxwrw9V4GXvoY1Km source=agent
time="2020-01-27T19:50:07.694999946Z" level=info msg="time=\"2020-01-27T19:50:07.683779792Z\" level=info msg=\"ignoring unexpected signal\" debug_console=false name=kata-agent pid=42 sandbox=pcUeuxwrw9V4GXvoY1Km signal=\"child exited\" source=agent\n" name=kata-proxy pid=50125 sandbox=pcUeuxwrw9V4GXvoY1Km source=agent
time="2020-01-27T19:50:07.695563949Z" level=info msg="time=\"2020-01-27T19:50:07.684233581Z\" level=info msg=\"ignoring unexpected signal\" debug_console=false name=kata-agent pid=42 sandbox=pcUeuxwrw9V4GXvoY1Km signal=\"child exited\" source=agent\n" name=kata-proxy pid=50125 sandbox=pcUeuxwrw9V4GXvoY1Km source=agent
time="2020-01-27T19:50:07.708830736Z" level=info msg="time=\"2020-01-27T19:50:07.69794221Z\" level=debug msg=\"request end\" debug_console=false duration=\"45.966µs\" name=kata-agent pid=42 request=/grpc.AgentService/CloseStdin resp=\"&Empty{XXX_unrecognized:[],}\" sandbox=pcUeuxwrw9V4GXvoY1Km source=agent\n" name=kata-proxy pid=50125 sandbox=pcUeuxwrw9V4GXvoY1Km source=agent
time="2020-01-27T19:50:07.792146778Z" level=info msg="time=\"2020-01-27T19:50:07.781316598Z\" level=debug msg=\"request end\" debug_console=false duration=\"4.507µs\" name=kata-agent pid=42 request=/grpc.AgentService/OnlineCPUMem resp=\"&Empty{XXX_unrecognized:[],}\" sandbox=pcUeuxwrw9V4GXvoY1Km source=agent\n" name=kata-proxy pid=50125 sandbox=pcUeuxwrw9V4GXvoY1Km source=agent
time="2020-01-27T19:50:07.793794289Z" level=info msg="time=\"2020-01-27T19:50:07.78293097Z\" level=error debug_console=false error=\"Could not update parent cpuset cgroup (/sys/fs/cgroup/cpuset/cpuset.cpus) cpuset:'0': write /sys/fs/cgroup/cpuset/cpuset.cpus: permission denied\" name=kata-agent pid=42 sandbox=pcUeuxwrw9V4GXvoY1Km source=agent\n" name=kata-proxy pid=50125 sandbox=pcUeuxwrw9V4GXvoY1Km source=agent
time="2020-01-27T19:50:08.028144614Z" level=info msg="time=\"2020-01-27T19:50:08.017237961Z\" level=debug msg=\"request end\" debug_console=false duration=3.149197ms name=kata-agent pid=42 request=/grpc.AgentService/StartContainer resp=\"&Empty{XXX_unrecognized:[],}\" sandbox=pcUeuxwrw9V4GXvoY1Km source=agent\n" name=kata-proxy pid=50125 sandbox=pcUeuxwrw9V4GXvoY1Km source=agent
time="2020-01-27T19:50:08.035524762Z" level=info msg="time=\"2020-01-27T19:50:08.024695659Z\" level=info msg=\"ignoring unexpected signal\" debug_console=false name=kata-agent pid=42 sandbox=pcUeuxwrw9V4GXvoY1Km signal=\"child exited\" source=agent\n" name=kata-proxy pid=50125 sandbox=pcUeuxwrw9V4GXvoY1Km source=agent
time="2020-01-27T19:50:18.202854752Z" level=info msg="time=\"2020-01-27T19:50:18.191920188Z\" level=debug msg=\"request end\" debug_console=false duration=\"217.56µs\" name=kata-agent pid=42 request=/grpc.AgentService/SignalProcess resp=\"&Empty{XXX_unrecognized:[],}\" sandbox=pcUeuxwrw9V4GXvoY1Km source=agent\n" name=kata-proxy pid=50125 sandbox=pcUeuxwrw9V4GXvoY1Km source=agent
time="2020-01-27T19:50:18.208412089Z" level=info msg="time=\"2020-01-27T19:50:18.197358508Z\" level=debug msg=\"request end\" debug_console=false duration=\"475.426µs\" name=kata-agent pid=42 request=/grpc.AgentService/RemoveContainer resp=\"&Empty{XXX_unrecognized:[],}\" sandbox=pcUeuxwrw9V4GXvoY1Km source=agent\n" name=kata-proxy pid=50125 sandbox=pcUeuxwrw9V4GXvoY1Km source=agent
time="2020-01-27T19:50:18.313499974Z" level=info msg="time=\"2020-01-27T19:50:18.301353931Z\" level=debug msg=\"request end\" debug_console=false duration=13.623202ms name=kata-agent pid=42 request=/grpc.AgentService/DestroySandbox resp=\"&Empty{XXX_unrecognized:[],}\" sandbox=pcUeuxwrw9V4GXvoY1Km source=agent\n" name=kata-proxy pid=50125 sandbox=pcUeuxwrw9V4GXvoY1Km source=agent
time="2020-01-27T19:50:18.31438388Z" level=fatal msg="channel error" error="accept unix /run/vc/sbs/pcUeuxwrw9V4GXvoY1Km/proxy.sock: use of closed network connection" name=kata-proxy pid=50125 sandbox=pcUeuxwrw9V4GXvoY1Km source=proxy
time="2020-01-27T19:50:18.31441718Z" level=fatal msg="failed to handle exit signal" error="close unix @->/run/vc/vm/pcUeuxwrw9V4GXvoY1Km/kata.sock: use of closed network connection" name=kata-proxy pid=50125 sandbox=pcUeuxwrw9V4GXvoY1Km source=proxy

Shim logs

Recent shim problems found in system journal:

time="2020-01-27T19:50:08.034716257Z" level=info msg="copy stderr failed" container=pcUeuxwrw9V4GXvoY1Km error="rpc error: code = Unknown desc = EOF" exec-id=pcUeuxwrw9V4GXvoY1Km name=kata-shim pid=1 source=shim
time="2020-01-27T19:50:08.034994659Z" level=info msg="copy stdout failed" container=pcUeuxwrw9V4GXvoY1Km error="rpc error: code = Unknown desc = EOF" exec-id=pcUeuxwrw9V4GXvoY1Km name=kata-shim pid=1 source=shim

Throttler logs

No recent throttler problems found in system journal.


Container manager details

Have docker

Docker

Output of "docker version":

Client:
 Version:           18.06.3-ce
 API version:       1.38
 Go version:        go1.10.3
 Git commit:        d7080c1
 Built:             Wed Feb 20 02:26:51 2019
 OS/Arch:           linux/amd64
 Experimental:      false

Server:
 Engine:
  Version:          18.06.3-ce
  API version:      1.38 (minimum version 1.12)
  Go version:       go1.10.3
  Git commit:       d7080c1
  Built:            Wed Feb 20 02:28:17 2019
  OS/Arch:          linux/amd64
  Experimental:     false

Output of "docker info":

Containers: 0
 Running: 0
 Paused: 0
 Stopped: 0
Images: 1
Server Version: 18.06.3-ce
Storage Driver: overlay2
 Backing Filesystem: xfs
 Supports d_type: true
 Native Overlay Diff: true
Logging Driver: json-file
Cgroup Driver: cgroupfs
Plugins:
 Volume: local
 Network: bridge host macvlan null overlay
 Log: awslogs fluentd gcplogs gelf journald json-file logentries splunk syslog
Swarm: inactive
Runtimes: kata-runtime runc
Default Runtime: runc
Init Binary: docker-init
containerd version: 468a545b9edcd5932818eb9de8e72413e616e86e
runc version: a592beb5bc4c4092b1b1bac971afed27687340c5
init version: fec3683
Security Options:
 seccomp
  Profile: default
Kernel Version: 4.18.0-80.11.2.el8_0.x86_64
Operating System: CentOS Linux 8 (Core)
OSType: linux
Architecture: x86_64
CPUs: 4
Total Memory: 15.64GiB
Name: centos8-0566f0
ID: A7W4:MVR5:WDWA:OFP6:ASMZ:ULBV:YU2M:4LHM:GKMD:YLOQ:HCQE:UWIF
Docker Root Dir: /var/lib/docker
Debug Mode (client): false
Debug Mode (server): true
 File Descriptors: 24
 Goroutines: 43
 System Time: 2020-01-27T19:50:29.280273749Z
 EventsListeners: 0
Registry: https://index.docker.io/v1/
Labels:
Experimental: false
Insecure Registries:
 127.0.0.0/8
Live Restore Enabled: false

Output of "systemctl show docker":

Type=notify
Restart=on-failure
NotifyAccess=main
RestartUSec=100ms
TimeoutStartUSec=infinity
TimeoutStopUSec=1min 30s
RuntimeMaxUSec=infinity
WatchdogUSec=0
WatchdogTimestamp=Mon 2020-01-27 19:49:29 UTC
WatchdogTimestampMonotonic=951803807
PermissionsStartOnly=no
RootDirectoryStartOnly=no
RemainAfterExit=no
GuessMainPID=yes
MainPID=46525
ControlPID=0
FileDescriptorStoreMax=0
NFileDescriptorStore=0
StatusErrno=0
Result=success
UID=[not set]
GID=[not set]
NRestarts=0
ExecMainStartTimestamp=Mon 2020-01-27 19:49:22 UTC
ExecMainStartTimestampMonotonic=944349386
ExecMainExitTimestampMonotonic=0
ExecMainPID=46525
ExecMainCode=0
ExecMainStatus=0
ExecStart={ path=/usr/bin/dockerd ; argv[]=/usr/bin/dockerd -D --add-runtime kata-runtime=/usr/local/bin/kata-runtime --default-runtime=runc --storage-driver=overlay2 ; ignore_errors=no ; start_time=[Mon 2020-01-27 19:49:22 UTC] ; stop_time=[n/a] ; pid=46525 ; code=(null) ; status=0/0 }
ExecReload={ path=/bin/kill ; argv[]=/bin/kill -s HUP $MAINPID ; ignore_errors=no ; start_time=[n/a] ; stop_time=[n/a] ; pid=0 ; code=(null) ; status=0/0 }
Slice=system.slice
ControlGroup=/system.slice/docker.service
MemoryCurrent=107352064
CPUUsageNSec=[not set]
TasksCurrent=29
IPIngressBytes=18446744073709551615
IPIngressPackets=18446744073709551615
IPEgressBytes=18446744073709551615
IPEgressPackets=18446744073709551615
Delegate=yes
DelegateControllers=cpu cpuacct io blkio memory devices pids
CPUAccounting=no
CPUWeight=[not set]
StartupCPUWeight=[not set]
CPUShares=[not set]
StartupCPUShares=[not set]
CPUQuotaPerSecUSec=infinity
IOAccounting=no
IOWeight=[not set]
StartupIOWeight=[not set]
BlockIOAccounting=no
BlockIOWeight=[not set]
StartupBlockIOWeight=[not set]
MemoryAccounting=yes
MemoryLow=0
MemoryHigh=infinity
MemoryMax=infinity
MemorySwapMax=infinity
MemoryLimit=infinity
DevicePolicy=auto
TasksAccounting=yes
TasksMax=102320
IPAccounting=no
UMask=0022
LimitCPU=infinity
LimitCPUSoft=infinity
LimitFSIZE=infinity
LimitFSIZESoft=infinity
LimitDATA=infinity
LimitDATASoft=infinity
LimitSTACK=infinity
LimitSTACKSoft=8388608
LimitCORE=infinity
LimitCORESoft=infinity
LimitRSS=infinity
LimitRSSSoft=infinity
LimitNOFILE=infinity
LimitNOFILESoft=infinity
LimitAS=infinity
LimitASSoft=infinity
LimitNPROC=infinity
LimitNPROCSoft=infinity
LimitMEMLOCK=16777216
LimitMEMLOCKSoft=16777216
LimitLOCKS=infinity
LimitLOCKSSoft=infinity
LimitSIGPENDING=63950
LimitSIGPENDINGSoft=63950
LimitMSGQUEUE=819200
LimitMSGQUEUESoft=819200
LimitNICE=0
LimitNICESoft=0
LimitRTPRIO=0
LimitRTPRIOSoft=0
LimitRTTIME=infinity
LimitRTTIMESoft=infinity
OOMScoreAdjust=0
Nice=0
IOSchedulingClass=0
IOSchedulingPriority=0
CPUSchedulingPolicy=0
CPUSchedulingPriority=0
TimerSlackNSec=50000
CPUSchedulingResetOnFork=no
NonBlocking=no
StandardInput=null
StandardInputData=
StandardOutput=journal
StandardError=inherit
TTYReset=no
TTYVHangup=no
TTYVTDisallocate=no
SyslogPriority=30
SyslogLevelPrefix=yes
SyslogLevel=6
SyslogFacility=3
LogLevelMax=-1
SecureBits=0
CapabilityBoundingSet=cap_chown cap_dac_override cap_dac_read_search cap_fowner cap_fsetid cap_kill cap_setgid cap_setuid cap_setpcap cap_linux_immutable cap_net_bind_service cap_net_broadcast cap_net_admin cap_net_raw cap_ipc_lock cap_ipc_owner cap_sys_module cap_sys_rawio cap_sys_chroot cap_sys_ptrace cap_sys_pacct cap_sys_admin cap_sys_boot cap_sys_nice cap_sys_resource cap_sys_time cap_sys_tty_config cap_mknod cap_lease cap_audit_write cap_audit_control cap_setfcap cap_mac_override cap_mac_admin cap_syslog cap_wake_alarm cap_block_suspend
AmbientCapabilities=
DynamicUser=no
RemoveIPC=no
MountFlags=
PrivateTmp=no
PrivateDevices=no
ProtectKernelTunables=no
ProtectKernelModules=no
ProtectControlGroups=no
PrivateNetwork=no
PrivateUsers=no
PrivateMounts=no
ProtectHome=no
ProtectSystem=no
SameProcessGroup=no
UtmpMode=init
IgnoreSIGPIPE=yes
NoNewPrivileges=no
SystemCallErrorNumber=0
LockPersonality=no
RuntimeDirectoryPreserve=no
RuntimeDirectoryMode=0755
StateDirectoryMode=0755
CacheDirectoryMode=0755
LogsDirectoryMode=0755
ConfigurationDirectoryMode=0755
MemoryDenyWriteExecute=no
RestrictRealtime=no
RestrictNamespaces=no
MountAPIVFS=no
KeyringMode=private
KillMode=process
KillSignal=15
SendSIGKILL=yes
SendSIGHUP=no
Id=docker.service
Names=docker.service
Requires=sysinit.target system.slice
Wants=network-online.target
Conflicts=shutdown.target
Before=shutdown.target
After=system.slice network-online.target systemd-journald.socket sysinit.target firewalld.service basic.target
Documentation=https://docs.docker.com
Description=Docker Application Container Engine
LoadState=loaded
ActiveState=active
SubState=running
FragmentPath=/usr/lib/systemd/system/docker.service
DropInPaths=/etc/systemd/system/docker.service.d/kata-containers.conf
UnitFileState=disabled
UnitFilePreset=disabled
StateChangeTimestamp=Mon 2020-01-27 19:49:29 UTC
StateChangeTimestampMonotonic=951803809
InactiveExitTimestamp=Mon 2020-01-27 19:49:22 UTC
InactiveExitTimestampMonotonic=944349430
ActiveEnterTimestamp=Mon 2020-01-27 19:49:29 UTC
ActiveEnterTimestampMonotonic=951803809
ActiveExitTimestampMonotonic=0
InactiveEnterTimestampMonotonic=0
CanStart=yes
CanStop=yes
CanReload=yes
CanIsolate=no
StopWhenUnneeded=no
RefuseManualStart=no
RefuseManualStop=no
AllowIsolate=no
DefaultDependencies=yes
OnFailureJobMode=replace
IgnoreOnIsolate=no
NeedDaemonReload=no
JobTimeoutUSec=infinity
JobRunningTimeoutUSec=infinity
JobTimeoutAction=none
ConditionResult=yes
AssertResult=yes
ConditionTimestamp=Mon 2020-01-27 19:49:22 UTC
ConditionTimestampMonotonic=944347328
AssertTimestamp=Mon 2020-01-27 19:49:22 UTC
AssertTimestampMonotonic=944347329
Transient=no
Perpetual=no
StartLimitIntervalUSec=1min
StartLimitBurst=3
StartLimitAction=none
FailureAction=none
SuccessAction=none
InvocationID=393e495c6f3141ee8650f4dbb83ea370
CollectMode=inactive

No kubectl No crio No containerd


Packages

No dpkg Have rpm Output of "rpm -qa|egrep "(cc-oci-runtimecc-runtimerunv|kata-proxy|kata-runtime|kata-shim|kata-ksm-throttler|kata-containers-image|linux-container|qemu-)"":

GabyCT commented 4 years ago

Ping @WeiZhang555 — any idea about this failure?

bharathappali commented 4 years ago

I'm facing a similar issue when I try the --cpus 2 option in docker run, with kata-runtime running on Ubuntu 18.04.

devimc commented 4 years ago

@bharathappali are you getting the same error?

Could not update parent cpuset cgroup (/sys/fs/cgroup/cpuset/cpuset.cpus) cpuset:'0': write /sys/fs/cgroup/cpuset/cpuset.cpus: permission denied

Could you attach some logs?