r2an9 opened this issue 3 years ago (status: Open)
@xiregele - any chance you can grab the resulting config.json for the container(s)?
If you're using containerd, it should be under /run/containerd/io.containerd.runtime.v2.task/<namespace>/<container>/config.json, where the namespace is default (or another name such as k8s.io when driven by Kubernetes).
Also, can you share a minimal pod spec?
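For reference, a rough sketch of how to pull that file on a node running containerd under Kubernetes; the namespace and container ID depend on your setup, and jq is only used for pretty-printing:

# List the tasks containerd is tracking (assumption: the cluster uses the CRI
# plugin, so the namespace is k8s.io rather than default).
sudo ls /run/containerd/io.containerd.runtime.v2.task/k8s.io/
# Dump the OCI config for one of them (<container-id> is a placeholder).
sudo cat /run/containerd/io.containerd.runtime.v2.task/k8s.io/<container-id>/config.json | jq .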
@egernst Thanks!
config.json is pasted below:
{
"ociVersion": "1.0.1-dev",
"process": {
"user": {
"uid": 0,
"gid": 0
},
"args": [
"/usr/sbin/sshd",
"-D"
],
"env": [
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
"HOSTNAME=ssh-server2",
"OWDEV_CONTROLLER_PORT_8080_TCP=tcp://10.102.121.178:8080",
"OWDEV_REDIS_SERVICE_PORT_REDIS=6379",
"OWDEV_CONTROLLER_SERVICE_PORT=8080",
"KUBERNETES_PORT_443_TCP_ADDR=10.96.0.1",
"OWDEV_APIGATEWAY_PORT_8080_TCP_PROTO=tcp",
"OWDEV_NGINX_PORT_443_TCP=tcp://10.97.198.130:443",
"OWDEV_CONTROLLER_PORT_8080_TCP_ADDR=10.102.121.178",
"OWDEV_APIGATEWAY_SERVICE_HOST=10.101.53.78",
"OWDEV_COUCHDB_PORT_5984_TCP_PROTO=tcp",
"OWDEV_NGINX_PORT_443_TCP_PORT=443",
"KUBERNETES_PORT_443_TCP_PORT=443",
"KUBERNETES_SERVICE_PORT_HTTPS=443",
"OWDEV_APIGATEWAY_SERVICE_PORT_API=9000",
"OWDEV_COUCHDB_SERVICE_PORT=5984",
"OWDEV_NGINX_SERVICE_PORT=80",
"OWDEV_NGINX_PORT_80_TCP_ADDR=10.97.198.130",
"OWDEV_NGINX_PORT_443_TCP_PROTO=tcp",
"OWDEV_CONTROLLER_SERVICE_PORT_HTTP=8080",
"KUBERNETES_PORT=tcp://10.96.0.1:443",
"OWDEV_APIGATEWAY_PORT_9000_TCP_PORT=9000",
"OWDEV_NGINX_PORT_80_TCP_PORT=80",
"OWDEV_REDIS_PORT=tcp://10.108.243.153:6379",
"OWDEV_REDIS_PORT_6379_TCP_PORT=6379",
"OWDEV_CONTROLLER_PORT_8080_TCP_PORT=8080",
"OWDEV_APIGATEWAY_PORT_9000_TCP_PROTO=tcp",
"KUBERNETES_SERVICE_PORT=443",
"OWDEV_NGINX_PORT_80_TCP_PROTO=tcp",
"OWDEV_APIGATEWAY_PORT_8080_TCP=tcp://10.101.53.78:8080",
"OWDEV_NGINX_PORT=tcp://10.97.198.130:80",
"OWDEV_APIGATEWAY_PORT=tcp://10.101.53.78:8080",
"OWDEV_CONTROLLER_PORT=tcp://10.102.121.178:8080",
"KUBERNETES_PORT_443_TCP=tcp://10.96.0.1:443",
"OWDEV_APIGATEWAY_PORT_8080_TCP_ADDR=10.101.53.78",
"OWDEV_APIGATEWAY_PORT_9000_TCP=tcp://10.101.53.78:9000",
"OWDEV_COUCHDB_PORT_5984_TCP=tcp://10.99.12.120:5984",
"OWDEV_NGINX_SERVICE_HOST=10.97.198.130",
"OWDEV_NGINX_SERVICE_PORT_HTTP=80",
"OWDEV_CONTROLLER_SERVICE_HOST=10.102.121.178",
"OWDEV_REDIS_SERVICE_PORT=6379",
"OWDEV_NGINX_PORT_443_TCP_ADDR=10.97.198.130",
"OWDEV_COUCHDB_PORT_5984_TCP_PORT=5984",
"OWDEV_COUCHDB_PORT_5984_TCP_ADDR=10.99.12.120",
"KUBERNETES_SERVICE_HOST=10.96.0.1",
"OWDEV_CONTROLLER_PORT_8080_TCP_PROTO=tcp",
"OWDEV_COUCHDB_SERVICE_HOST=10.99.12.120",
"OWDEV_APIGATEWAY_PORT_8080_TCP_PORT=8080",
"OWDEV_APIGATEWAY_SERVICE_PORT=8080",
"KUBERNETES_PORT_443_TCP_PROTO=tcp",
"OWDEV_COUCHDB_SERVICE_PORT_COUCHDB=5984",
"OWDEV_REDIS_PORT_6379_TCP_PROTO=tcp",
"OWDEV_REDIS_PORT_6379_TCP_ADDR=10.108.243.153",
"OWDEV_APIGATEWAY_PORT_9000_TCP_ADDR=10.101.53.78",
"OWDEV_COUCHDB_PORT=tcp://10.99.12.120:5984",
"OWDEV_NGINX_SERVICE_PORT_HTTPS_API=443",
"OWDEV_NGINX_PORT_80_TCP=tcp://10.97.198.130:80",
"OWDEV_REDIS_SERVICE_HOST=10.108.243.153",
"OWDEV_REDIS_PORT_6379_TCP=tcp://10.108.243.153:6379",
"OWDEV_APIGATEWAY_SERVICE_PORT_MGMT=8080"
],
"cwd": "/",
"capabilities": {
"bounding": [
"CAP_CHOWN",
"CAP_DAC_OVERRIDE",
"CAP_FSETID",
"CAP_FOWNER",
"CAP_MKNOD",
"CAP_NET_RAW",
"CAP_SETGID",
"CAP_SETUID",
"CAP_SETFCAP",
"CAP_SETPCAP",
"CAP_NET_BIND_SERVICE",
"CAP_SYS_CHROOT",
"CAP_KILL",
"CAP_AUDIT_WRITE"
],
"effective": [
"CAP_CHOWN",
"CAP_DAC_OVERRIDE",
"CAP_FSETID",
"CAP_FOWNER",
"CAP_MKNOD",
"CAP_NET_RAW",
"CAP_SETGID",
"CAP_SETUID",
"CAP_SETFCAP",
"CAP_SETPCAP",
"CAP_NET_BIND_SERVICE",
"CAP_SYS_CHROOT",
"CAP_KILL",
"CAP_AUDIT_WRITE"
],
"inheritable": [
"CAP_CHOWN",
"CAP_DAC_OVERRIDE",
"CAP_FSETID",
"CAP_FOWNER",
"CAP_MKNOD",
"CAP_NET_RAW",
"CAP_SETGID",
"CAP_SETUID",
"CAP_SETFCAP",
"CAP_SETPCAP",
"CAP_NET_BIND_SERVICE",
"CAP_SYS_CHROOT",
"CAP_KILL",
"CAP_AUDIT_WRITE"
],
"permitted": [
"CAP_CHOWN",
"CAP_DAC_OVERRIDE",
"CAP_FSETID",
"CAP_FOWNER",
"CAP_MKNOD",
"CAP_NET_RAW",
"CAP_SETGID",
"CAP_SETUID",
"CAP_SETFCAP",
"CAP_SETPCAP",
"CAP_NET_BIND_SERVICE",
"CAP_SYS_CHROOT",
"CAP_KILL",
"CAP_AUDIT_WRITE"
]
},
"apparmorProfile": "cri-containerd.apparmor.d",
"oomScoreAdj": 1000
},
"root": {
"path": "rootfs"
},
"mounts": [
{
"destination": "/proc",
"type": "proc",
"source": "proc",
"options": [
"nosuid",
"noexec",
"nodev"
]
},
{
"destination": "/dev",
"type": "tmpfs",
"source": "tmpfs",
"options": [
"nosuid",
"strictatime",
"mode=755",
"size=65536k"
]
},
{
"destination": "/dev/pts",
"type": "devpts",
"source": "devpts",
"options": [
"nosuid",
"noexec",
"newinstance",
"ptmxmode=0666",
"mode=0620",
"gid=5"
]
},
{
"destination": "/dev/mqueue",
"type": "mqueue",
"source": "mqueue",
"options": [
"nosuid",
"noexec",
"nodev"
]
},
{
"destination": "/sys",
"type": "sysfs",
"source": "sysfs",
"options": [
"nosuid",
"noexec",
"nodev",
"ro"
]
},
{
"destination": "/sys/fs/cgroup",
"type": "cgroup",
"source": "cgroup",
"options": [
"nosuid",
"noexec",
"nodev",
"relatime",
"ro"
]
},
{
"destination": "/etc/hosts",
"type": "bind",
"source": "/var/lib/kubelet/pods/57bf1404-5452-4684-9f20-9b0801a862a7/etc-hosts",
"options": [
"rbind",
"rprivate",
"rw"
]
},
{
"destination": "/dev/termination-log",
"type": "bind",
"source": "/var/lib/kubelet/pods/57bf1404-5452-4684-9f20-9b0801a862a7/containers/sshd/de38e85a",
"options": [
"rbind",
"rprivate",
"rw"
]
},
{
"destination": "/etc/hostname",
"type": "bind",
"source": "/var/lib/containerd/io.containerd.grpc.v1.cri/sandboxes/f419b61b49a38977852c5dffbe9d57a07effbe52872a19475385a2e98e713f6f/hostname",
"options": [
"rbind",
"rprivate",
"rw"
]
},
{
"destination": "/etc/resolv.conf",
"type": "bind",
"source": "/var/lib/containerd/io.containerd.grpc.v1.cri/sandboxes/f419b61b49a38977852c5dffbe9d57a07effbe52872a19475385a2e98e713f6f/resolv.conf",
"options": [
"rbind",
"rprivate",
"rw"
]
},
{
"destination": "/dev/shm",
"type": "bind",
"source": "/run/containerd/io.containerd.grpc.v1.cri/sandboxes/f419b61b49a38977852c5dffbe9d57a07effbe52872a19475385a2e98e713f6f/shm",
"options": [
"rbind",
"rprivate",
"rw"
]
},
{
"destination": "/var/run/secrets/kubernetes.io/serviceaccount",
"type": "bind",
"source": "/var/lib/kubelet/pods/57bf1404-5452-4684-9f20-9b0801a862a7/volumes/kubernetes.io~secret/default-token-lxvjp",
"options": [
"rbind",
"rprivate",
"ro"
]
}
],
"annotations": {
"io.kubernetes.cri.container-type": "container",
"io.kubernetes.cri.sandbox-id": "f419b61b49a38977852c5dffbe9d57a07effbe52872a19475385a2e98e713f6f"
},
"linux": {
"resources": {
"devices": [
{
"allow": false,
"access": "rwm"
}
],
"memory": {},
"cpu": {
"shares": 2,
"period": 100000
}
},
"cgroupsPath": "/kubepods/besteffort/pod57bf1404-5452-4684-9f20-9b0801a862a7/99aa43e8bda066a0cd2b5d69aa3db84e839a8125308fa33460805e2c25f592df",
"namespaces": [
{
"type": "pid"
},
{
"type": "ipc",
"path": "/proc/3299906/ns/ipc"
},
{
"type": "uts",
"path": "/proc/3299906/ns/uts"
},
{
"type": "mount"
},
{
"type": "network",
"path": "/proc/3299906/ns/net"
}
],
"maskedPaths": [
"/proc/acpi",
"/proc/kcore",
"/proc/keys",
"/proc/latency_stats",
"/proc/timer_list",
"/proc/timer_stats",
"/proc/sched_debug",
"/proc/scsi",
"/sys/firmware"
],
"readonlyPaths": [
"/proc/asound",
"/proc/bus",
"/proc/fs",
"/proc/irq",
"/proc/sys",
"/proc/sysrq-trigger"
]
}
}
Pod spec:
apiVersion: v1
kind: Pod
metadata:
  name: ssh-server2
  labels:
    test: sshd
spec:
  runtimeClassName: kata-container
  nodeSelector:
    kata: debug
  containers:
  - name: sshd
    image: rastasheep/ubuntu-sshd:18.04
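(For completeness, a minimal sketch of how the missing mount can be checked once this pod is running; the pod name and mount path come from the spec and config above, and the filename is an assumption:)

# Apply the pod (assuming the spec above is saved as ssh-server2.yaml),
# then look for the service account files inside the container.
kubectl apply -f ssh-server2.yaml
kubectl exec ssh-server2 -- ls /var/run/secrets/kubernetes.io/serviceaccount
# Alternatively, list the container's mounts and grep for the secret volume:
kubectl exec ssh-server2 -- mount | grep serviceaccount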
Thanks @xiregele -- I'll try to reproduce and take a look tomorrow. These should be handled the same as any other volume. We do have special handling in place for sources whose path includes kubernetes.io~empty-dir, since we want to treat ephemeral volumes as a special case, but this mount shouldn't be hitting that path.
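(As a quick sanity check, here is a sketch of how to confirm that against the config.json above; the filename is just wherever the config was saved:)

# Print the host source path of every mount in the OCI config and look for
# kubelet's kubernetes.io~ volume markers.
jq -r '.mounts[].source' config.json | grep 'kubernetes.io~'
# In the config above the only match is
# .../volumes/kubernetes.io~secret/default-token-lxvjp -- a secret volume --
# so the empty-dir (ephemeral volume) special handling should not apply here.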
I'll try to take a look at this tomorrow, unless someone else beats me to it :)
Hello, is there an update on this issue or a possible workaround?
I'm testing Kata Containers v3.1.2 with Firecracker and I have exactly the same problem.
I created a Kata container (tried with both QEMU and Cloud Hypervisor). The pod should have the default token mounted. Output of
kubectl describe pod $PODNAME
snippet:
....
    resources: {}
    terminationMessagePath: /dev/termination-log
    terminationMessagePolicy: File
    volumeMounts:
    - mountPath: /var/run/secrets/kubernetes.io/serviceaccount
      name: default-token-lxvjp
      readOnly: true
  dnsPolicy: ClusterFirst
  enableServiceLinks: true
  nodeName: jfz1r05h03
  nodeSelector:
    kata: debug
  preemptionPolicy: PreemptLowerPriority
  priority: 0
  restartPolicy: Always
  runtimeClassName: kata-container
  schedulerName: default-scheduler
  securityContext: {}
  serviceAccount: default
  serviceAccountName: default
  terminationGracePeriodSeconds: 30
  tolerations:
  - effect: NoExecute
    key: node.kubernetes.io/not-ready
    operator: Exists
    tolerationSeconds: 300
  - effect: NoExecute
    key: node.kubernetes.io/unreachable
    operator: Exists
    tolerationSeconds: 300
  volumes:
  - name: default-token-lxvjp
    secret:
      defaultMode: 420
      secretName: default-token-lxvjp
....
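(If it helps, a narrower query for just the volume-related fields, sketched with plain kubectl jsonpath output; adjust $PODNAME as before:)

# Show only the first container's volumeMounts and the pod-level volumes.
kubectl get pod $PODNAME -o jsonpath='{.spec.containers[0].volumeMounts}{"\n"}{.spec.volumes}{"\n"}'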
Files available under: /var/run/kata-containers/shared/sandboxes/f419b61b49a38977852c5dffbe9d57a07effbe52872a19475385a2e98e713f6f/shared
├── 99aa43e8bda066a0cd2b5d69aa3db84e839a8125308fa33460805e2c25f592df-84079a11fb9b1daf-serviceaccount
│   ├── ca.crt -> ..data/ca.crt
│   ├── namespace -> ..data/namespace
│   └── token -> ..data/token
Also available under /var/lib/kubelet/pods/....
However, inside the container there is no mount at /var/run/secrets/kubernetes.io/serviceaccount. Other mounts like hostname, resolv.conf, etc. are fine. I tried both virtio-fs and virtio-9p with the same result. kata-runtime exec $sandbox-id does not work, so I don't know how to debug inside the VM.
Kata version 2.0.1
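(Regarding debugging inside the VM: one possible route, sketched from memory and worth double-checking against the Kata docs for your version, is to enable the agent debug console and then attach with kata-runtime exec.)

# Sketch only -- file locations and section names may differ between installs.
# 1. In the Kata configuration (e.g. /etc/kata-containers/configuration.toml),
#    enable the guest debug console under the [agent.kata] section:
#        debug_console_enabled = true
# 2. kata-runtime exec reaches the sandbox via kata-monitor, so make sure
#    kata-monitor is running on the node, then attach using the sandbox ID:
sudo kata-monitor &
sudo kata-runtime exec <sandbox-id>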