Closed dorkamotorka closed 6 months ago
Additional debug information can be found by printing the Cilium
pod logs:
Defaulted container "cilium-agent" out of: cilium-agent, install-portmap-cni-plugin (init), config (init), mount-cgroup (init), apply-sysctl-overwrites (init), mount-bpf-fs (init), clean-cilium-state (init), install-cni-binaries (init)
level=info msg="Memory available for map entries (0.003% of 540647292928B): 1351618232B" subsys=config
level=info msg="option bpf-ct-global-tcp-max set by dynamic sizing to 4742520" subsys=config
level=info msg="option bpf-ct-global-any-max set by dynamic sizing to 2371260" subsys=config
level=info msg="option bpf-nat-global-max set by dynamic sizing to 4742520" subsys=config
level=info msg="option bpf-neigh-global-max set by dynamic sizing to 4742520" subsys=config
level=info msg="option bpf-sock-rev-map-max set by dynamic sizing to 2371260" subsys=config
level=info msg=" --agent-health-port='9879'" subsys=daemon
level=info msg=" --agent-labels=''" subsys=daemon
level=info msg=" --agent-liveness-update-interval='1s'" subsys=daemon
level=info msg=" --agent-not-ready-taint-key='node.cilium.io/agent-not-ready'" subsys=daemon
level=info msg=" --allocator-list-timeout='3m0s'" subsys=daemon
level=info msg=" --allow-icmp-frag-needed='true'" subsys=daemon
level=info msg=" --allow-localhost='auto'" subsys=daemon
level=info msg=" --annotate-k8s-node='false'" subsys=daemon
level=info msg=" --api-rate-limit=''" subsys=daemon
level=info msg=" --arping-refresh-period='30s'" subsys=daemon
level=info msg=" --auto-create-cilium-node-resource='true'" subsys=daemon
level=info msg=" --auto-direct-node-routes='false'" subsys=daemon
level=info msg=" --bgp-announce-lb-ip='false'" subsys=daemon
level=info msg=" --bgp-announce-pod-cidr='false'" subsys=daemon
level=info msg=" --bgp-config-path='/var/lib/cilium/bgp/config.yaml'" subsys=daemon
level=info msg=" --bpf-auth-map-max='524288'" subsys=daemon
level=info msg=" --bpf-ct-global-any-max='262144'" subsys=daemon
level=info msg=" --bpf-ct-global-tcp-max='524288'" subsys=daemon
level=info msg=" --bpf-ct-timeout-regular-any='1m0s'" subsys=daemon
level=info msg=" --bpf-ct-timeout-regular-tcp='6h0m0s'" subsys=daemon
level=info msg=" --bpf-ct-timeout-regular-tcp-fin='10s'" subsys=daemon
level=info msg=" --bpf-ct-timeout-regular-tcp-syn='1m0s'" subsys=daemon
level=info msg=" --bpf-ct-timeout-service-any='1m0s'" subsys=daemon
level=info msg=" --bpf-ct-timeout-service-tcp='6h0m0s'" subsys=daemon
level=info msg=" --bpf-ct-timeout-service-tcp-grace='1m0s'" subsys=daemon
level=info msg=" --bpf-filter-priority='1'" subsys=daemon
level=info msg=" --bpf-fragments-map-max='8192'" subsys=daemon
level=info msg=" --bpf-lb-acceleration='disabled'" subsys=daemon
level=info msg=" --bpf-lb-affinity-map-max='0'" subsys=daemon
level=info msg=" --bpf-lb-algorithm='random'" subsys=daemon
level=info msg=" --bpf-lb-dev-ip-addr-inherit=''" subsys=daemon
level=info msg=" --bpf-lb-dsr-dispatch='opt'" subsys=daemon
level=info msg=" --bpf-lb-dsr-l4-xlate='frontend'" subsys=daemon
level=info msg=" --bpf-lb-external-clusterip='false'" subsys=daemon
level=info msg=" --bpf-lb-maglev-hash-seed='JLfvgnHc2kaSUFaI'" subsys=daemon
level=info msg=" --bpf-lb-maglev-map-max='0'" subsys=daemon
level=info msg=" --bpf-lb-maglev-table-size='16381'" subsys=daemon
level=info msg=" --bpf-lb-map-max='65536'" subsys=daemon
level=info msg=" --bpf-lb-mode='snat'" subsys=daemon
level=info msg=" --bpf-lb-rev-nat-map-max='0'" subsys=daemon
level=info msg=" --bpf-lb-rss-ipv4-src-cidr=''" subsys=daemon
level=info msg=" --bpf-lb-rss-ipv6-src-cidr=''" subsys=daemon
level=info msg=" --bpf-lb-service-backend-map-max='0'" subsys=daemon
level=info msg=" --bpf-lb-service-map-max='0'" subsys=daemon
level=info msg=" --bpf-lb-sock='false'" subsys=daemon
level=info msg=" --bpf-lb-sock-hostns-only='false'" subsys=daemon
level=info msg=" --bpf-lb-source-range-map-max='0'" subsys=daemon
level=info msg=" --bpf-map-dynamic-size-ratio='0.0025'" subsys=daemon
level=info msg=" --bpf-map-event-buffers=''" subsys=daemon
level=info msg=" --bpf-nat-global-max='524288'" subsys=daemon
level=info msg=" --bpf-neigh-global-max='524288'" subsys=daemon
level=info msg=" --bpf-policy-map-max='16384'" subsys=daemon
level=info msg=" --bpf-root='/sys/fs/bpf'" subsys=daemon
level=info msg=" --bpf-sock-rev-map-max='262144'" subsys=daemon
level=info msg=" --bypass-ip-availability-upon-restore='false'" subsys=daemon
level=info msg=" --certificates-directory='/var/run/cilium/certs'" subsys=daemon
level=info msg=" --cflags=''" subsys=daemon
level=info msg=" --cgroup-root='/run/cilium/cgroupv2'" subsys=daemon
level=info msg=" --cilium-endpoint-gc-interval='5m0s'" subsys=daemon
level=info msg=" --cluster-health-port='4240'" subsys=daemon
level=info msg=" --cluster-id='0'" subsys=daemon
level=info msg=" --cluster-name='default'" subsys=daemon
level=info msg=" --clustermesh-config='/var/lib/cilium/clustermesh/'" subsys=daemon
level=info msg=" --cmdref=''" subsys=daemon
level=info msg=" --cni-chaining-mode='portmap'" subsys=daemon
level=info msg=" --cni-chaining-target=''" subsys=daemon
level=info msg=" --cni-exclusive='true'" subsys=daemon
level=info msg=" --cni-log-file='/var/run/cilium/cilium-cni.log'" subsys=daemon
level=info msg=" --cnp-node-status-gc-interval='0s'" subsys=daemon
level=info msg=" --config=''" subsys=daemon
level=info msg=" --config-dir='/tmp/cilium/config-map'" subsys=daemon
level=info msg=" --config-sources='config-map:kube-system/cilium-config'" subsys=daemon
level=info msg=" --conntrack-gc-interval='0s'" subsys=daemon
level=info msg=" --crd-wait-timeout='5m0s'" subsys=daemon
level=info msg=" --custom-cni-conf='false'" subsys=daemon
level=info msg=" --datapath-mode='veth'" subsys=daemon
level=info msg=" --debug='false'" subsys=daemon
level=info msg=" --debug-verbose=''" subsys=daemon
level=info msg=" --derive-masquerade-ip-addr-from-device=''" subsys=daemon
level=info msg=" --devices=''" subsys=daemon
level=info msg=" --direct-routing-device=''" subsys=daemon
level=info msg=" --disable-cnp-status-updates='true'" subsys=daemon
level=info msg=" --disable-endpoint-crd='false'" subsys=daemon
level=info msg=" --disable-envoy-version-check='false'" subsys=daemon
level=info msg=" --disable-iptables-feeder-rules=''" subsys=daemon
level=info msg=" --dns-max-ips-per-restored-rule='1000'" subsys=daemon
level=info msg=" --dns-policy-unload-on-shutdown='false'" subsys=daemon
level=info msg=" --dnsproxy-concurrency-limit='0'" subsys=daemon
level=info msg=" --dnsproxy-concurrency-processing-grace-period='0s'" subsys=daemon
level=info msg=" --dnsproxy-lock-count='128'" subsys=daemon
level=info msg=" --dnsproxy-lock-timeout='500ms'" subsys=daemon
level=info msg=" --egress-gateway-policy-map-max='16384'" subsys=daemon
level=info msg=" --egress-gateway-reconciliation-trigger-interval='1s'" subsys=daemon
level=info msg=" --egress-masquerade-interfaces=''" subsys=daemon
level=info msg=" --egress-multi-home-ip-rule-compat='false'" subsys=daemon
level=info msg=" --enable-auto-protect-node-port-range='true'" subsys=daemon
level=info msg=" --enable-bandwidth-manager='false'" subsys=daemon
level=info msg=" --enable-bbr='false'" subsys=daemon
level=info msg=" --enable-bgp-control-plane='false'" subsys=daemon
level=info msg=" --enable-bpf-clock-probe='false'" subsys=daemon
level=info msg=" --enable-bpf-masquerade='false'" subsys=daemon
level=info msg=" --enable-bpf-tproxy='false'" subsys=daemon
level=info msg=" --enable-cilium-api-server-access='*'" subsys=daemon
level=info msg=" --enable-cilium-endpoint-slice='false'" subsys=daemon
level=info msg=" --enable-cilium-health-api-server-access='*'" subsys=daemon
level=info msg=" --enable-custom-calls='false'" subsys=daemon
level=info msg=" --enable-endpoint-health-checking='true'" subsys=daemon
level=info msg=" --enable-endpoint-routes='false'" subsys=daemon
level=info msg=" --enable-envoy-config='false'" subsys=daemon
level=info msg=" --enable-external-ips='false'" subsys=daemon
level=info msg=" --enable-health-check-nodeport='true'" subsys=daemon
level=info msg=" --enable-health-checking='true'" subsys=daemon
level=info msg=" --enable-high-scale-ipcache='false'" subsys=daemon
level=info msg=" --enable-host-firewall='false'" subsys=daemon
level=info msg=" --enable-host-legacy-routing='true'" subsys=daemon
level=info msg=" --enable-host-port='false'" subsys=daemon
level=info msg=" --enable-hubble='false'" subsys=daemon
level=info msg=" --enable-hubble-recorder-api='true'" subsys=daemon
level=info msg=" --enable-icmp-rules='true'" subsys=daemon
level=info msg=" --enable-identity-mark='true'" subsys=daemon
level=info msg=" --enable-ip-masq-agent='false'" subsys=daemon
level=info msg=" --enable-ipsec='false'" subsys=daemon
level=info msg=" --enable-ipsec-key-watcher='true'" subsys=daemon
level=info msg=" --enable-ipv4='true'" subsys=daemon
level=info msg=" --enable-ipv4-big-tcp='false'" subsys=daemon
level=info msg=" --enable-ipv4-egress-gateway='false'" subsys=daemon
level=info msg=" --enable-ipv4-fragment-tracking='true'" subsys=daemon
level=info msg=" --enable-ipv4-masquerade='true'" subsys=daemon
level=info msg=" --enable-ipv6='false'" subsys=daemon
level=info msg=" --enable-ipv6-big-tcp='false'" subsys=daemon
level=info msg=" --enable-ipv6-masquerade='true'" subsys=daemon
level=info msg=" --enable-ipv6-ndp='false'" subsys=daemon
level=info msg=" --enable-k8s='true'" subsys=daemon
level=info msg=" --enable-k8s-api-discovery='false'" subsys=daemon
level=info msg=" --enable-k8s-endpoint-slice='true'" subsys=daemon
level=info msg=" --enable-k8s-event-handover='false'" subsys=daemon
level=info msg=" --enable-k8s-networkpolicy='true'" subsys=daemon
level=info msg=" --enable-k8s-terminating-endpoint='true'" subsys=daemon
level=info msg=" --enable-l2-announcements='false'" subsys=daemon
level=info msg=" --enable-l2-neigh-discovery='true'" subsys=daemon
level=info msg=" --enable-l2-pod-announcements='false'" subsys=daemon
level=info msg=" --enable-l7-proxy='true'" subsys=daemon
level=info msg=" --enable-local-node-route='true'" subsys=daemon
level=info msg=" --enable-local-redirect-policy='false'" subsys=daemon
level=info msg=" --enable-metrics='true'" subsys=daemon
level=info msg=" --enable-mke='false'" subsys=daemon
level=info msg=" --enable-monitor='true'" subsys=daemon
level=info msg=" --enable-nat46x64-gateway='false'" subsys=daemon
level=info msg=" --enable-node-port='false'" subsys=daemon
level=info msg=" --enable-pmtu-discovery='false'" subsys=daemon
level=info msg=" --enable-policy='default'" subsys=daemon
level=info msg=" --enable-recorder='false'" subsys=daemon
level=info msg=" --enable-remote-node-identity='true'" subsys=daemon
level=info msg=" --enable-runtime-device-detection='false'" subsys=daemon
level=info msg=" --enable-sctp='false'" subsys=daemon
level=info msg=" --enable-service-topology='false'" subsys=daemon
level=info msg=" --enable-session-affinity='false'" subsys=daemon
level=info msg=" --enable-srv6='false'" subsys=daemon
level=info msg=" --enable-stale-cilium-endpoint-cleanup='true'" subsys=daemon
level=info msg=" --enable-svc-source-range-check='true'" subsys=daemon
level=info msg=" --enable-tracing='false'" subsys=daemon
level=info msg=" --enable-unreachable-routes='false'" subsys=daemon
level=info msg=" --enable-vtep='false'" subsys=daemon
level=info msg=" --enable-well-known-identities='false'" subsys=daemon
level=info msg=" --enable-wireguard='false'" subsys=daemon
level=info msg=" --enable-wireguard-userspace-fallback='false'" subsys=daemon
level=info msg=" --enable-xdp-prefilter='false'" subsys=daemon
level=info msg=" --enable-xt-socket-fallback='true'" subsys=daemon
level=info msg=" --encrypt-interface=''" subsys=daemon
level=info msg=" --encrypt-node='false'" subsys=daemon
level=info msg=" --endpoint-gc-interval='5m0s'" subsys=daemon
level=info msg=" --endpoint-queue-size='25'" subsys=daemon
level=info msg=" --endpoint-status=''" subsys=daemon
level=info msg=" --envoy-config-timeout='2m0s'" subsys=daemon
level=info msg=" --envoy-log=''" subsys=daemon
level=info msg=" --exclude-local-address=''" subsys=daemon
level=info msg=" --external-envoy-proxy='false'" subsys=daemon
level=info msg=" --fixed-identity-mapping=''" subsys=daemon
level=info msg=" --fqdn-regex-compile-lru-size='1024'" subsys=daemon
level=info msg=" --gops-port='9890'" subsys=daemon
level=info msg=" --http-403-msg=''" subsys=daemon
level=info msg=" --http-idle-timeout='0'" subsys=daemon
level=info msg=" --http-max-grpc-timeout='0'" subsys=daemon
level=info msg=" --http-normalize-path='true'" subsys=daemon
level=info msg=" --http-request-timeout='3600'" subsys=daemon
level=info msg=" --http-retry-count='3'" subsys=daemon
level=info msg=" --http-retry-timeout='0'" subsys=daemon
level=info msg=" --hubble-disable-tls='false'" subsys=daemon
level=info msg=" --hubble-event-buffer-capacity='4095'" subsys=daemon
level=info msg=" --hubble-event-queue-size='0'" subsys=daemon
level=info msg=" --hubble-export-file-compress='false'" subsys=daemon
level=info msg=" --hubble-export-file-max-backups='5'" subsys=daemon
level=info msg=" --hubble-export-file-max-size-mb='10'" subsys=daemon
level=info msg=" --hubble-export-file-path=''" subsys=daemon
level=info msg=" --hubble-listen-address=''" subsys=daemon
level=info msg=" --hubble-metrics=''" subsys=daemon
level=info msg=" --hubble-metrics-server=''" subsys=daemon
level=info msg=" --hubble-monitor-events=''" subsys=daemon
level=info msg=" --hubble-prefer-ipv6='false'" subsys=daemon
level=info msg=" --hubble-recorder-sink-queue-size='1024'" subsys=daemon
level=info msg=" --hubble-recorder-storage-path='/var/run/cilium/pcaps'" subsys=daemon
level=info msg=" --hubble-skip-unknown-cgroup-ids='true'" subsys=daemon
level=info msg=" --hubble-socket-path='/var/run/cilium/hubble.sock'" subsys=daemon
level=info msg=" --hubble-tls-cert-file=''" subsys=daemon
level=info msg=" --hubble-tls-client-ca-files=''" subsys=daemon
level=info msg=" --hubble-tls-key-file=''" subsys=daemon
level=info msg=" --identity-allocation-mode='crd'" subsys=daemon
level=info msg=" --identity-change-grace-period='5s'" subsys=daemon
level=info msg=" --identity-gc-interval='15m0s'" subsys=daemon
level=info msg=" --identity-heartbeat-timeout='30m0s'" subsys=daemon
level=info msg=" --identity-restore-grace-period='10m0s'" subsys=daemon
level=info msg=" --install-egress-gateway-routes='false'" subsys=daemon
level=info msg=" --install-iptables-rules='true'" subsys=daemon
level=info msg=" --install-no-conntrack-iptables-rules='false'" subsys=daemon
level=info msg=" --ip-allocation-timeout='2m0s'" subsys=daemon
level=info msg=" --ip-masq-agent-config-path='/etc/config/ip-masq-agent'" subsys=daemon
level=info msg=" --ipam='kubernetes'" subsys=daemon
level=info msg=" --ipam-cilium-node-update-rate='15s'" subsys=daemon
level=info msg=" --ipam-multi-pool-pre-allocation='default=8'" subsys=daemon
level=info msg=" --ipsec-key-file=''" subsys=daemon
level=info msg=" --ipsec-key-rotation-duration='5m0s'" subsys=daemon
level=info msg=" --iptables-lock-timeout='5s'" subsys=daemon
level=info msg=" --iptables-random-fully='false'" subsys=daemon
level=info msg=" --ipv4-native-routing-cidr=''" subsys=daemon
level=info msg=" --ipv4-node='auto'" subsys=daemon
level=info msg=" --ipv4-pod-subnets=''" subsys=daemon
level=info msg=" --ipv4-range='auto'" subsys=daemon
level=info msg=" --ipv4-service-loopback-address='169.254.42.1'" subsys=daemon
level=info msg=" --ipv4-service-range='auto'" subsys=daemon
level=info msg=" --ipv6-cluster-alloc-cidr='f00d::/64'" subsys=daemon
level=info msg=" --ipv6-mcast-device=''" subsys=daemon
level=info msg=" --ipv6-native-routing-cidr=''" subsys=daemon
level=info msg=" --ipv6-node='auto'" subsys=daemon
level=info msg=" --ipv6-pod-subnets=''" subsys=daemon
level=info msg=" --ipv6-range='auto'" subsys=daemon
level=info msg=" --ipv6-service-range='auto'" subsys=daemon
level=info msg=" --join-cluster='false'" subsys=daemon
level=info msg=" --k8s-api-server=''" subsys=daemon
level=info msg=" --k8s-client-burst='10'" subsys=daemon
level=info msg=" --k8s-client-qps='5'" subsys=daemon
level=info msg=" --k8s-heartbeat-timeout='30s'" subsys=daemon
level=info msg=" --k8s-kubeconfig-path=''" subsys=daemon
level=info msg=" --k8s-namespace='kube-system'" subsys=daemon
level=info msg=" --k8s-require-ipv4-pod-cidr='false'" subsys=daemon
level=info msg=" --k8s-require-ipv6-pod-cidr='false'" subsys=daemon
level=info msg=" --k8s-service-cache-size='128'" subsys=daemon
level=info msg=" --k8s-service-proxy-name=''" subsys=daemon
level=info msg=" --k8s-sync-timeout='3m0s'" subsys=daemon
level=info msg=" --k8s-watcher-endpoint-selector='metadata.name!=kube-scheduler,metadata.name!=kube-controller-manager,metadata.name!=etcd-operator,metadata.name!=gcp-controller-manager'" subsys=daemon
level=info msg=" --keep-config='false'" subsys=daemon
level=info msg=" --kube-proxy-replacement='false'" subsys=daemon
level=info msg=" --kube-proxy-replacement-healthz-bind-address=''" subsys=daemon
level=info msg=" --kvstore=''" subsys=daemon
level=info msg=" --kvstore-connectivity-timeout='2m0s'" subsys=daemon
level=info msg=" --kvstore-lease-ttl='15m0s'" subsys=daemon
level=info msg=" --kvstore-max-consecutive-quorum-errors='2'" subsys=daemon
level=info msg=" --kvstore-opt=''" subsys=daemon
level=info msg=" --kvstore-periodic-sync='5m0s'" subsys=daemon
level=info msg=" --l2-announcements-lease-duration='15s'" subsys=daemon
level=info msg=" --l2-announcements-renew-deadline='5s'" subsys=daemon
level=info msg=" --l2-announcements-retry-period='2s'" subsys=daemon
level=info msg=" --l2-pod-announcements-interface=''" subsys=daemon
level=info msg=" --label-prefix-file=''" subsys=daemon
level=info msg=" --labels=''" subsys=daemon
level=info msg=" --lib-dir='/var/lib/cilium'" subsys=daemon
level=info msg=" --local-max-addr-scope='252'" subsys=daemon
level=info msg=" --local-router-ipv4=''" subsys=daemon
level=info msg=" --local-router-ipv6=''" subsys=daemon
level=info msg=" --log-driver=''" subsys=daemon
level=info msg=" --log-opt=''" subsys=daemon
level=info msg=" --log-system-load='false'" subsys=daemon
level=info msg=" --max-controller-interval='0'" subsys=daemon
level=info msg=" --mesh-auth-enabled='true'" subsys=daemon
level=info msg=" --mesh-auth-gc-interval='5m0s'" subsys=daemon
level=info msg=" --mesh-auth-mutual-listener-port='0'" subsys=daemon
level=info msg=" --mesh-auth-queue-size='1024'" subsys=daemon
level=info msg=" --mesh-auth-rotated-identities-queue-size='1024'" subsys=daemon
level=info msg=" --mesh-auth-spiffe-trust-domain='spiffe.cilium'" subsys=daemon
level=info msg=" --mesh-auth-spire-admin-socket=''" subsys=daemon
level=info msg=" --metrics=''" subsys=daemon
level=info msg=" --mke-cgroup-mount=''" subsys=daemon
level=info msg=" --monitor-aggregation='medium'" subsys=daemon
level=info msg=" --monitor-aggregation-flags='all'" subsys=daemon
level=info msg=" --monitor-aggregation-interval='5s'" subsys=daemon
level=info msg=" --monitor-queue-size='0'" subsys=daemon
level=info msg=" --mtu='0'" subsys=daemon
level=info msg=" --node-encryption-opt-out-labels='node-role.kubernetes.io/control-plane'" subsys=daemon
level=info msg=" --node-port-acceleration='disabled'" subsys=daemon
level=info msg=" --node-port-algorithm='random'" subsys=daemon
level=info msg=" --node-port-bind-protection='true'" subsys=daemon
level=info msg=" --node-port-mode='snat'" subsys=daemon
level=info msg=" --node-port-range='30000,32767'" subsys=daemon
level=info msg=" --nodes-gc-interval='5m0s'" subsys=daemon
level=info msg=" --operator-api-serve-addr='127.0.0.1:9234'" subsys=daemon
level=info msg=" --operator-prometheus-serve-addr=':9963'" subsys=daemon
level=info msg=" --policy-audit-mode='false'" subsys=daemon
level=info msg=" --policy-queue-size='100'" subsys=daemon
level=info msg=" --policy-trigger-interval='1s'" subsys=daemon
level=info msg=" --pprof='false'" subsys=daemon
level=info msg=" --pprof-address='localhost'" subsys=daemon
level=info msg=" --pprof-port='6060'" subsys=daemon
level=info msg=" --preallocate-bpf-maps='false'" subsys=daemon
level=info msg=" --prepend-iptables-chains='true'" subsys=daemon
level=info msg=" --procfs='/host/proc'" subsys=daemon
level=info msg=" --prometheus-serve-addr=':9962'" subsys=daemon
level=info msg=" --proxy-connect-timeout='2'" subsys=daemon
level=info msg=" --proxy-gid='1337'" subsys=daemon
level=info msg=" --proxy-idle-timeout-seconds='60'" subsys=daemon
level=info msg=" --proxy-max-connection-duration-seconds='0'" subsys=daemon
level=info msg=" --proxy-max-requests-per-connection='0'" subsys=daemon
level=info msg=" --proxy-prometheus-port='9964'" subsys=daemon
level=info msg=" --read-cni-conf=''" subsys=daemon
level=info msg=" --remove-cilium-node-taints='true'" subsys=daemon
level=info msg=" --restore='true'" subsys=daemon
level=info msg=" --route-metric='0'" subsys=daemon
level=info msg=" --routing-mode='tunnel'" subsys=daemon
level=info msg=" --set-cilium-is-up-condition='true'" subsys=daemon
level=info msg=" --set-cilium-node-taints='true'" subsys=daemon
level=info msg=" --sidecar-istio-proxy-image='cilium/istio_proxy'" subsys=daemon
level=info msg=" --single-cluster-route='false'" subsys=daemon
level=info msg=" --skip-cnp-status-startup-clean='false'" subsys=daemon
level=info msg=" --socket-path='/var/run/cilium/cilium.sock'" subsys=daemon
level=info msg=" --srv6-encap-mode='reduced'" subsys=daemon
level=info msg=" --state-dir='/var/run/cilium'" subsys=daemon
level=info msg=" --synchronize-k8s-nodes='true'" subsys=daemon
level=info msg=" --tofqdns-dns-reject-response-code='refused'" subsys=daemon
level=info msg=" --tofqdns-enable-dns-compression='true'" subsys=daemon
level=info msg=" --tofqdns-endpoint-max-ip-per-hostname='50'" subsys=daemon
level=info msg=" --tofqdns-idle-connection-grace-period='0s'" subsys=daemon
level=info msg=" --tofqdns-max-deferred-connection-deletes='10000'" subsys=daemon
level=info msg=" --tofqdns-min-ttl='0'" subsys=daemon
level=info msg=" --tofqdns-pre-cache=''" subsys=daemon
level=info msg=" --tofqdns-proxy-port='0'" subsys=daemon
level=info msg=" --tofqdns-proxy-response-max-delay='100ms'" subsys=daemon
level=info msg=" --trace-payloadlen='128'" subsys=daemon
level=info msg=" --trace-sock='true'" subsys=daemon
level=info msg=" --tunnel=''" subsys=daemon
level=info msg=" --tunnel-port='0'" subsys=daemon
level=info msg=" --tunnel-protocol='vxlan'" subsys=daemon
level=info msg=" --unmanaged-pod-watcher-interval='15'" subsys=daemon
level=info msg=" --use-cilium-internal-ip-for-ipsec='false'" subsys=daemon
level=info msg=" --version='false'" subsys=daemon
level=info msg=" --vlan-bpf-bypass=''" subsys=daemon
level=info msg=" --vtep-cidr=''" subsys=daemon
level=info msg=" --vtep-endpoint=''" subsys=daemon
level=info msg=" --vtep-mac=''" subsys=daemon
level=info msg=" --vtep-mask=''" subsys=daemon
level=info msg=" --write-cni-conf-when-ready='/host/etc/cni/net.d/05-cilium.conflist'" subsys=daemon
level=info msg=" _ _ _" subsys=daemon
level=info msg=" ___|_| |_|_ _ _____" subsys=daemon
level=info msg="| _| | | | | | |" subsys=daemon
level=info msg="|___|_|_|_|___|_|_|_|" subsys=daemon
level=info msg="Cilium 1.14.1 c191ef6f 2023-08-10T18:54:57+02:00 go version go1.20.7 linux/amd64" subsys=daemon
level=info msg="clang (10.0.0) and kernel (5.15.0) versions: OK!" subsys=linux-datapath
level=info msg="linking environment: OK!" subsys=linux-datapath
level=info msg="Kernel config file not found: if the agent fails to start, check the system requirements at https://docs.cilium.io/en/stable/operations/system_requirements" subsys=probes
level=info msg="Detected mounted BPF filesystem at /sys/fs/bpf" subsys=bpf
level=info msg="Mounted cgroupv2 filesystem at /run/cilium/cgroupv2" subsys=cgroups
level=info msg="Parsing base label prefixes from default label list" subsys=labels-filter
level=info msg="Parsing additional label prefixes from user inputs: []" subsys=labels-filter
level=info msg="Final label prefixes to be used for identity evaluation:" subsys=labels-filter
level=info msg=" - reserved:.*" subsys=labels-filter
level=info msg=" - :io\\.kubernetes\\.pod\\.namespace" subsys=labels-filter
level=info msg=" - :io\\.cilium\\.k8s\\.namespace\\.labels" subsys=labels-filter
level=info msg=" - :app\\.kubernetes\\.io" subsys=labels-filter
level=info msg=" - !:io\\.kubernetes" subsys=labels-filter
level=info msg=" - !:kubernetes\\.io" subsys=labels-filter
level=info msg=" - !:.*beta\\.kubernetes\\.io" subsys=labels-filter
level=info msg=" - !:k8s\\.io" subsys=labels-filter
level=info msg=" - !:pod-template-generation" subsys=labels-filter
level=info msg=" - !:pod-template-hash" subsys=labels-filter
level=info msg=" - !:controller-revision-hash" subsys=labels-filter
level=info msg=" - !:annotation.*" subsys=labels-filter
level=info msg=" - !:etcd_node" subsys=labels-filter
level=info msg=Invoked duration="812.824µs" function="pprof.glob..func1 (cell.go:51)" subsys=hive
level=info msg=Invoked duration="43.532µs" function="gops.registerGopsHooks (cell.go:39)" subsys=hive
level=info msg=Invoked duration="949.631µs" function="metrics.NewRegistry (registry.go:65)" subsys=hive
level=info msg=Invoked duration=85.519648ms function="cmd.glob..func4 (daemon_main.go:1589)" subsys=hive
level=info msg="Spire Delegate API Client is disabled as no socket path is configured" subsys=spire-delegate
level=info msg="Mutual authentication handler is disabled as no port is configured" subsys=auth
level=info msg=Invoked duration="351.569µs" function="auth.registerAuthManager (cell.go:85)" subsys=hive
level=info msg=Invoked duration="11.151µs" function="gc.registerSignalHandler (cell.go:47)" subsys=hive
level=info msg=Invoked duration="15.449µs" function="utime.initUtimeSync (cell.go:30)" subsys=hive
level=info msg=Invoked duration="61.686µs" function="agentliveness.newAgentLivenessUpdater (agent_liveness.go:44)" subsys=hive
level=info msg=Invoked duration="80.16µs" function="l2responder.NewL2ResponderReconciler (l2responder.go:64)" subsys=hive
level=info msg=Invoked duration="92.774µs" function="garp.newGARPProcessor (processor.go:27)" subsys=hive
level=info msg=Starting subsys=hive
level=info msg="Started gops server" address="127.0.0.1:9890" subsys=gops
level=info msg="Start hook executed" duration="400.942µs" function="gops.registerGopsHooks.func1 (cell.go:44)" subsys=hive
level=info msg="Start hook executed" duration="2.836µs" function="metrics.NewRegistry.func1 (registry.go:86)" subsys=hive
level=info msg="Establishing connection to apiserver" host="https://10.43.0.1:443" subsys=k8s-client
level=info msg="Serving prometheus metrics on :9962" subsys=metrics
level=info msg="Connected to apiserver" subsys=k8s-client
level=info msg="Start hook executed" duration=8.152882ms function="client.(*compositeClientset).onStart" subsys=hive
level=info msg="Start hook executed" duration="13.264µs" function="*resource.resource[*github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1.Node].Start" subsys=hive
level=info msg="Using autogenerated IPv4 allocation range" subsys=node v4Prefix=10.61.0.0/16
level=info msg="Opting out from node-to-node encryption on this node as per 'node-encryption-opt-out-labels' label selector" Selector=node-role.kubernetes.io/control-plane subsys=daemon
level=info msg="Start hook executed" duration=2.437822ms function="node.NewLocalNodeStore.func1 (local_node_store.go:77)" subsys=hive
level=info msg="Start hook executed" duration="86.513µs" function="authmap.newAuthMap.func1 (cell.go:28)" subsys=hive
level=info msg="Start hook executed" duration="25.177µs" function="configmap.newMap.func1 (cell.go:24)" subsys=hive
level=info msg="Start hook executed" duration="35.737µs" function="signalmap.newMap.func1 (cell.go:45)" subsys=hive
level=info msg="Start hook executed" duration="21.33µs" function="nodemap.newNodeMap.func1 (cell.go:24)" subsys=hive
level=info msg="Start hook executed" duration="115.817µs" function="eventsmap.newEventsMap.func1 (cell.go:36)" subsys=hive
level=info msg="Start hook executed" duration=16.363481ms function="datapath.newDatapath.func1 (cells.go:114)" subsys=hive
level=info msg="Restored 0 node IDs from the BPF map" subsys=linux-datapath
level=info msg="Start hook executed" duration="85.019µs" function="datapath.newDatapath.func2 (cells.go:127)" subsys=hive
level=info msg="Start hook executed" duration="5.36µs" function="*resource.resource[*github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1.Service].Start" subsys=hive
level=info msg="Start hook executed" duration=200.647639ms function="*manager.diffStore[*github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1.Service].Start" subsys=hive
level=info msg="Start hook executed" duration="3.607µs" function="*resource.resource[*github.com/cilium/cilium/pkg/k8s.Endpoints].Start" subsys=hive
level=info msg="Using discoveryv1.EndpointSlice" subsys=k8s
level=info msg="Start hook executed" duration=200.595901ms function="*manager.diffStore[*github.com/cilium/cilium/pkg/k8s.Endpoints].Start" subsys=hive
level=info msg="Start hook executed" duration="2.404µs" function="*resource.resource[*github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2.CiliumNode].Start" subsys=hive
level=info msg="Start hook executed" duration="2.555µs" function="*resource.resource[*github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1.Pod].Start" subsys=hive
level=info msg="Start hook executed" duration="1.803µs" function="*resource.resource[*github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1.Namespace].Start" subsys=hive
level=info msg="Start hook executed" duration="2.014µs" function="*resource.resource[*github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2.CiliumNetworkPolicy].Start" subsys=hive
level=info msg="Start hook executed" duration="29.866µs" function="*resource.resource[*github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2.CiliumClusterwideNetworkPolicy].Start" subsys=hive
level=info msg="Start hook executed" duration="2.004µs" function="*resource.resource[*github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1.CiliumCIDRGroup].Start" subsys=hive
level=info msg="Start hook executed" duration="24.075µs" function="endpointmanager.newDefaultEndpointManager.func1 (cell.go:185)" subsys=hive
level=info msg="Start hook executed" duration="18.124µs" function="cmd.newPolicyTrifecta.func1 (policy.go:130)" subsys=hive
level=info msg="Start hook executed" duration="19.156µs" function="*manager.manager.Start" subsys=hive
level=info msg="Start hook executed" duration="128.551µs" function="*cni.cniConfigManager.Start" subsys=hive
level=info msg="Generating CNI configuration file with mode portmap" subsys=cni-config
level=info msg="Serving cilium node monitor v1.2 API at unix:///var/run/cilium/monitor1_2.sock" subsys=monitor-agent
level=info msg="Start hook executed" duration="287.74µs" function="agent.newMonitorAgent.func1 (cell.go:62)" subsys=hive
level=info msg="Start hook executed" duration="15.499µs" function="*resource.resource[*github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1.CiliumL2AnnouncementPolicy].Start" subsys=hive
level=info msg="Start hook executed" duration="10.99µs" function="*job.group.Start" subsys=hive
level=info msg="Start hook executed" duration="337.443µs" function="proxy.newProxy.func1 (cell.go:56)" subsys=hive
level=info msg="Envoy: Starting xDS gRPC server listening on /var/run/cilium/envoy/sockets/xds.sock" subsys=envoy-manager
level=info msg="Inheriting MTU from external network interface" device=lo ipAddr=88.200.23.61 mtu=65536 subsys=mtu
level=info msg="Removed map pin at /sys/fs/bpf/tc/globals/cilium_ipcache, recreating and re-pinning map cilium_ipcache" file-path=/sys/fs/bpf/tc/globals/cilium_ipcache name=cilium_ipcache subsys=bpf
level=info msg="Removed map pin at /sys/fs/bpf/tc/globals/cilium_tunnel_map, recreating and re-pinning map cilium_tunnel_map" file-path=/sys/fs/bpf/tc/globals/cilium_tunnel_map name=cilium_tunnel_map subsys=bpf
level=info msg="Restored services from maps" failedServices=0 restoredServices=12 subsys=service
level=info msg="Restored backends from maps" failedBackends=0 restoredBackends=13 skippedBackends=0 subsys=service
level=info msg="Reading old endpoints..." subsys=daemon
level=info msg="No old endpoints found." subsys=daemon
level=info msg="Waiting until all Cilium CRDs are available" subsys=k8s
level=info msg="All Cilium CRDs have been found and are available" subsys=k8s
level=info msg="Retrieved node information from kubernetes node" nodeName=i1 subsys=k8s
level=info msg="Received own node information from API server" ipAddr.ipv4=88.200.23.61 ipAddr.ipv6="<nil>" k8sNodeIP=88.200.23.61 labels="map[beta.kubernetes.io/arch:amd64 beta.kubernetes.io/instance-type:rke2 beta.kubernetes.io/os:linux kubernetes.io/arch:amd64 kubernetes.io/hostname:i1 kubernetes.io/os:linux node-role.kubernetes.io/control-plane:true node-role.kubernetes.io/etcd:true node-role.kubernetes.io/master:true node.kubernetes.io/instance-type:rke2]" nodeName=i1 subsys=k8s v4Prefix=10.42.0.0/24 v6Prefix="<nil>"
level=info msg="k8s mode: Allowing localhost to reach local endpoints" subsys=daemon
level=info msg="Creating or updating CiliumNode resource" node=i1 subsys=nodediscovery
level=info msg="Detected devices" devices="[]" subsys=linux-datapath
level=info msg="Enabling k8s event listener" subsys=k8s-watcher
level=info msg="Removing stale endpoint interfaces" subsys=daemon
level=info msg="Waiting until local node addressing before starting watchers depending on it" subsys=k8s-watcher
level=info msg="Skipping kvstore configuration" subsys=daemon
level=info msg="Initializing node addressing" subsys=daemon
level=info msg="Initializing kubernetes IPAM" subsys=ipam v4Prefix=10.42.0.0/24 v6Prefix="<nil>"
level=info msg="Restoring endpoints..." subsys=daemon
level=info msg="Endpoints restored" failed=0 restored=0 subsys=daemon
level=info msg="Addressing information:" subsys=daemon
level=info msg=" Cluster-Name: default" subsys=daemon
level=info msg=" Cluster-ID: 0" subsys=daemon
level=info msg=" Local node-name: i1" subsys=daemon
level=info msg=" Node-IPv6: <nil>" subsys=daemon
level=info msg=" External-Node IPv4: 88.200.23.61" subsys=daemon
level=info msg=" Internal-Node IPv4: 10.42.0.101" subsys=daemon
level=info msg=" IPv4 allocation prefix: 10.42.0.0/24" subsys=daemon
level=info msg=" Loopback IPv4: 169.254.42.1" subsys=daemon
level=info msg=" Local IPv4 addresses:" subsys=daemon
level=info msg=" - 88.200.23.61" subsys=daemon
level=info msg=" - 10.100.0.111" subsys=daemon
level=info msg="Adding local node to cluster" node="{i1 default [{InternalIP 88.200.23.61} {CiliumInternalIP 10.42.0.101}] 10.42.0.0/24 [] <nil> [] 10.42.0.218 <nil> <nil> <nil> 0 local 0 map[beta.kubernetes.io/arch:amd64 beta.kubernetes.io/instance-type:rke2 beta.kubernetes.io/os:linux kubernetes.io/arch:amd64 kubernetes.io/hostname:i1 kubernetes.io/os:linux node-role.kubernetes.io/control-plane:true node-role.kubernetes.io/etcd:true node-role.kubernetes.io/master:true node.kubernetes.io/instance-type:rke2] map[] 1 }" subsys=nodediscovery
level=info msg="Waiting until all pre-existing resources have been received" subsys=k8s-watcher
level=info msg="Creating or updating CiliumNode resource" node=i1 subsys=nodediscovery
level=info msg="Initializing identity allocator" subsys=identity-cache
level=info msg="Allocating identities between range" cluster-id=0 max=65535 min=256 subsys=identity-cache
level=info msg="Setting sysctl" subsys=sysctl sysParamName=net.ipv4.conf.cilium_host.forwarding sysParamValue=1
level=info msg="Setting sysctl" subsys=sysctl sysParamName=net.ipv4.conf.cilium_host.rp_filter sysParamValue=0
level=info msg="Setting sysctl" subsys=sysctl sysParamName=net.ipv4.conf.cilium_host.accept_local sysParamValue=1
level=info msg="Setting sysctl" subsys=sysctl sysParamName=net.ipv4.conf.cilium_host.send_redirects sysParamValue=0
level=info msg="Setting sysctl" subsys=sysctl sysParamName=net.ipv4.conf.cilium_net.forwarding sysParamValue=1
level=info msg="Setting sysctl" subsys=sysctl sysParamName=net.ipv4.conf.cilium_net.rp_filter sysParamValue=0
level=info msg="Setting sysctl" subsys=sysctl sysParamName=net.ipv4.conf.cilium_net.accept_local sysParamValue=1
level=info msg="Setting sysctl" subsys=sysctl sysParamName=net.ipv4.conf.cilium_net.send_redirects sysParamValue=0
level=error msg="Start hook failed" error="daemon creation failed: error while initializing daemon: failed while reinitializing datapath: failed to setup base devices in mode tunnel: invalid argument" function="cmd.newDaemonPromise.func1 (daemon_main.go:1643)" subsys=hive
level=info msg=Stopping subsys=hive
level=info msg="Stop hook executed" duration="114.835µs" function="proxy.newProxy.func2 (cell.go:71)" subsys=hive
level=info msg="Stop hook executed" duration="11.782µs" function="*job.group.Stop" subsys=hive
level=info msg="Stop hook executed" duration="15.029µs" function="*resource.resource[*github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1.CiliumL2AnnouncementPolicy].Stop" subsys=hive
level=info msg="Stop hook executed" duration="17.403µs" function="agent.newMonitorAgent.func2 (cell.go:92)" subsys=hive
level=info msg="Stop hook executed" duration="140.664µs" function="*cni.cniConfigManager.Stop" subsys=hive
level=info msg="Stop hook executed" duration="20.189µs" function="*manager.manager.Stop" subsys=hive
level=info msg="Stop hook executed" duration="81.001µs" function="cmd.newPolicyTrifecta.func2 (policy.go:134)" subsys=hive
level=info msg="Stop hook executed" duration="26.84µs" function="endpointmanager.newDefaultEndpointManager.func2 (cell.go:189)" subsys=hive
level=info msg="Stop hook executed" duration="89.859µs" function="*resource.resource[*github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1.CiliumCIDRGroup].Stop" subsys=hive
level=info msg="Stop hook executed" duration="70.963µs" function="*resource.resource[*github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2.CiliumClusterwideNetworkPolicy].Stop" subsys=hive
level=info msg="Stop hook executed" duration="93.826µs" function="*resource.resource[*github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2.CiliumNetworkPolicy].Stop" subsys=hive
level=info msg="Stop hook executed" duration="59.832µs" function="*resource.resource[*github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1.Namespace].Stop" subsys=hive
level=info msg="Stop hook executed" duration="124.554µs" function="*resource.resource[*github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1.Pod].Stop" subsys=hive
level=info msg="Stop hook executed" duration="8.507µs" function="*resource.resource[*github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2.CiliumNode].Stop" subsys=hive
level=info msg="Stop hook executed" duration="29.745µs" function="*manager.diffStore[*github.com/cilium/cilium/pkg/k8s.Endpoints].Stop" subsys=hive
level=info msg="Stop hook executed" duration="132.779µs" function="*resource.resource[*github.com/cilium/cilium/pkg/k8s.Endpoints].Stop" subsys=hive
level=info msg="Stop hook executed" duration="28.834µs" function="*manager.diffStore[*github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1.Service].Stop" subsys=hive
level=info msg="Stop hook executed" duration="107.973µs" function="*resource.resource[*github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1.Service].Stop" subsys=hive
level=info msg="Stop hook executed" duration=772ns function="eventsmap.newEventsMap.func2 (cell.go:46)" subsys=hive
level=info msg="Stop hook executed" duration="14.177µs" function="nodemap.newNodeMap.func2 (cell.go:27)" subsys=hive
level=info msg="Stop hook executed" duration="7.514µs" function="signalmap.newMap.func2 (cell.go:48)" subsys=hive
level=info msg="Stop hook executed" duration="11.19µs" function="configmap.newMap.func2 (cell.go:27)" subsys=hive
level=info msg="Stop hook executed" duration="4.869µs" function="authmap.newAuthMap.func2 (cell.go:31)" subsys=hive
level=info msg="Stop hook executed" duration="1.603µs" function="node.NewLocalNodeStore.func2 (local_node_store.go:96)" subsys=hive
level=info msg="Stop hook executed" duration="115.096µs" function="*resource.resource[*github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1.Node].Stop" subsys=hive
level=info msg="Stop hook executed" duration="23.103µs" function="client.(*compositeClientset).onStop" subsys=hive
level=info msg="Stop hook executed" duration="52.449µs" function="metrics.NewRegistry.func2 (registry.go:96)" subsys=hive
level=info msg="Stopped gops server" address="127.0.0.1:9890" subsys=gops
level=info msg="Stop hook executed" duration="161.172µs" function="gops.registerGopsHooks.func2 (cell.go:51)" subsys=hive
level=fatal msg="failed to start: daemon creation failed: error while initializing daemon: failed while reinitializing datapath: failed to setup base devices in mode tunnel: invalid argument" subsys=daemon
Fatal errors can be found at the bottom.
Hey, I know this is old, but if anyone comes across this again, Cilium can be deployed like so:
rke2_config:
cni:
- cilium
This can be added to your rke2_common/vars/main.yml
file. So the item should be a list, not a string. Also note that this will not disable kube-proxy; to do that, the following is needed:
rke2_config:
cni:
- cilium
disable-kube-proxy: true
When I only add
cni: "cilium"
the node never gets into the Ready
state, and when I describe it, I get the output below. Notice the line:
This, to my understanding, indicates that the Cilium CNI plugin is not installed, which I also confirmed by listing
/etc/cni/net.d/
where there is no sign of Cilium.