nmiculinic opened 5 years ago
The container runtime is CRI containerd 1.2.5-1, with default settings.
kube-proxy log:
root@ip-10-102-12-217:/home/ubuntu# kubectl logs -n kube-system kube-proxy-btrc7
I0426 13:32:43.709474 1 server_others.go:148] Using iptables Proxier.
I0426 13:32:43.709624 1 server_others.go:178] Tearing down inactive rules.
E0426 13:32:43.735276 1 proxier.go:583] Error removing iptables rules in ipvs proxier: error deleting chain "KUBE-MARK-MASQ": exit status 1: iptables: Too many links.
I0426 13:32:44.077351 1 server.go:483] Version: v1.13.5
I0426 13:32:44.089921 1 conntrack.go:52] Setting nf_conntrack_max to 131072
I0426 13:32:44.090899 1 config.go:102] Starting endpoints config controller
I0426 13:32:44.090916 1 controller_utils.go:1027] Waiting for caches to sync for endpoints config controller
I0426 13:32:44.090946 1 config.go:202] Starting service config controller
I0426 13:32:44.090951 1 controller_utils.go:1027] Waiting for caches to sync for service config controller
I0426 13:32:44.191408 1 controller_utils.go:1034] Caches are synced for endpoints config controller
I0426 13:32:44.191432 1 controller_utils.go:1034] Caches are synced for service config controller
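The "Too many links" error during the IPVS teardown presumably means iptables refuses to delete the KUBE-MARK-MASQ chain while other rules still jump to it; checking the references on the node (a quick diagnostic sketch):

# List every rule that still jumps to KUBE-MARK-MASQ; iptables only
# allows deleting (-X) a chain once nothing references it.
iptables-save -t nat | grep -- '-j KUBE-MARK-MASQ'

# The chain header also shows the reference count directly.
iptables -t nat -L KUBE-MARK-MASQ -n | head -n 1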
root@ip-10-102-12-217:/home/ubuntu# iptables-save
# Generated by iptables-save v1.6.1 on Fri Apr 26 13:33:29 2019
*mangle
:PREROUTING ACCEPT [425193:88869358]
:INPUT ACCEPT [425069:88859363]
:FORWARD ACCEPT [124:9995]
:OUTPUT ACCEPT [433796:92762354]
:POSTROUTING ACCEPT [433920:92772349]
-A PREROUTING -i ens5 -p tcp -m tcp --dport 30000:32767 -m comment --comment "NodePort Mark" -j CONNMARK --set-xmark 0x2000/0xffffffff
-A PREROUTING -i ens5 -p udp -m udp --dport 30000:32767 -m comment --comment "NodePort Mark" -j CONNMARK --set-xmark 0x2000/0xffffffff
-A PREROUTING -i veth+ -m comment --comment "NodePort Mark" -j CONNMARK --restore-mark --nfmask 0xffffffff --ctmask 0xffffffff
COMMIT
# Completed on Fri Apr 26 13:33:29 2019
# Generated by iptables-save v1.6.1 on Fri Apr 26 13:33:29 2019
*nat
:PREROUTING ACCEPT [0:0]
:INPUT ACCEPT [0:0]
:OUTPUT ACCEPT [9:540]
:POSTROUTING ACCEPT [9:540]
:CNI-289f8429852874a375587e0c - [0:0]
:CNI-316726564ef5149a5e76a66b - [0:0]
:CNI-da43b94afd0290c4b62c265b - [0:0]
:DOCKER - [0:0]
:KUBE-MARK-DROP - [0:0]
:KUBE-MARK-MASQ - [0:0]
:KUBE-NODEPORTS - [0:0]
:KUBE-POSTROUTING - [0:0]
:KUBE-SEP-EKJB6BH2XW7NVORN - [0:0]
:KUBE-SEP-L652SK4V4OGOOLWE - [0:0]
:KUBE-SEP-UTSIPQOYE4BDJTCB - [0:0]
:KUBE-SEP-ZGOVZK7B4KARN3FQ - [0:0]
:KUBE-SERVICES - [0:0]
:KUBE-SVC-FAITROITGXHS3QVF - [0:0]
:KUBE-SVC-NPX46M4PTMTKRN6Y - [0:0]
:KUBE-SVC-QKJQYQZXY3DRLPVB - [0:0]
:KUBE-SVC-ZRLRAB2E5DTUX37C - [0:0]
-A PREROUTING -m comment --comment "kubernetes service portals" -j KUBE-SERVICES
-A PREROUTING -m addrtype --dst-type LOCAL -j DOCKER
-A OUTPUT -m comment --comment "kubernetes service portals" -j KUBE-SERVICES
-A OUTPUT ! -d 127.0.0.0/8 -m addrtype --dst-type LOCAL -j DOCKER
-A POSTROUTING -m comment --comment "kubernetes postrouting rules" -j KUBE-POSTROUTING
-A POSTROUTING -s 172.17.0.0/16 ! -o docker0 -j MASQUERADE
-A POSTROUTING -s 10.102.11.165/32 -m comment --comment "name: \"cni-ipvlan-vpc-k8s\" id: \"dea0871c209fc45c7b49e750d4443fac9867ef6e1223e43ab6cddbea693a85a5\"" -j CNI-289f8429852874a375587e0c
-A POSTROUTING -s 10.102.9.144/32 -m comment --comment "name: \"cni-ipvlan-vpc-k8s\" id: \"c4b1f623ec28058565e8d499f98e7de6dd1a784f86ff97a8eb87a28dea20525b\"" -j CNI-316726564ef5149a5e76a66b
-A POSTROUTING -s 10.102.4.6/32 -m comment --comment "name: \"cni-ipvlan-vpc-k8s\" id: \"766baf645eb23001276c7462aadbbff81f3f84f10ec7910d8d7cbbbb0cbca047\"" -j CNI-da43b94afd0290c4b62c265b
-A CNI-289f8429852874a375587e0c -d 10.102.11.165/32 -m comment --comment "name: \"cni-ipvlan-vpc-k8s\" id: \"dea0871c209fc45c7b49e750d4443fac9867ef6e1223e43ab6cddbea693a85a5\"" -j ACCEPT
-A CNI-289f8429852874a375587e0c ! -d 224.0.0.0/4 -m comment --comment "name: \"cni-ipvlan-vpc-k8s\" id: \"dea0871c209fc45c7b49e750d4443fac9867ef6e1223e43ab6cddbea693a85a5\"" -j MASQUERADE
-A CNI-316726564ef5149a5e76a66b -d 10.102.9.144/32 -m comment --comment "name: \"cni-ipvlan-vpc-k8s\" id: \"c4b1f623ec28058565e8d499f98e7de6dd1a784f86ff97a8eb87a28dea20525b\"" -j ACCEPT
-A CNI-316726564ef5149a5e76a66b ! -d 224.0.0.0/4 -m comment --comment "name: \"cni-ipvlan-vpc-k8s\" id: \"c4b1f623ec28058565e8d499f98e7de6dd1a784f86ff97a8eb87a28dea20525b\"" -j MASQUERADE
-A CNI-da43b94afd0290c4b62c265b -d 10.102.4.6/32 -m comment --comment "name: \"cni-ipvlan-vpc-k8s\" id: \"766baf645eb23001276c7462aadbbff81f3f84f10ec7910d8d7cbbbb0cbca047\"" -j ACCEPT
-A CNI-da43b94afd0290c4b62c265b ! -d 224.0.0.0/4 -m comment --comment "name: \"cni-ipvlan-vpc-k8s\" id: \"766baf645eb23001276c7462aadbbff81f3f84f10ec7910d8d7cbbbb0cbca047\"" -j MASQUERADE
-A DOCKER -i docker0 -j RETURN
-A KUBE-MARK-DROP -j MARK --set-xmark 0x8000/0x8000
-A KUBE-MARK-MASQ -j MARK --set-xmark 0x4000/0x4000
-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -m mark --mark 0x4000/0x4000 -j MASQUERADE
-A KUBE-SEP-EKJB6BH2XW7NVORN -s 10.102.12.217/32 -j KUBE-MARK-MASQ
-A KUBE-SEP-EKJB6BH2XW7NVORN -p tcp -m tcp -j DNAT --to-destination 10.102.12.217:6443
-A KUBE-SEP-L652SK4V4OGOOLWE -s 10.102.11.165/32 -j KUBE-MARK-MASQ
-A KUBE-SEP-L652SK4V4OGOOLWE -p tcp -m tcp -j DNAT --to-destination 10.102.11.165:53
-A KUBE-SEP-UTSIPQOYE4BDJTCB -s 10.102.11.165/32 -j KUBE-MARK-MASQ
-A KUBE-SEP-UTSIPQOYE4BDJTCB -p udp -m udp -j DNAT --to-destination 10.102.11.165:53
-A KUBE-SEP-ZGOVZK7B4KARN3FQ -s 10.102.11.165/32 -j KUBE-MARK-MASQ
-A KUBE-SEP-ZGOVZK7B4KARN3FQ -p tcp -m tcp -j DNAT --to-destination 10.102.11.165:9153
-A KUBE-SERVICES ! -s 10.102.0.0/15 -d 10.233.0.1/32 -p tcp -m comment --comment "default/kubernetes:https cluster IP" -m tcp --dport 443 -j KUBE-MARK-MASQ
-A KUBE-SERVICES -d 10.233.0.1/32 -p tcp -m comment --comment "default/kubernetes:https cluster IP" -m tcp --dport 443 -j KUBE-SVC-NPX46M4PTMTKRN6Y
-A KUBE-SERVICES ! -s 10.102.0.0/15 -d 10.233.0.3/32 -p tcp -m comment --comment "kube-system/coredns:metrics cluster IP" -m tcp --dport 9153 -j KUBE-MARK-MASQ
-A KUBE-SERVICES -d 10.233.0.3/32 -p tcp -m comment --comment "kube-system/coredns:metrics cluster IP" -m tcp --dport 9153 -j KUBE-SVC-QKJQYQZXY3DRLPVB
-A KUBE-SERVICES ! -s 10.102.0.0/15 -d 10.233.0.3/32 -p udp -m comment --comment "kube-system/coredns:dns cluster IP" -m udp --dport 53 -j KUBE-MARK-MASQ
-A KUBE-SERVICES -d 10.233.0.3/32 -p udp -m comment --comment "kube-system/coredns:dns cluster IP" -m udp --dport 53 -j KUBE-SVC-ZRLRAB2E5DTUX37C
-A KUBE-SERVICES ! -s 10.102.0.0/15 -d 10.233.0.3/32 -p tcp -m comment --comment "kube-system/coredns:dns-tcp cluster IP" -m tcp --dport 53 -j KUBE-MARK-MASQ
-A KUBE-SERVICES -d 10.233.0.3/32 -p tcp -m comment --comment "kube-system/coredns:dns-tcp cluster IP" -m tcp --dport 53 -j KUBE-SVC-FAITROITGXHS3QVF
-A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS
-A KUBE-SVC-FAITROITGXHS3QVF -j KUBE-SEP-L652SK4V4OGOOLWE
-A KUBE-SVC-NPX46M4PTMTKRN6Y -j KUBE-SEP-EKJB6BH2XW7NVORN
-A KUBE-SVC-QKJQYQZXY3DRLPVB -j KUBE-SEP-ZGOVZK7B4KARN3FQ
-A KUBE-SVC-ZRLRAB2E5DTUX37C -j KUBE-SEP-UTSIPQOYE4BDJTCB
COMMIT
# Completed on Fri Apr 26 13:33:29 2019
# Generated by iptables-save v1.6.1 on Fri Apr 26 13:33:29 2019
*filter
:INPUT ACCEPT [1428:266276]
:FORWARD ACCEPT [0:0]
:OUTPUT ACCEPT [1417:282975]
:DOCKER - [0:0]
:DOCKER-ISOLATION-STAGE-1 - [0:0]
:DOCKER-ISOLATION-STAGE-2 - [0:0]
:DOCKER-USER - [0:0]
:KUBE-EXTERNAL-SERVICES - [0:0]
:KUBE-FIREWALL - [0:0]
:KUBE-FORWARD - [0:0]
:KUBE-SERVICES - [0:0]
-A INPUT -m conntrack --ctstate NEW -m comment --comment "kubernetes externally-visible service portals" -j KUBE-EXTERNAL-SERVICES
-A INPUT -j KUBE-FIREWALL
-A FORWARD -m comment --comment "kubernetes forwarding rules" -j KUBE-FORWARD
-A FORWARD -j DOCKER-USER
-A FORWARD -j DOCKER-ISOLATION-STAGE-1
-A FORWARD -o docker0 -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
-A FORWARD -o docker0 -j DOCKER
-A FORWARD -i docker0 ! -o docker0 -j ACCEPT
-A FORWARD -i docker0 -o docker0 -j ACCEPT
-A OUTPUT -m conntrack --ctstate NEW -m comment --comment "kubernetes service portals" -j KUBE-SERVICES
-A OUTPUT -j KUBE-FIREWALL
-A DOCKER-ISOLATION-STAGE-1 -i docker0 ! -o docker0 -j DOCKER-ISOLATION-STAGE-2
-A DOCKER-ISOLATION-STAGE-1 -j RETURN
-A DOCKER-ISOLATION-STAGE-2 -o docker0 -j DROP
-A DOCKER-ISOLATION-STAGE-2 -j RETURN
-A DOCKER-USER -j RETURN
-A KUBE-FIREWALL -m comment --comment "kubernetes firewall for dropping marked packets" -m mark --mark 0x8000/0x8000 -j DROP
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
-A KUBE-FORWARD -s 10.102.0.0/15 -m comment --comment "kubernetes forwarding conntrack pod source rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
-A KUBE-FORWARD -d 10.102.0.0/15 -m comment --comment "kubernetes forwarding conntrack pod destination rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
COMMIT
# Completed on Fri Apr 26 13:33:29 2019
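The mangle rules above tag inbound NodePort connections with mark 0x2000 on ens5 and restore the mark on the veth side, while kube-proxy's 0x4000 mark drives the SNAT in KUBE-POSTROUTING. To verify the marks are actually being hit, the per-rule packet counters and conntrack entries can be inspected (a sketch; conntrack-tools must be installed):

# Per-rule packet counters for the NodePort CONNMARK rules.
iptables -t mangle -L PREROUTING -n -v

# Conntrack entries carrying the 0x2000 NodePort mark.
conntrack -L --mark 0x2000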
The instance is an r5.xlarge and is affected by the broken_cidr bug:
root@ip-10-102-12-217:/opt/cni/bin# ./cni-ipvlan-vpc-k8s-tool bugs
bug afflicted
broken_cidr true
root@ip-10-102-12-217:/opt/cni/bin# ./cni-ipvlan-vpc-k8s-tool eniif
iface mac id subnet subnet_cidr secgrps vpc ips
ens5 02:8a:f5:e6:2b:5c eni-064bbef63b8bdfbfd subnet-058dd62d2f30e5821 10.102.0.0/20 [sg-033900a59605dc7c9] vpc-0c64ea763ba7d43e1 [10.102.12.217]
ens6 02:41:30:5f:c5:fc eni-0336dcce1927e96c8 subnet-058dd62d2f30e5821 10.102.0.0/20 [sg-033900a59605dc7c9] vpc-0c64ea763ba7d43e1 [10.102.9.83 10.102.11.165 10.102.9.144]
root@ip-10-102-12-217:/opt/cni/bin# ./cni-ipvlan-vpc-k8s-tool vpccidr
iface metadata cidr aws api cidr
ens5 [10.102.0.0/16] [10.102.0.0/16]
ens6 [10.102.0.0/16] [10.102.0.0/16]
root@ip-10-102-12-217:/opt/cni/bin# ./cni-ipvlan-vpc-k8s-tool vpcpeercidr
iface peer_dcidr
ens5 []
ens6 []
root@ip-10-102-12-217:/opt/cni/bin#
Poking around, the tool output seems fine to me.
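As a cross-check on broken_cidr, the VPC CIDR can also be read straight from the instance metadata service (a sketch; the MAC is the ens5 one from the eniif output above):

MAC=02:8a:f5:e6:2b:5c
curl -s "http://169.254.169.254/latest/meta-data/network/interfaces/macs/$MAC/vpc-ipv4-cidr-block"; echo
curl -s "http://169.254.169.254/latest/meta-data/network/interfaces/macs/$MAC/vpc-ipv4-cidr-blocks"; echo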
The setup is as follows:
client & node aren't directly connected, there's vpn node in between them.
I can do the following pings/netcats (i.e., testing connectivity):
client <--> node (and vice versa)
vpn node <--> pod (and vice versa)
pod --> client (but not vice versa)
Upon closer inspection (running tcpdump on the node), I see the following:
AWS properly routes the packet to the node, yet there's no reply if the source address is outside the VPC.
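Roughly, the capture approach was along these lines (CLIENT_IP and POD_PID are placeholders for the actual client address and the pod's PID; the pod's interface name may differ):

CLIENT_IP=198.51.100.10   # placeholder: the client's address behind the VPN
# On the node: does the request arrive on the ENI, and does a reply ever leave?
tcpdump -ni ens6 host "$CLIENT_IP"
# Inside the pod's netns: does the pod see the request and answer it?
nsenter -t "$POD_PID" -n tcpdump -ni eth0 host "$CLIENT_IP"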
Version: v0.5.0
Configuration:
(the {{ }} placeholders are filled in with proper values).
I've marked a single private subnet with the required tag; it's the 10.102.0.0/20 subnet. The VPN node is in a different subnet (since it has a public IP).
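As far as I understand, the plugin relies on source-policy routing to steer pod traffic out of the right ENI, so my first suspicion for the missing replies is the policy rules and per-ENI tables for the pod IPs (a sketch; the table number depends on what the matching rule points at):

# Policy rules installed for the pod source addresses.
ip rule show
# Routes in the per-ENI table; substitute the table number
# referenced by the matching rule above.
ip route show table 2
# Strict reverse-path filtering can also silently drop asymmetric replies.
sysctl net.ipv4.conf.all.rp_filter net.ipv4.conf.ens6.rp_filter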