Closed mktkhr closed 4 months ago
ansible-playbook -i inventory/myCluster/hosts.yaml -K --become --become-user=root cluster.yml
実行時にラズパイでエラー
k8s-cluster.yml
に enable_nodelocaldns: false
を設定して解決。

admin.conf が生成されない
k8s-cluster.yml
に kubeconfig_localhost: yes
を設定すると、/myCluster/artifact
フォルダが生成され、その中に conf が入るとの記載があったが、artifact ディレクトリは生成されなかった。

etcd のバックアップが終わらない
日曜日 21 4月 2024 19:13:24 +0900 (0:00:00.985) 0:12:45.582 ****************
RUNNING HANDLER [etcd : Backup etcd v3 data]
*************************************************************************************************************************
kubectl get node
でコントロールプレーンのみしか表示されなかった。

名前 | ハードウェア | メモリ | OS | ローカルIP |
---|---|---|---|---|
kube-controlplane-01 | intel core i5-4690 | 32GB | Ubuntu Desktop 24.04 LTS | 192.168.0.140 |
ems-raspberrypi-5 | RaspberryPi 5 | 8GB | Ubuntu Server 24.04 LTS | 192.168.0.111 |
ems-raspberrypi-4b | RaspberryPi 4B | 8GB | Ubuntu Server 24.04 LTS | 192.168.0.222 |
# Refresh the package index and install prerequisites (run on every node).
sudo apt-get update
sudo apt install vim
sudo apt-get install -y apt-transport-https ca-certificates curl gnupg gpg
# Create the apt keyring directory and fetch the Docker / Kubernetes signing keys.
sudo install -m 0755 -d /etc/apt/keyrings
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /etc/apt/keyrings/docker.gpg
curl -fsSL https://pkgs.k8s.io/core:/stable:/v1.30/deb/Release.key | sudo gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg
sudo chmod a+r /etc/apt/keyrings/docker.gpg
# Register the Docker and Kubernetes (v1.30) apt repositories.
echo "deb [arch="$(dpkg --print-architecture)" signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu "$(. /etc/os-release && echo "$VERSION_CODENAME")" stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null
echo 'deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/v1.30/deb/ /' | sudo tee /etc/apt/sources.list.d/kubernetes.list
sudo apt-get update
sudo apt-get install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin kubelet kubeadm kubectl
# kubelet requires swap to be off: disable it now, and comment the swap entry
# out of /etc/fstab so it stays off across reboots (edit shown as a diff below).
sudo swapoff -a
sudo vim /etc/fstab
- /swap.img none swap sw 0 0
+ # /swap.img none swap sw 0 0
ipv4forwardingを有効化
# Persist the kernel modules Kubernetes networking needs so they load at boot.
cat <<EOF | sudo tee /etc/modules-load.d/k8s.conf
overlay
br_netfilter
EOF
# Load them immediately for the current session.
sudo modprobe overlay
sudo modprobe br_netfilter
# Let iptables see bridged traffic and enable IPv4 forwarding.
cat <<EOF | sudo tee /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.ipv4.ip_forward = 1
EOF
# Apply the sysctl settings without rebooting (this step was missing; writing
# the file alone does not change the running kernel parameters).
sudo sysctl --system
# Confirm the modules are loaded.
lsmod | grep br_netfilter
lsmod | grep overlay
# Confirm the forwarding/bridge settings are in effect.
sysctl net.bridge.bridge-nf-call-iptables net.bridge.bridge-nf-call-ip6tables net.ipv4.ip_forward
containerdの設定
# Generate containerd's default config, then switch runc to the systemd cgroup
# driver so it matches the kubelet's cgroup driver
# (edit to /etc/containerd/config.toml shown as a diff below).
sudo containerd config default | sudo tee /etc/containerd/config.toml
sudo vim /etc/containerd/config.toml
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
...
- SystemdCgroup = false
+ SystemdCgroup = true
# Restart containerd to pick up the new configuration.
sudo systemctl restart containerd
CNIプラグインの追加
# Install the CNI reference plugins into /opt/cni/bin (needed by containerd).
# Select the archive for this node's CPU architecture instead of hard-coding
# amd64 — the control plane is amd64 but the Raspberry Pi nodes are arm64.
CNI_VERSION="v1.4.1"
ARCH="$(dpkg --print-architecture)"   # amd64 on the PC, arm64 on the Pis
wget "https://github.com/containernetworking/plugins/releases/download/${CNI_VERSION}/cni-plugins-linux-${ARCH}-${CNI_VERSION}.tgz"
sudo mkdir -p /opt/cni/bin
sudo tar -C /opt/cni/bin -xzvf "cni-plugins-linux-${ARCH}-${CNI_VERSION}.tgz"
# Print the default InitConfiguration/ClusterConfiguration as a starting point
# for init-config.yaml (edited version shown below).
kubeadm config print init-defaults
# init-config.yaml — based on "kubeadm config print init-defaults", edited for
# this cluster (control plane kube-controlplane-01 at 192.168.0.140).
apiVersion: kubeadm.k8s.io/v1beta3
bootstrapTokens:
- groups:
- system:bootstrappers:kubeadm:default-node-token
# change as appropriate
token: abcdef.0123456789abcdef
ttl: 24h0m0s
usages:
- signing
- authentication
kind: InitConfiguration
localAPIEndpoint:
advertiseAddress: 192.168.0.140
bindPort: 6443
nodeRegistration:
criSocket: unix:///var/run/containerd/containerd.sock
imagePullPolicy: IfNotPresent
name: kube-controlplane-01
taints: null
kubeletExtraArgs:
node-ip: 192.168.0.140
---
apiServer:
timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta3
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controllerManager: {}
dns: {}
etcd:
local:
dataDir: /var/lib/etcd
imageRepository: registry.k8s.io
kind: ClusterConfiguration
kubernetesVersion: 1.30.0
networking:
dnsDomain: cluster.local
serviceSubnet: 10.96.0.0/12
# NOTE(review): 192.168.0.50/16 overlaps the node LAN (192.168.0.x) and has
# host bits set; a disjoint range (e.g. 10.244.0.0/16) is usually safer — confirm.
podSubnet: 192.168.0.50/16
scheduler: {}
# Initialize the control plane with the customized config above.
sudo kubeadm init --config ~/server/stamp-iot/kubeadm/init-config.yaml
# List bootstrap tokens (needed later for "kubeadm join").
kubeadm token list
で token を確認可能。
kubeadm token create
で再生成する。

root 以外でも kubectl を実行できるように変更
# Standard kubeadm post-install step: copy admin.conf into the user's
# ~/.kube/config so kubectl works without root.
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
control-planeが生成されていることを確認する
kubectl get node
NAME STATUS ROLES AGE VERSION
kube-controlplane-01 NotReady control-plane 6m45s v1.30.0
kubectl --kubeconfig ./admin.conf get nodes
CNIのインストール
# Install the Tigera operator, which manages the Calico deployment.
kubectl create -f https://raw.githubusercontent.com/projectcalico/calico/v3.27.3/manifests/tigera-operator.yaml
# Working directory for the Calico custom resources.
# (fixed: the original "mkdir -p ~/home/server/stamp-iot/colico" had a stray
# "home/" segment and a "colico" typo, so the downloaded file would not be at
# the path used by "kubectl apply" below)
mkdir -p ~/server/stamp-iot/calico && cd ~/server/stamp-iot/calico
wget https://projectcalico.docs.tigera.io/manifests/custom-resources.yaml
# Change the pod CIDR to match the cluster (edit shown as a diff below).
sudo vim custom-resources.yaml
- cidr: 192.168.0.0/16
# the value given as podSubnet to kubeadm init
+ cidr: 192.168.0.50/16
kubectl apply -f /home/ems/server/stamp-iot/calico/custom-resources.yaml
# Wait for the calico-system pods to become Running.
kubectl get pod -n calico-system -o wide
サービスアカウントの作成
# Create an admin ServiceAccount (used for API / dashboard access).
mkdir -p /home/ems/server/stamp-iot/service-account
sudo vim /home/ems/server/stamp-iot/service-account/admin-service-account.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: ems-admin
namespace: kube-system
---
# Bind the account to the built-in cluster-admin ClusterRole (full access).
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: ems-admin
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cluster-admin
subjects:
- kind: ServiceAccount
name: ems-admin
namespace: kube-system
kubectl apply -f /home/ems/server/stamp-iot/service-account/admin-service-account.yaml
sudo vim /home/ems/server/stamp-iot/service-account/admin-service-account-token.yaml
# A Secret of type service-account-token, bound to the account via the
# annotation, yields a long-lived token for the ServiceAccount.
apiVersion: v1
kind: Secret
metadata:
name: admin-service-account-token
namespace: kube-system
annotations:
kubernetes.io/service-account.name: ems-admin
type: kubernetes.io/service-account-token
kubectl apply -f /home/ems/server/stamp-iot/service-account/admin-service-account-token.yaml
# The generated token appears in the secret's description.
kubectl describe secrets admin-service-account-token -n kube-system
ダッシュボードを入れてみる
# Deploy the Kubernetes dashboard and reach it through the API-server proxy.
# NOTE(review): dashboard v2.0.0 targets much older Kubernetes releases and may
# not work against v1.30 — confirm a compatible dashboard version.
kubectl --kubeconfig ./admin.conf apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v2.0.0/aio/deploy/recommended.yaml
kubectl --kubeconfig ./admin.conf proxy
http://localhost:8001/api/v1/namespaces/kubernetes-dashboard/services/https:kubernetes-dashboard:/proxy/#/login
でログイン画面表示。

mkdir ~/kubeadm && cd ~/kubeadm
# On each worker node: print the default JoinConfiguration as a starting point
# for join-config.yaml (edited version shown below).
kubeadm config print join-defaults
apiVersion: kubeadm.k8s.io/v1beta3
caCertPath: /etc/kubernetes/pki/ca.crt
discovery:
bootstrapToken:
apiServerEndpoint: 192.168.0.140:6443
token: <適宜変更する>
caCertHashes:
- sha256:<適宜変更する>
unsafeSkipCAVerification: true
timeout: 5m0s
tlsBootstrapToken: <適宜変更する>
kind: JoinConfiguration
nodeRegistration:
criSocket: unix:///var/run/containerd/containerd.sock
imagePullPolicy: IfNotPresent
name: <好きな名前に変更>
taints: null
kubeletExtraArgs:
node-ip: <適宜変更する>
# Join this node to the cluster using the config above (run as root).
sudo kubeadm join --config ~/kubeadm/join-config.yaml
3つのnodeが作成されていることを確認
kubectl --kubeconfig ~/.kube/admin.conf get nodes
NAME STATUS ROLES AGE VERSION
ems-raspberrypi-4b Ready <none> 8h v1.30.0
ems-raspberrypi-5 Ready <none> 8h v1.30.0
kube-controlplane-01 Ready control-plane 20h v1.30.0
目的
期待結果