
Kubernetes #1

Open rightly opened 4 years ago

rightly commented 4 years ago

All steps below are performed as root.

Add a sudo user

useradd {username}
passwd {username}
usermod -aG wheel {username}
visudo

## Same thing without a password
# %wheel  ALL=(ALL)       NOPASSWD: ALL <-- uncomment this line
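
A quick way to confirm the new account really has passwordless sudo (substitute the real name for {username}; this assumes the NOPASSWD line above was uncommented):

su - {username}
sudo whoami   # should print "root" without asking for a password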

Master node HA configuration (3 nodes)

Installation order
- Master
1. Keepalived
2. Haproxy
3. Docker
4. Kubernetes
5. Master clustering
- Worker
1. Docker
2. Kubernetes
3. Clustering

Install Keepalived

Run on each master server

yum install -y keepalived
mv /etc/keepalived/keepalived.conf /etc/keepalived/keepalived.conf.bak # back up the original
vim /etc/keepalived/keepalived.conf
# Master server 1
global_defs { 
    notification_email { 
        test@test.com
        test2@test.com
    }
    notification_email_from lb1@test.com
    smtp_server localhost
    smtp_connect_timeout 30
} 
# check that haproxy is running
vrrp_script chk_haproxy {
    script "killall -0 haproxy"
    interval 2
    weight 2
} 

vrrp_instance VI_1 {
    state MASTER
    interface eth0 # interface to use (can be checked with ifconfig)
    virtual_router_id 51 # must be identical on all three master nodes (255 max)
    priority 101 # priority (255 max)
    advert_int 1 # VRRP advertisement interval, in seconds
    authentication {
        auth_type PASS # plain-text authentication
        auth_pass 1111 # authentication key (set to the same value on all masters)
    }
    virtual_ipaddress {
        <LoadBalancer_IP> # the VIP - pick an address confirmed unused (e.g. with ping)
    }
    track_script { 
        chk_haproxy 
    } 
}

# Master server 2
# Master Node 2 is configured the same as Master Node 1, except for state and priority.
global_defs { 
    notification_email { 
        test@test.com
        test2@test.com
    }
    notification_email_from lb1@test.com
    smtp_server localhost
    smtp_connect_timeout 30
} 
# check that haproxy is running
vrrp_script chk_haproxy {
    script "killall -0 haproxy"
    interval 2
    weight 2
} 

vrrp_instance VI_2 {
    state BACKUP
    interface eth0 # interface to use (can be checked with ifconfig)
    virtual_router_id 51 # must be identical on all three master nodes (255 max)
    priority 100 # priority (255 max)
    advert_int 1 # VRRP advertisement interval, in seconds
    authentication {
        auth_type PASS # plain-text authentication
        auth_pass 1111 # authentication key (set to the same value on all masters)
    }
    virtual_ipaddress {
        <LoadBalancer_IP> # the VIP - pick an address confirmed unused (e.g. with ping)
    }
    track_script { 
        chk_haproxy 
    } 
}

# Master server 3
# Master Node 3 is configured the same as Master Node 1, except for state and priority.
global_defs { 
    notification_email { 
        test@test.com
        test2@test.com
    }
    notification_email_from lb1@test.com
    smtp_server localhost
    smtp_connect_timeout 30
} 
# check that haproxy is running
vrrp_script chk_haproxy {
    script "killall -0 haproxy"
    interval 2
    weight 2
} 

vrrp_instance VI_3 {
    state BACKUP
    interface eth0 # interface to use (can be checked with ifconfig)
    virtual_router_id 51 # must be identical on all three master nodes (255 max)
    priority 99 # priority (255 max)
    advert_int 1 # VRRP advertisement interval, in seconds
    authentication {
        auth_type PASS # plain-text authentication
        auth_pass 1111 # authentication key (set to the same value on all masters)
    }
    virtual_ipaddress {
        <LoadBalancer_IP> # the VIP - pick an address confirmed unused (e.g. with ping)
    }
    track_script { 
        chk_haproxy 
    } 
}

# start Keepalived
systemctl enable keepalived && systemctl start keepalived
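
To check which node currently holds the VIP, inspect the interface on each master; only the active MASTER should list the address (replace <LoadBalancer_IP> with the VIP configured above):

ip addr show eth0 | grep "<LoadBalancer_IP>"   # output appears only on the node holding the VIP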

Install HAProxy (on all masters)

yum install -y haproxy
mv /etc/haproxy/haproxy.cfg /etc/haproxy/haproxy.cfg.bak # back up the original
vim /etc/haproxy/haproxy.cfg

global
    log 127.0.0.1 local2
    maxconn 2000
    uid 0
    gid 0
    daemon # background process

defaults
    log global # use the global log settings
    mode tcp # TCP mode is required to pass SSL through (http mode cannot carry SSL)
    option tcplog
    option dontlognull # do not log connections that carried no data
    retries 3 # number of connection retry attempts
    maxconn 2000
    #option redispatch
    #timeout http-request 10s
    #timeout queue 1m
    timeout connect 10s
    timeout client 1m
    timeout server 1m

frontend ssl_front
    bind {VIP}:26443 # the VIP (these machines also run kube-master, so use a port different from the kube-api server's 6443)
    default_backend ssl_backend

backend ssl_backend
    balance roundrobin
    option tcp-check # do not use the ssl-hello-chk option - it speaks SSLv3 and breaks against the k8s API server (which supports TLS 1.2+ only)
    server hostname1 10.41.0.226:6443 check
    server hostname2 10.41.2.22:6443 check
    server hostname3 10.41.166.104:6443 check

systemctl enable haproxy && systemctl start haproxy
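
Before continuing, it is worth verifying that the VIP answers on the frontend port. A minimal sketch using bash's built-in /dev/tcp (substitute the real VIP for {VIP}; the backends will stay DOWN until kube-apiserver is running on port 6443):

timeout 3 bash -c '</dev/tcp/{VIP}/26443' && echo "frontend reachable"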

Install Docker (both Master and Worker)

https://crystalcube.co.kr/195?category=834418

# Install Docker CE
## Set up the repository
### Install required packages.
yum install -y yum-utils device-mapper-persistent-data lvm2

### Add Docker repository.
yum-config-manager --add-repo \
  https://download.docker.com/linux/centos/docker-ce.repo

## Install Docker CE.
yum update -y && yum install -y \
  containerd.io-1.2.10 \
  docker-ce-19.03.4 \
  docker-ce-cli-19.03.4

systemctl start docker && systemctl enable docker

# change the cgroup driver to systemd
# https://waspro.tistory.com/556
# edit /usr/lib/systemd/system/docker.service:
ExecStart=/usr/bin/dockerd --exec-opt native.cgroupdriver=systemd

# Restart Docker
systemctl daemon-reload
systemctl restart docker

usermod -aG docker {userId}

docker run hello-world
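
To confirm the cgroup driver change took effect:

docker info | grep -i "cgroup driver"   # should print: Cgroup Driver: systemd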

Install Kubernetes (both Master and Worker)

https://crystalcube.co.kr/198?category=834418

## selinux off
setenforce 0
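
setenforce 0 only lasts until the next boot; the upstream kubeadm install guide also flips the config file to permissive so the setting survives a reboot:

sed -i 's/^SELINUX=enforcing$/SELINUX=permissive/' /etc/selinux/config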

## iptables settings
cat <<EOF >  /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
sysctl --system
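
These bridge sysctl keys only exist while the br_netfilter kernel module is loaded, so if sysctl --system complains that they are missing, load the module and make it persistent (a minimal sketch):

modprobe br_netfilter
echo br_netfilter > /etc/modules-load.d/br_netfilter.conf
sysctl --system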

## disable firewalld
systemctl stop firewalld
systemctl disable firewalld

## swap off
swapoff -a
#/dev/mapper/centos-swap swap                    swap    defaults        0 0 <- comment this line out in /etc/fstab
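
swapoff -a is likewise not persistent. To comment the swap entry out of /etc/fstab non-interactively, something like this works (a sketch, assuming the entry has a whitespace-delimited swap field as in the example above):

sed -i '/\sswap\s/ s/^/#/' /etc/fstab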

## reboot
reboot

## configure the Kubernetes repo
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
exclude=kube*
EOF

## install
yum install -y kubelet kubeadm kubectl --disableexcludes=kubernetes

## enable and start the kubelet daemon
systemctl enable kubelet && systemctl start kubelet

## after the reboot, make sure ip_forward is enabled again
echo 1 > /proc/sys/net/ipv4/ip_forward
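
The echo above only changes the running kernel; to make it survive reboots, the key can be appended to the sysctl file created earlier:

cat <<EOF >> /etc/sysctl.d/k8s.conf
net.ipv4.ip_forward = 1
EOF
sysctl --system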

# change the cgroup driver for the kubelet as well (same idea as for Docker above;
# note the kubelet flag is --cgroup-driver, not dockerd's --exec-opt)
# https://waspro.tistory.com/556
# edit /usr/lib/systemd/system/kubelet.service:
ExecStart=/usr/bin/kubelet --cgroup-driver=systemd

systemctl daemon-reload
systemctl restart kubelet

Master HA setup

vim kubeadm-config.yaml
apiVersion: kubeadm.k8s.io/v1beta2
kind: ClusterConfiguration
kubernetesVersion: v1.17.3
controlPlaneEndpoint: "10.41.0.226:26443" # <- the haproxy frontend (VIP:port)
networking:
  podSubnet: "10.244.0.0/16"

kubeadm init --config=kubeadm-config.yaml --upload-certs
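
Before kubectl can talk to the new cluster, copy the admin kubeconfig into place (the same steps shown in the single-master section below):

mkdir -p $HOME/.kube
cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
chown $(id -u):$(id -g) $HOME/.kube/config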

## install flannel
kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/2140ac876ef134e0ed5af15c65e414cf26827915/Documentation/kube-flannel.yml

Once this completes, join Master servers 2 and 3 to the cluster; the exact join command is printed by kubeadm init, and takes the shape shown below.
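
A sketch of the control-plane join command (the token, hash, and certificate key are placeholders printed by kubeadm init --upload-certs, not real values):

kubeadm join 10.41.0.226:26443 --token <token> \
    --discovery-token-ca-cert-hash sha256:<hash> \
    --control-plane --certificate-key <certificate-key>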

etcd HA configuration

Change the kubelet systemd unit

cat << EOF > /usr/lib/systemd/system/kubelet.service.d/20-etcd-service-manager.conf
[Service]
ExecStart=
#  Replace "systemd" with the cgroup driver of your container runtime. The default value in the kubelet is "cgroupfs".
ExecStart=/usr/bin/kubelet --address=127.0.0.1 --pod-manifest-path=/etc/kubernetes/manifests --cgroup-driver=systemd
Restart=always
EOF
systemctl daemon-reload && systemctl restart kubelet

Generate the kubeadm config files on each master with the script below.
# update HOST0, HOST1, and HOST2 with the real IPs in use
export HOST0=10.41.0.226
export HOST1=10.41.2.22
export HOST2=10.41.166.104
mkdir -p /tmp/${HOST0}/ /tmp/${HOST1}/ /tmp/${HOST2}/
ETCDHOSTS=(${HOST0} ${HOST1} ${HOST2})
NAMES=("infra0" "infra1" "infra2")
for i in "${!ETCDHOSTS[@]}"; do
    HOST=${ETCDHOSTS[$i]}
    NAME=${NAMES[$i]}
    cat << EOF > /tmp/${HOST}/kubeadmcfg.yaml
apiVersion: "kubeadm.k8s.io/v1beta2"
kind: ClusterConfiguration
etcd:
    local:
        serverCertSANs:
        - "${HOST}"
        peerCertSANs:
        - "${HOST}"
        extraArgs:
            initial-cluster: ${NAMES[0]}=https://${ETCDHOSTS[0]}:2380,${NAMES[1]}=https://${ETCDHOSTS[1]}:2380,${NAMES[2]}=https://${ETCDHOSTS[2]}:2380
            initial-cluster-state: new
            name: ${NAME}
            listen-peer-urls: https://${HOST}:2380
            listen-client-urls: https://${HOST}:2379
            advertise-client-urls: https://${HOST}:2379
            initial-advertise-peer-urls: https://${HOST}:2380
EOF
done
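
A quick sanity check that the loop wrote one config file per host:

ls -l /tmp/${HOST0}/kubeadmcfg.yaml /tmp/${HOST1}/kubeadmcfg.yaml /tmp/${HOST2}/kubeadmcfg.yaml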

# create the etcd certificate authority
kubeadm init phase certs etcd-ca

# verify that the following two files were created
# /etc/kubernetes/pki/etcd/ca.crt
# /etc/kubernetes/pki/etcd/ca.key

# run on Master Node 1 only
$ kubeadm init phase certs etcd-server --config=/tmp/${HOST2}/kubeadmcfg.yaml
$ kubeadm init phase certs etcd-peer --config=/tmp/${HOST2}/kubeadmcfg.yaml
$ kubeadm init phase certs etcd-healthcheck-client --config=/tmp/${HOST2}/kubeadmcfg.yaml
$ kubeadm init phase certs apiserver-etcd-client --config=/tmp/${HOST2}/kubeadmcfg.yaml
$ cp -R /etc/kubernetes/pki /tmp/${HOST2}/

# cleanup non-reusable certificates
$ find /etc/kubernetes/pki -not -name ca.crt -not -name ca.key -type f -delete

$ kubeadm init phase certs etcd-server --config=/tmp/${HOST1}/kubeadmcfg.yaml
$ kubeadm init phase certs etcd-peer --config=/tmp/${HOST1}/kubeadmcfg.yaml
$ kubeadm init phase certs etcd-healthcheck-client --config=/tmp/${HOST1}/kubeadmcfg.yaml
$ kubeadm init phase certs apiserver-etcd-client --config=/tmp/${HOST1}/kubeadmcfg.yaml
$ cp -R /etc/kubernetes/pki /tmp/${HOST1}/
$ find /etc/kubernetes/pki -not -name ca.crt -not -name ca.key -type f -delete

$ kubeadm init phase certs etcd-server --config=/tmp/${HOST0}/kubeadmcfg.yaml
$ kubeadm init phase certs etcd-peer --config=/tmp/${HOST0}/kubeadmcfg.yaml
$ kubeadm init phase certs etcd-healthcheck-client --config=/tmp/${HOST0}/kubeadmcfg.yaml
$ kubeadm init phase certs apiserver-etcd-client --config=/tmp/${HOST0}/kubeadmcfg.yaml
# No need to move the certs because they are for HOST0

# clean up certs that should not be copied off this host
$ find /tmp/${HOST2} -name ca.key -type f -delete
$ find /tmp/${HOST1} -name ca.key -type f -delete
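
The generated certificates and configs then have to be copied to their respective hosts. The upstream guide (linked at the end) does this with scp, roughly as follows (assuming root SSH access; adjust the user as needed):

$ scp -r /tmp/${HOST1}/* root@${HOST1}:
$ scp -r /tmp/${HOST2}/* root@${HOST2}:
# then, on each of those hosts, move the certs into place:
#   mv pki /etc/kubernetes/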

Source: https://skysoo1111.tistory.com/48

Kubernetes master node (when not doing 3-node HA)


## init
# kubeadm init --pod-network-cidr=10.244.0.0/16

## save the join token printed by init for later
# mkdir -p $HOME/.kube
# cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
# chown $(id -u):$(id -g) $HOME/.kube/config

## if running as root, export the kubeconfig env var instead
# export KUBECONFIG=/etc/kubernetes/admin.conf

## install flannel
# kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/2140ac876ef134e0ed5af15c65e414cf26827915/Documentation/kube-flannel.yml

# kubectl get pods --all-namespaces

NAMESPACE     NAME                                            READY   STATUS    RESTARTS   AGE
kube-system   coredns-6955765f44-bhcbq                        0/1     Running   0          15m
kube-system   coredns-6955765f44-zvtwt                        0/1     Running   0          15m
kube-system   etcd-foundry-app-master001                      1/1     Running   0          16m
kube-system   kube-apiserver-foundry-app-master001            1/1     Running   0          16m
kube-system   kube-controller-manager-foundry-app-master001   1/1     Running   0          16m
kube-system   kube-flannel-ds-amd64-gwmpd                     1/1     Running   0          23s
kube-system   kube-proxy-tg6bq                                1/1     Running   0          15m
kube-system   kube-scheduler-foundry-app-master001            1/1     Running   0          16m
rightly commented 4 years ago

https://skysoo1111.tistory.com/48
https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/setup-ha-etcd-with-kubeadm/