Open wittyResry opened 1 year ago
https://kubernetes.io/zh-cn/docs/concepts/storage/volumes/#configmap
---
# Pod mounting two ConfigMaps as files:
#   /etc/config1/custom-run.sh  <- cassandra-custom-run-script
#   /etc/config2/startup1.sh    <- startup-script2 (key startup.sh renamed via `path`)
apiVersion: v1
kind: Pod
metadata:
  name: configmap-pod
spec:
  containers:
    - name: test
      image: busybox
      imagePullPolicy: IfNotPresent
      # Keep the container alive so the mounted files can be inspected.
      command: ["/bin/sh", "-ce", "tail -f /dev/null"]
      volumeMounts:
        - name: config-vol
          mountPath: /etc/config1
        - name: config-startup
          mountPath: /etc/config2
  volumes:
    - name: config-vol
      configMap:
        name: cassandra-custom-run-script
        items:
          - key: custom-run.sh
            path: custom-run.sh
    - name: config-startup
      configMap:
        name: startup-script2
        items:
          - key: startup.sh
            # The ConfigMap key is projected under a different file name.
            path: startup1.sh
---
# ConfigMap carrying a JVM launcher script (startup.sh).
# The script tunes kernel/network params (opt-in via -t), derives JVM heap
# sizes from the container memory limit, and launches app.jar as user `admin`.
apiVersion: v1
kind: ConfigMap
metadata:
  name: startup-script2
data:
  startup.sh: |-
    #!/bin/bash
    HOME_DIR="/home/admin"
    GARBAGE_COLLECTOR="cms"

    tune_linux_kernel_params() {
      echo "tune linux kernel params"
      ulimit -SHn 655350
      ulimit -SHu 655350
      sysctl -w fs.file-max=1024000
    }

    tune_network_params() {
      echo "tune network params"
      sysctl -w net.ipv4.ip_local_port_range='1000 65535'
      sysctl -w net.core.somaxconn=32768
      sysctl -w net.ipv4.tcp_max_syn_backlog=16384
      sysctl -w net.core.netdev_max_backlog=16384
      sysctl -w net.core.rmem_default=262144
      sysctl -w net.core.wmem_default=262144
      sysctl -w net.core.rmem_max=16777216
      sysctl -w net.core.wmem_max=16777216
      sysctl -w net.core.optmem_max=16777216
      sysctl -w net.ipv4.tcp_rmem='1024 4096 16777216'
      sysctl -w net.ipv4.tcp_wmem='1024 4096 16777216'
      sysctl -w net.netfilter.nf_conntrack_max=1000000
      sysctl -w net.netfilter.nf_conntrack_tcp_timeout_time_wait=30
      sysctl -w net.ipv4.tcp_max_tw_buckets=1048576
    }

    init_user_and_dir() {
      id -u admin &>/dev/null || adduser admin -D -h ${HOME_DIR}
      chown admin:admin ${HOME_DIR}
      su admin -c "mkdir ${HOME_DIR}/logs; mkdir ${HOME_DIR}/dump"
    }

    build_java_opts() {
      if [[ ! -d /etc/podinfo ]]
      then
        MEM_TOTAL_MB=$[$(grep MemTotal /proc/meminfo | awk '{print $2}') / 1024]
      else
        MEM_TOTAL_MB=$(cat /etc/podinfo/mem_limit)
      fi
      if [[ ${MEM_TOTAL_MB} -ge $[64 * 1024] ]]
      then
        echo "the app resource limit[${MEM_TOTAL_MB} MB] too large, please check if the value is set."
        exit 1
      fi
      XMX=`echo "${MEM_TOTAL_MB} 0.75" | awk '{printf("%.0f", $1*$2)}'`
      XMS=${XMX}
      XMN=`echo "${XMX} 0.33" | awk '{printf("%.0f", $1*$2)}'`
      XSS="1"
      MAX_DIRECT_MEMORY_SIZE=`echo "${MEM_TOTAL_MB} 0.1" | awk '{printf("%.0f", $1*$2)}'`
      JAVA_OPTS="-Xms${XMS}m -Xmx${XMX}m -Xss${XSS}m -XX:MaxDirectMemorySize=${MAX_DIRECT_MEMORY_SIZE}m"
      JAVA_OPTS="$JAVA_OPTS -XX:+UnlockExperimentalVMOptions -XX:+UnlockDiagnosticVMOptions"
      if [[ ${GARBAGE_COLLECTOR} == "cms" ]]; then
        JAVA_OPTS="$JAVA_OPTS -XX:+UseConcMarkSweepGC -XX:CMSMaxAbortablePrecleanTime=5000"
        JAVA_OPTS="$JAVA_OPTS -XX:+CMSClassUnloadingEnabled -XX:CMSInitiatingOccupancyFraction=80 -XX:+UseCMSInitiatingOccupancyOnly"
      elif [[ ${GARBAGE_COLLECTOR} == "g1" ]]; then
        JAVA_OPTS="$JAVA_OPTS -XX:+UseG1GC -XX:G1RSetUpdatingPauseTimePercent=10 -XX:G1ConcRefinementThreads=4 -XX:MaxGCPauseMillis=50"
        JAVA_OPTS="$JAVA_OPTS -XX:G1HeapWastePercent=10 -XX:G1ReservePercent=10 -XX:G1OldCSetRegionThresholdPercent=30 -XX:+MonitorInUseLists"
        JAVA_OPTS="$JAVA_OPTS -XX:InitiatingHeapOccupancyPercent=70 -XX:G1MixedGCLiveThresholdPercent=75 -XX:+G1EagerReclaimHumongousObjects"
      fi
      JAVA_OPTS="$JAVA_OPTS -XX:ConcGCThreads=4 -XX:ParallelGCThreads=8"
      JAVA_OPTS="$JAVA_OPTS -XX:-ResizePLAB -XX:-ParallelRefProcEnabled -XX:-UseBiasedLocking"
      JAVA_OPTS="$JAVA_OPTS -Xloggc:${HOME_DIR}/logs/gc.log -XX:GCLogFileSize=10m -XX:NumberOfGCLogFiles=10 -XX:+UseGCLogFileRotation"
      JAVA_OPTS="$JAVA_OPTS -XX:+PrintGCDetails -XX:+PrintGCDateStamps -XX:+PrintGCApplicationStoppedTime"
      JAVA_OPTS="$JAVA_OPTS -XX:+ExitOnOutOfMemoryError -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=${HOME_DIR}/dump/"
      JAVA_OPTS="$JAVA_OPTS -XX:+PrintFlagsFinal -XX:+PrintReferenceGC"
    }

    start_up() {
      now=`date "+%Y-%m-%d %H:%M:%S"`
      echo "$now--------------------------"
      while getopts 't:g:' OPT; do
        case ${OPT} in
          t) TUNE=${OPTARG};;
          g) GARBAGE_COLLECTOR=${OPTARG};;
          *) ;;
        esac
      done
      echo "tune mode: ${TUNE}"
      if [[ ${TUNE} == 1 ]]; then
        tune_linux_kernel_params
      elif [[ ${TUNE} == 2 ]]; then
        tune_network_params
      elif [[ ${TUNE} == 3 ]]; then
        tune_linux_kernel_params
        tune_network_params
      fi
      init_user_and_dir
      build_java_opts
      echo "start to launch app..."
      su admin -c "cd ${HOME_DIR}/app; java ${JAVA_OPTS} -jar app.jar"
    }

    start_up "$@"
---
# ConfigMap with a Cassandra wrapper script: patches phi_convict_threshold
# in cassandra.yaml, then execs the stock bitnami entrypoint.
apiVersion: v1
kind: ConfigMap
metadata:
  name: cassandra-custom-run-script
data:
  custom-run.sh: |-
    perl -pi -e "s/# phi_convict_threshold: 8/phi_convict_threshold: 12/g" /opt/bitnami/cassandra/conf/cassandra.yaml
    exec /opt/bitnami/scripts/cassandra/run.sh
通过 initContainer 共享 /etc/resources/api-gateway 目录
---
# Pod that shares files via an emptyDir: the initContainer copies
# /home/admin/resources/api-gateway from its image into the shared volume,
# which the main container then mounts read-only at /etc/resources.
apiVersion: v1
kind: Pod
metadata:
  name: configmap-pod
spec:
  initContainers:
    - name: file-resource-api-gateway
      image: xxx:1.0.0
      imagePullPolicy: IfNotPresent
      command:
        - sh
        - -c
        - cp -r /home/admin/resources/api-gateway/. /etc/resources/api-gateway
      resources: {}
      terminationMessagePath: /dev/termination-log
      terminationMessagePolicy: File
      volumeMounts:
        - name: file-resource-api-gateway
          mountPath: /etc/resources
  containers:
    - name: test
      image: busybox
      imagePullPolicy: IfNotPresent
      # Keep the container alive so the mounted files can be inspected.
      command: ["/bin/sh", "-ce", "tail -f /dev/null"]
      volumeMounts:
        - name: config-vol
          mountPath: /etc/config1
        - name: config-startup
          mountPath: /etc/config2
        - name: file-resource-api-gateway
          mountPath: /etc/resources
  volumes:
    - name: config-vol
      configMap:
        name: cassandra-custom-run-script
        items:
          - key: custom-run.sh
            path: custom-run.sh
    - name: config-startup
      configMap:
        name: startup-script2
        items:
          - key: startup.sh
            path: startup1.sh
    # Scratch volume shared between initContainer and main container.
    - name: file-resource-api-gateway
      emptyDir: {}
PV
PersistentVolume(PV)是集群中由管理员配置的一段网络存储。 它是集群中的资源,就像节点是集群资源一样。 PV是容量插件,如Volumes,但其生命周期独立于使用PV的任何单个pod。 此API对象捕获存储实现的详细信息
PVC
PersistentVolumeClaim(PVC)是由用户进行存储的请求。 它类似于pod。 Pod消耗节点资源,PVC消耗PV资源。Pod可以请求特定级别的资源(CPU和内存), PVC声明可以请求特定存储的大小和访问模式(例如,可以一次读/写或多次只读)。
StorageClass为管理员提供了一种描述他们提供的存储的“类”的方法。 不同的类可能映射到服务质量级别,或备份策略,或者由群集管理员确定的任意策略。
PV生命周期
Available – 资源尚未被 claim 使用;Bound – 卷已经被绑定到 claim 了;Released – claim 被删除,卷处于释放状态,但未被集群回收;Failed – 卷自动回收失败
PV组件
PVC和PV是一一对应的。 Volume Controller:专门处理持久化存储的控制器 PersistentVolumeController :处理pv和pvc PersistentVolumeController 会不断地查看当前每一个 PVC,是不是已经处于 Bound(已绑定)状态。如果不是,那它就会遍历所有的、可用的 PV,并尝试将其与这个“单身”的 PVC 进行绑定。这样,Kubernetes 就可以保证用户提交的每一个 PVC,只要有合适的 PV 出现,它就能够很快进入绑定状态
PVC和PV绑定条件: 1)storageClassName 字段一致 2)PV 满足 PVC 的 spec 字段