Starting the FUSE process via systemd is one way to solve this problem.
Here is a systemd example; the CSI node plugin should create a transient unit like this during NodeStageVolume:
[root@k8s1 test]# cat /run/systemd/transient/mfs-fuse-pvc-5be4a515-13a1-4c81-b3ac-b93713bc04de.service
# This is a transient unit file, created programmatically via the systemd API. Do not edit.
[Unit]
Description=mfs fuse mount of pvc-5be4a515-13a1-4c81-b3ac-b93713bc04de
[Service]
ExecStart=
ExecStart="mount" "-f" "-t" "moosefs" "10.109.148.254:9421:/liuchang-test" "/var/lib/kubelet/plugins/kubernetes.io/csi/liuchang-test.csi.moosefs.com/e7673584d954b10a7e08aea91ae532734ebc4300f3f2a621c7e907d03025b0ac/globalmount"
ExecStopPost=
ExecStopPost="/bin/umount" "-f" "-l" "/var/lib/kubelet/plugins/kubernetes.io/csi/liuchang-test.csi.moosefs.com/e7673584d954b10a7e08aea91ae532734ebc4300f3f2a621c7e907d03025b0ac/globalmount"
[Unit]
CollectMode=inactive-or-failed
[root@k8s1-dev-common1.pgy01.ynode.cn test]#
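For reference, here is a minimal sketch (not the actual driver code) of how the node plugin could create such a transient unit programmatically through the systemd D-Bus API, assuming the github.com/coreos/go-systemd/v22/dbus bindings; the package name, the function startFuseUnit and its parameters are placeholders, and the command line just mirrors the unit file above:

package mounter

import (
	"context"
	"fmt"

	systemd "github.com/coreos/go-systemd/v22/dbus"
	godbus "github.com/godbus/dbus/v5"
)

// execCommand matches the a(sasb) D-Bus type systemd expects for
// ExecStart/ExecStopPost entries of a transient service.
type execCommand struct {
	Path             string
	Args             []string
	UncleanIsFailure bool
}

// startFuseUnit runs the moosefs mount as the main process of a transient
// service, so the fuse daemon lives in its own cgroup and survives a restart
// of the CSI node-plugin pod.
func startFuseUnit(ctx context.Context, pvName, source, target string) error {
	conn, err := systemd.NewSystemConnectionContext(ctx)
	if err != nil {
		return fmt.Errorf("connect to systemd: %w", err)
	}
	defer conn.Close()

	unit := fmt.Sprintf("mfs-fuse-%s.service", pvName)
	props := []systemd.Property{
		systemd.PropDescription("mfs fuse mount of " + pvName),
		// Foreground mount (-f) so systemd tracks the fuse process itself.
		systemd.PropExecStart([]string{"/usr/bin/mount", "-f", "-t", "moosefs", source, target}, false),
		// Lazy force-unmount of the staging path when the unit stops.
		{Name: "ExecStopPost", Value: godbus.MakeVariant([]execCommand{
			{Path: "/bin/umount", Args: []string{"/bin/umount", "-f", "-l", target}},
		})},
		// Let systemd garbage-collect the unit once it is inactive or failed.
		{Name: "CollectMode", Value: godbus.MakeVariant("inactive-or-failed")},
	}

	done := make(chan string, 1)
	if _, err := conn.StartTransientUnitContext(ctx, unit, "replace", props, done); err != nil {
		return fmt.Errorf("start transient unit %s: %w", unit, err)
	}
	if res := <-done; res != "done" {
		return fmt.Errorf("starting %s finished with result %q", unit, res)
	}
	return nil
}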
I have fixed this in my environment. Below is a test showing that after recreating the node-plugin pod, the mount point stays healthy:
[root@k8s1 demo]# kubectl get pod -n mfs -owide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
csi-moosefs-controller-liuchang-test-0 4/4 Running 0 20m 10.55.140.59 k8s1 <none> <none>
csi-moosefs-node-liuchang-test-894jr 2/2 Running 0 20m 10.55.140.60 k8s2 <none> <none>
csi-moosefs-node-liuchang-test-965c9 2/2 Running 0 20m 10.55.140.61 k8s3 <none> <none>
csi-moosefs-node-liuchang-test-pq96r 2/2 Running 0 9m37s 10.55.140.59 k8s1 <none> <none>
[root@k8s1-dev-common1.pgy01.ynode.cn demo]# kubectl delete pod -n mfs csi-moosefs-node-liuchang-test-pq96r
pod "csi-moosefs-node-liuchang-test-pq96r" deleted
[root@k8s1 demo]#
[root@k8s1 demo]#
[root@k8s1 demo]# systemctl status mfs-fuse-pvc-79be2cc4-411e-47ed-947f-dbd2150b40e6
● mfs-fuse-pvc-79be2cc4-411e-47ed-947f-dbd2150b40e6.service - mfs fuse mount of pvc-79be2cc4-411e-47ed-947f-dbd2150b40e6
Loaded: loaded (/run/systemd/transient/mfs-fuse-pvc-79be2cc4-411e-47ed-947f-dbd2150b40e6.service; transient)
Transient: yes
Active: active (running) since Tue 2023-12-26 14:40:18 CST; 1min 56s ago
Main PID: 203854 (mount)
Tasks: 17 (limit: 153589)
Memory: 263.6M
CPU: 534ms
CGroup: /system.slice/mfs-fuse-pvc-79be2cc4-411e-47ed-947f-dbd2150b40e6.service
├─203854 mount -f -t moosefs 10.109.148.254:9421:/liuchang-test /var/lib/kubelet/plugins/kubernetes.io/csi/liuchang-test.csi.moosefs.com/73d5febbce3f883aac9afb4f4311ff27dafc29eb61b4fd9c24a1a68327d59a9f/globalmount
└─203855 "mfsmount (mounted on: /data1/kubelet/plugins/kubernetes.io/csi/liuchang-test.csi.moosefs.com/73d5febbce3f883aac9afb4f4311ff27dafc29eb61b4fd9c24a1a68327d59a9f/globalmount)"
Dec 26 14:40:18 k8s1 mount[203855]: mfsmount[203855]: out of memory killer disabled
Dec 26 14:40:18 k8s1 mount[203855]: mfsmount[203855]: monotonic clock function: clock_gettime
Dec 26 14:40:18 k8s1 mfsmount[203855]: setting glibc malloc arena max to 4
Dec 26 14:40:18 k8s1 mfsmount[203855]: setting glibc malloc arena test to 4
Dec 26 14:40:18 k8s1 mfsmount[203855]: out of memory killer disabled
Dec 26 14:40:18 k8s1 mfsmount[203855]: monotonic clock function: clock_gettime
Dec 26 14:40:18 k8s1 mount[203855]: mfsmount[203855]: monotonic clock speed: 331293 ops / 10 mili seconds
Dec 26 14:40:18 k8s1 mfsmount[203855]: monotonic clock speed: 331293 ops / 10 mili seconds
Dec 26 14:40:19 k8s1 mount[203855]: mfsmount[203855]: my st_dev: 2097256
Dec 26 14:40:19 k8s1 mfsmount[203855]: my st_dev: 2097256
[root@k8s1 demo]#
[root@k8s1 demo]# kubectl exec -it mfs-subpath-6db68d88d4-6z8vw -- df -h
Filesystem Size Used Available Use% Mounted on
overlay 198.9G 33.6G 165.3G 17% /
tmpfs 64.0M 0 64.0M 0% /dev
mfs#10.109.148.254:9421
10.0G 0 10.0G 0% /data
/dev/sda2 198.9G 33.6G 165.3G 17% /etc/hosts
/dev/sda2 198.9G 33.6G 165.3G 17% /dev/termination-log
/dev/sda2 198.9G 33.6G 165.3G 17% /etc/hostname
/dev/sda2 198.9G 33.6G 165.3G 17% /etc/resolv.conf
shm 64.0M 0 64.0M 0% /dev/shm
tmpfs 18.6G 12.0K 18.6G 0% /run/secrets/kubernetes.io/serviceaccount
tmpfs 11.7G 0 11.7G 0% /proc/acpi
tmpfs 64.0M 0 64.0M 0% /proc/kcore
tmpfs 64.0M 0 64.0M 0% /proc/keys
tmpfs 64.0M 0 64.0M 0% /proc/timer_list
tmpfs 11.7G 0 11.7G 0% /proc/scsi
tmpfs 11.7G 0 11.7G 0% /sys/firmware
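For the teardown side (NodeUnstageVolume), stopping the transient unit should be enough: ExecStopPost performs the lazy umount and CollectMode=inactive-or-failed lets systemd garbage-collect the unit afterwards. Continuing the sketch above (same package and imports, equally hypothetical):

// stopFuseUnit stops the transient unit created during NodeStageVolume.
// systemd then runs ExecStopPost (umount -f -l) on the staging path and,
// because of CollectMode=inactive-or-failed, garbage-collects the unit.
func stopFuseUnit(ctx context.Context, pvName string) error {
	conn, err := systemd.NewSystemConnectionContext(ctx)
	if err != nil {
		return fmt.Errorf("connect to systemd: %w", err)
	}
	defer conn.Close()

	unit := fmt.Sprintf("mfs-fuse-%s.service", pvName)
	done := make(chan string, 1)
	if _, err := conn.StopUnitContext(ctx, unit, "replace", done); err != nil {
		return fmt.Errorf("stop transient unit %s: %w", unit, err)
	}
	if res := <-done; res != "done" {
		return fmt.Errorf("stopping %s finished with result %q", unit, res)
	}
	return nil
}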