Closed rbo closed 10 months ago
After applying the change, topolvm has problems:
oc logs topolvm-node-s6wb9
Defaulted container "lvmd" out of: lvmd, topolvm-node, csi-registrar, liveness-probe, file-checker (init)
2023-11-21T08:50:39.587821Z topolvm-node-s6wb9 lvmd info: "configuration file loaded: " device_classes="[0xc000686b00]" file_name="/etc/topolvm/lvmd.yaml" socket_name="/run/lvmd/lvmd.sock"
2023-11-21T08:50:39.589404Z topolvm-node-s6wb9 lvmd info: "invoking LVM command" args="[fullreport --reportformat json --units b --nosuffix --configreport vg -o vg_name,vg_uuid,vg_size,vg_free --configreport lv -o lv_uuid,lv_name,lv_full_name,lv_path,lv_size,lv_kernel_major,lv_kernel_minor,origin,origin_size,pool_lv,lv_tags,lv_attr,vg_name,data_percent,metadata_percent,pool_lv --configreport pv -o, --configreport pvseg -o, --configreport seg -o,]"
WARNING: devices file is missing /dev/mapper/36001e67568ced0002b8750fa92a99f10 (253:7) using multipath component /dev/sdb.
See lvmdevices --update for devices file update.
2023-11-21T08:50:39.650940Z topolvm-node-s6wb9 lvmd error: "Volume group not found:" volume_group="for-etcd"
Error: not found
not found
oc debug node/inf7
Starting pod/inf7-debug ...
To use host binaries, run `chroot /host`
Pod IP: 10.32.96.7
If you don't see a command prompt, try pressing enter.
sh-4.4# sudo su -
sh: sudo: command not found
sh-4.4# chroot /host
sh-5.1# lvm
lvm lvmconfig lvmdiskscan lvmpolld lvmsar
lvm_import_vdo lvmdevices lvmdump lvmsadc
sh-5.1# lvmdevices
Device /dev/sdb IDTYPE=sys_wwid IDNAME=naa.6001e67568ced0002b8750fa92a99f10 DEVNAME=/dev/sdd PVID=oaaoJrUqC3JW19EPhZsDgs1O2GkJS8dF
sh-5.1# lvmdevices --update
WARNING: devices file is missing /dev/mapper/36001e67568ced0002b8750fa92a99f10 (253:7) using multipath component /dev/sdb.
Devices file /dev/sdb is excluded: device is a multipath component.
Removing multipath component /dev/sdb.
Updated devices file to version 1.1.4
sh-5.1# lvmdevices
sh-5.1#
sh-5.1# lsblk /dev/sdb
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINTS
sdb 8:16 0 445.2G 0 disk
|-for--etcd-thin--pool--1_tmeta 253:0 0 204M 0 lvm
| `-for--etcd-thin--pool--1-tpool 253:2 0 400.3G 0 lvm
| |-for--etcd-thin--pool--1 253:3 0 400.3G 1 lvm
| `-for--etcd-92c21198--2741--482b--bec5--f821ecb85951 253:4 0 4G 0 lvm
|-for--etcd-thin--pool--1_tdata 253:1 0 400.3G 0 lvm
| `-for--etcd-thin--pool--1-tpool 253:2 0 400.3G 0 lvm
| |-for--etcd-thin--pool--1 253:3 0 400.3G 1 lvm
| `-for--etcd-92c21198--2741--482b--bec5--f821ecb85951 253:4 0 4G 0 lvm
`-36001e67568ced0002b8750fa92a99f10 253:7 0 445.2G 0 mpath
sh-5.1#
The iSCSI test was successful with multipath:
[root@ucs-blade-server-3 ~]# multipath -l
3600a09803830326d525d4a3370736263 dm-0 NETAPP,LUN C-Mode
size=1.0G features='3 queue_if_no_path pg_init_retries 50' hwhandler='1 alua' wp=rw
|-+- policy='service-time 0' prio=0 status=active
| `- 1:0:0:0 sdc 8:32 active undef running
`-+- policy='service-time 0' prio=0 status=enabled
`- 2:0:0:0 sdd 8:48 active undef running
[root@ucs-blade-server-3 ~]#
https://access.redhat.com/articles/7008552 https://docs.netapp.com/us-en/trident/trident-use/worker-node-prep.html#install-the-iscsi-tools