Open xuanyuanaosheng opened 2 weeks ago
本例子是从旧的HITACHI VSP 迁移至新的 HITACHI VSP
HITACHI VSP
[root@bjdcsrv402 ~]# multipath -ll mpathe (360060e801262eb00504062eb0000004b) dm-4 HITACHI ,OPEN-V size=150G features='1 queue_if_no_path' hwhandler='0' wp=rw `-+- policy='round-robin 0' prio=1 status=active |- 15:0:0:3 sdi 8:128 active ready running |- 16:0:1:3 sdm 8:192 active ready running |- 15:0:1:3 sde 8:64 active ready running `- 16:0:0:3 sdq 65:0 active ready running mpathd (360060e801262eb00504062eb00000049) dm-5 HITACHI ,OPEN-V size=150G features='1 queue_if_no_path' hwhandler='0' wp=rw `-+- policy='round-robin 0' prio=1 status=active |- 15:0:1:1 sdc 8:32 active ready running |- 16:0:1:1 sdk 8:160 active ready running |- 15:0:0:1 sdg 8:96 active ready running `- 16:0:0:1 sdo 8:224 active ready running mpathc (360060e801262eb00504062eb0000004a) dm-3 HITACHI ,OPEN-V size=150G features='1 queue_if_no_path' hwhandler='0' wp=rw `-+- policy='round-robin 0' prio=1 status=active |- 15:0:1:2 sdd 8:48 active ready running |- 16:0:1:2 sdl 8:176 active ready running |- 15:0:0:2 sdh 8:112 active ready running `- 16:0:0:2 sdp 8:240 active ready running mpathb (360060e801262eb00504062eb00000048) dm-2 HITACHI ,OPEN-V size=150G features='1 queue_if_no_path' hwhandler='0' wp=rw `-+- policy='round-robin 0' prio=1 status=active |- 15:0:1:0 sdb 8:16 active ready running |- 16:0:1:0 sdj 8:144 active ready running |- 15:0:0:0 sdf 8:80 active ready running `- 16:0:0:0 sdn 8:208 active ready running
mpathj (360060e80123ba20050403ba200000084) dm-11 HITACHI ,OPEN-V size=150G features='1 queue_if_no_path' hwhandler='0' wp=rw `-+- policy='round-robin 0' prio=1 status=active |- 15:0:3:4 sdy 65:128 active ready running |- 16:0:2:4 sdac 65:192 active ready running |- 15:0:2:4 sdu 65:64 active ready running `- 16:0:3:4 sdag 66:0 active ready running mpathi (360060e80123ba20050403ba200000082) dm-10 HITACHI ,OPEN-V size=150G features='1 queue_if_no_path' hwhandler='0' wp=rw `-+- policy='round-robin 0' prio=1 status=active |- 15:0:2:2 sds 65:32 active ready running |- 16:0:2:2 sdaa 65:160 active ready running |- 15:0:3:2 sdw 65:96 active ready running `- 16:0:3:2 sdae 65:224 active ready running mpathh (360060e80123ba20050403ba200000083) dm-9 HITACHI ,OPEN-V size=150G features='1 queue_if_no_path' hwhandler='0' wp=rw `-+- policy='round-robin 0' prio=1 status=active |- 15:0:2:3 sdt 65:48 active ready running |- 16:0:2:3 sdab 65:176 active ready running |- 15:0:3:3 sdx 65:112 active ready running `- 16:0:3:3 sdaf 65:240 active ready running mpathg (360060e80123ba20050403ba200000081) dm-8 HITACHI ,OPEN-V size=150G features='1 queue_if_no_path' hwhandler='0' wp=rw `-+- policy='round-robin 0' prio=1 status=active |- 15:0:2:1 sdr 65:16 active ready running |- 16:0:2:1 sdz 65:144 active ready running |- 15:0:3:1 sdv 65:80 active ready running `- 16:0:3:1 sdad 65:208 active ready running
vgs
pvs
lvs
lsblk
vgdisplay
# 查看vg存储状态 [root@bjdcsrv402 ~]# vgs VG #PV #LV #SN Attr VSize VFree ol 1 3 0 wz--n- 432.00g 4.00m vgMONGOtest 4 1 0 wz--n- <599.88g 896.00m # 可以看出原来的数据存储情况 [root@bjdcsrv402 ~]# pvs PV VG Fmt Attr PSize PFree /dev/mapper/mpathb vgMONGOtest lvm2 a-- <149.97g 224.00m /dev/mapper/mpathc vgMONGOtest lvm2 a-- <149.97g 224.00m /dev/mapper/mpathd vgMONGOtest lvm2 a-- <149.97g 224.00m /dev/mapper/mpathe vgMONGOtest lvm2 a-- <149.97g 224.00m /dev/sda2 ol lvm2 a-- 432.00g 4.00m # 查看原有的数据分布情况 [root@bjdcsrv402 ~]# lsblk NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT sdy 65:128 0 150G 0 disk └─mpathj 252:11 0 150G 0 mpath sdf 8:80 0 150G 0 disk └─mpathb 252:2 0 150G 0 mpath └─vgMONGOtest-lvMONGOtest 252:6 0 599G 0 lvm /data sdab 65:176 0 150G 0 disk └─mpathh 252:9 0 150G 0 mpath sdo 8:224 0 150G 0 disk └─mpathd 252:5 0 150G 0 mpath └─vgMONGOtest-lvMONGOtest 252:6 0 599G 0 lvm /data sdw 65:96 0 150G 0 disk └─mpathi 252:10 0 150G 0 mpath sdd 8:48 0 150G 0 disk └─mpathc 252:3 0 150G 0 mpath └─vgMONGOtest-lvMONGOtest 252:6 0 599G 0 lvm /data sdm 8:192 0 150G 0 disk └─mpathe 252:4 0 150G 0 mpath └─vgMONGOtest-lvMONGOtest 252:6 0 599G 0 lvm /data sdu 65:64 0 150G 0 disk └─mpathj 252:11 0 150G 0 mpath sdb 8:16 0 150G 0 disk └─mpathb 252:2 0 150G 0 mpath └─vgMONGOtest-lvMONGOtest 252:6 0 599G 0 lvm /data sdk 8:160 0 150G 0 disk └─mpathd 252:5 0 150G 0 mpath └─vgMONGOtest-lvMONGOtest 252:6 0 599G 0 lvm /data sds 65:32 0 150G 0 disk └─mpathi 252:10 0 150G 0 mpath sdag 66:0 0 150G 0 disk └─mpathj 252:11 0 150G 0 mpath sdi 8:128 0 150G 0 disk └─mpathe 252:4 0 150G 0 mpath └─vgMONGOtest-lvMONGOtest 252:6 0 599G 0 lvm /data sdq 65:0 0 150G 0 disk └─mpathe 252:4 0 150G 0 mpath └─vgMONGOtest-lvMONGOtest 252:6 0 599G 0 lvm /data sdae 65:224 0 150G 0 disk └─mpathi 252:10 0 150G 0 mpath sdz 65:144 0 150G 0 disk └─mpathg 252:8 0 150G 0 mpath sdg 8:96 0 150G 0 disk └─mpathd 252:5 0 150G 0 mpath └─vgMONGOtest-lvMONGOtest 252:6 0 599G 0 lvm /data sdac 65:192 0 150G 0 disk └─mpathj 252:11 0 150G 0 mpath sdx 
65:112 0 150G 0 disk └─mpathh 252:9 0 150G 0 mpath sde 8:64 0 150G 0 disk └─mpathe 252:4 0 150G 0 mpath └─vgMONGOtest-lvMONGOtest 252:6 0 599G 0 lvm /data sdaa 65:160 0 150G 0 disk └─mpathi 252:10 0 150G 0 mpath sdn 8:208 0 150G 0 disk └─mpathb 252:2 0 150G 0 mpath └─vgMONGOtest-lvMONGOtest 252:6 0 599G 0 lvm /data sdv 65:80 0 150G 0 disk └─mpathg 252:8 0 150G 0 mpath sdc 8:32 0 150G 0 disk └─mpathd 252:5 0 150G 0 mpath └─vgMONGOtest-lvMONGOtest 252:6 0 599G 0 lvm /data sdl 8:176 0 150G 0 disk └─mpathc 252:3 0 150G 0 mpath └─vgMONGOtest-lvMONGOtest 252:6 0 599G 0 lvm /data sdt 65:48 0 150G 0 disk └─mpathh 252:9 0 150G 0 mpath sda 8:0 0 558.4G 0 disk ├─sda2 8:2 0 432G 0 part │ ├─ol-swap 252:1 0 32G 0 lvm [SWAP] │ ├─ol-root 252:0 0 100G 0 lvm / │ └─ol-logs 252:7 0 300G 0 lvm /logs └─sda1 8:1 0 1G 0 part /boot sdj 8:144 0 150G 0 disk └─mpathb 252:2 0 150G 0 mpath └─vgMONGOtest-lvMONGOtest 252:6 0 599G 0 lvm /data sdr 65:16 0 150G 0 disk └─mpathg 252:8 0 150G 0 mpath sdaf 65:240 0 150G 0 disk └─mpathh 252:9 0 150G 0 mpath sdh 8:112 0 150G 0 disk └─mpathc 252:3 0 150G 0 mpath └─vgMONGOtest-lvMONGOtest 252:6 0 599G 0 lvm /data sdp 8:240 0 150G 0 disk └─mpathc 252:3 0 150G 0 mpath └─vgMONGOtest-lvMONGOtest 252:6 0 599G 0 lvm /data sdad 65:208 0 150G 0 disk └─mpathg 252:8 0 150G 0 mpath [root@bjdcsrv402 ~]# lvs LV VG Attr LSize Pool Origin Data% Meta% Move Log Cpy%Sync Convert logs ol -wi-ao---- 300.00g root ol -wi-ao---- 100.00g swap ol -wi-ao---- 32.00g lvMONGOtest vgMONGOtest -wi-ao---- 599.00g [root@bjdcsrv402 ~]# vgdisplay --- Volume group --- VG Name ol System ID Format lvm2 Metadata Areas 1 Metadata Sequence No 4 VG Access read/write VG Status resizable MAX LV 0 Cur LV 3 Open LV 3 Max PV 0 Cur PV 1 Act PV 1 VG Size 432.00 GiB PE Size 4.00 MiB Total PE 110593 Alloc PE / Size 110592 / 432.00 GiB Free PE / Size 1 / 4.00 MiB VG UUID Y4nfqc-uamr-nJir-7czy-3Mmz-BwQH-GjUarL --- Volume group --- VG Name vgMONGOtest System ID Format lvm2 Metadata Areas 4 Metadata Sequence No 
5 VG Access read/write VG Status resizable MAX LV 0 Cur LV 1 Open LV 1 Max PV 0 Cur PV 4 Act PV 4 VG Size <599.88 GiB PE Size 32.00 MiB Total PE 19196 Alloc PE / Size 19168 / 599.00 GiB Free PE / Size 28 / 896.00 MiB VG UUID glidh2-DDoZ-nb82-7mkm-0ul2-WPFB-7vYG0j 可以看出原来的VG里面包含的4块150G的盘组为: `mpathe`, `mpathd`,`mpathc`,`mpathb` ; 新增的4个150G的盘组为：`mpathi`, `mpathj`, `mpathh`, `mpathg`.
VG
[root@bjdcsrv402 ~]# vgextend vgMONGOtest /dev/mapper/mpathi Physical volume "/dev/mapper/mpathi" successfully created. Volume group "vgMONGOtest" successfully extended [root@bjdcsrv402 ~]# vgextend vgMONGOtest /dev/mapper/mpathj Physical volume "/dev/mapper/mpathj" successfully created. Volume group "vgMONGOtest" successfully extended [root@bjdcsrv402 ~]# vgextend vgMONGOtest /dev/mapper/mpathh Physical volume "/dev/mapper/mpathh" successfully created. Volume group "vgMONGOtest" successfully extended [root@bjdcsrv402 ~]# vgextend vgMONGOtest /dev/mapper/mpathg Physical volume "/dev/mapper/mpathg" successfully created. Volume group "vgMONGOtest" successfully extended **迁移规划:由于每块盘组的大小都是150G,所以只需要随便对应起来进行迁移即可。**
e ---> j d ---> i c ---> h b ---> g
[root@bjdcsrv402 ~]# pvmove /dev/mapper/mpathe /dev/mapper/mpathj ... [root@bjdcsrv402 ~]# pvmove /dev/mapper/mpathd /dev/mapper/mpathi ··· [root@bjdcsrv402 ~]# pvmove /dev/mapper/mpathc /dev/mapper/mpathh ··· [root@bjdcsrv402 ~]# pvmove /dev/mapper/mpathb /dev/mapper/mpathg /dev/mapper/mpathb: Moved: 0.00% /dev/mapper/mpathb: Moved: 3.28% /dev/mapper/mpathb: Moved: 6.70% /dev/mapper/mpathb: Moved: 9.81% /dev/mapper/mpathb: Moved: 13.19% /dev/mapper/mpathb: Moved: 16.76% /dev/mapper/mpathb: Moved: 20.22% /dev/mapper/mpathb: Moved: 23.79% /dev/mapper/mpathb: Moved: 27.36% /dev/mapper/mpathb: Moved: 30.91% /dev/mapper/mpathb: Moved: 34.43% /dev/mapper/mpathb: Moved: 37.83% /dev/mapper/mpathb: Moved: 41.17% /dev/mapper/mpathb: Moved: 44.51% /dev/mapper/mpathb: Moved: 47.75% /dev/mapper/mpathb: Moved: 50.63% /dev/mapper/mpathb: Moved: 53.23% /dev/mapper/mpathb: Moved: 56.62% /dev/mapper/mpathb: Moved: 59.33% /dev/mapper/mpathb: Moved: 63.33% /dev/mapper/mpathb: Moved: 66.36% /dev/mapper/mpathb: Moved: 69.03% /dev/mapper/mpathb: Moved: 72.58% /dev/mapper/mpathb: Moved: 75.61% /dev/mapper/mpathb: Moved: 78.46% /dev/mapper/mpathb: Moved: 81.62% /dev/mapper/mpathb: Moved: 84.58% /dev/mapper/mpathb: Moved: 87.17% /dev/mapper/mpathb: Moved: 90.19% /dev/mapper/mpathb: Moved: 93.38% /dev/mapper/mpathb: Moved: 96.56% /dev/mapper/mpathb: Moved: 99.92%
multipath -ll
[root@bjdcsrv402 ~]# pvs PV VG Fmt Attr PSize PFree /dev/mapper/mpathb vgMONGOtest lvm2 a-- <149.97g <149.97g /dev/mapper/mpathc vgMONGOtest lvm2 a-- <149.97g <149.97g /dev/mapper/mpathd vgMONGOtest lvm2 a-- <149.97g <149.97g /dev/mapper/mpathe vgMONGOtest lvm2 a-- <149.97g <149.97g /dev/mapper/mpathg vgMONGOtest lvm2 a-- <149.97g 224.00m /dev/mapper/mpathh vgMONGOtest lvm2 a-- <149.97g 224.00m /dev/mapper/mpathi vgMONGOtest lvm2 a-- <149.97g 224.00m /dev/mapper/mpathj vgMONGOtest lvm2 a-- <149.97g 224.00m /dev/sda2 ol lvm2 a-- 432.00g 4.00m
[root@bjdcsrv402 ~]# vgreduce vgMONGOtest /dev/mapper/mpathe Removed "/dev/mapper/mpathe" from volume group "vgMONGOtest" [root@bjdcsrv402 ~]# vgreduce vgMONGOtest /dev/mapper/mpathd Removed "/dev/mapper/mpathd" from volume group "vgMONGOtest" [root@bjdcsrv402 ~]# vgreduce vgMONGOtest /dev/mapper/mpathc Removed "/dev/mapper/mpathc" from volume group "vgMONGOtest" [root@bjdcsrv402 ~]# vgreduce vgMONGOtest /dev/mapper/mpathb Removed "/dev/mapper/mpathb" from volume group "vgMONGOtest"
[root@bjdcsrv402 ~]# kpartx -d /dev/mapper/mpathe [root@bjdcsrv402 ~]# kpartx -d /dev/mapper/mpathd [root@bjdcsrv402 ~]# kpartx -d /dev/mapper/mpathc [root@bjdcsrv402 ~]# kpartx -d /dev/mapper/mpathb [root@bjdcsrv402 ~]# multipath -f /dev/mapper/mpathe [root@bjdcsrv402 ~]# multipath -f /dev/mapper/mpathd [root@bjdcsrv402 ~]# multipath -f /dev/mapper/mpathc [root@bjdcsrv402 ~]# multipath -f /dev/mapper/mpathb ## 可以看出，旧的磁盘组已经被删除 [root@bjdcsrv402 ~]# multipath -ll mpathj (360060e80123ba20050403ba200000084) dm-11 HITACHI ,OPEN-V size=150G features='1 queue_if_no_path' hwhandler='0' wp=rw `-+- policy='round-robin 0' prio=1 status=active |- 15:0:3:4 sdy 65:128 active ready running |- 16:0:2:4 sdac 65:192 active ready running |- 15:0:2:4 sdu 65:64 active ready running `- 16:0:3:4 sdag 66:0 active ready running mpathi (360060e80123ba20050403ba200000082) dm-10 HITACHI ,OPEN-V size=150G features='1 queue_if_no_path' hwhandler='0' wp=rw `-+- policy='round-robin 0' prio=1 status=active |- 15:0:2:2 sds 65:32 active ready running |- 16:0:2:2 sdaa 65:160 active ready running |- 15:0:3:2 sdw 65:96 active ready running `- 16:0:3:2 sdae 65:224 active ready running mpathh (360060e80123ba20050403ba200000083) dm-9 HITACHI ,OPEN-V size=150G features='1 queue_if_no_path' hwhandler='0' wp=rw `-+- policy='round-robin 0' prio=1 status=active |- 15:0:2:3 sdt 65:48 active ready running |- 16:0:2:3 sdab 65:176 active ready running |- 15:0:3:3 sdx 65:112 active ready running `- 16:0:3:3 sdaf 65:240 active ready running mpathg (360060e80123ba20050403ba200000081) dm-8 HITACHI ,OPEN-V size=150G features='1 queue_if_no_path' hwhandler='0' wp=rw `-+- policy='round-robin 0' prio=1 status=active |- 15:0:2:1 sdr 65:16 active ready running |- 16:0:2:1 sdz 65:144 active ready running |- 15:0:3:1 sdv 65:80 active ready running `- 16:0:3:1 sdad 65:208 active ready running 至此，底层存储迁移完毕。
REST_API_Reference_VSP_5000_VSP_E_VSP_Gx00_Fx00_MK-98RD9014-12.pdf
本例子是从旧的
HITACHI VSP
迁移至新的HITACHI VSP
步骤一:查看旧服务器的数据存储情况:
步骤二:联系存储团队,将新的存储挂载到服务器上
步骤三:使用
vgs
pvs
lvs
lsblk
vgdisplay
查看状态
步骤四：将新分配的存储增加至
VG
e ---> j d ---> i c ---> h b ---> g
步骤五:按照计划进行迁移
步骤六：重新使用命令
vgs
pvs
lvs
lsblk
vgdisplay
multipath -ll
查看状态
步骤七：将旧的盘组从vg中移除
步骤八:移除旧的磁盘组