linux-system-roles / storage

Ansible role for linux storage management
https://linux-system-roles.github.io/storage/
MIT License
101 stars 59 forks source link

storage: size doesn't work for partition pool #120

Open yizhanglinux opened 4 years ago

yizhanglinux commented 4 years ago

playbook

---
- hosts: all
  become: true
  vars:
    mount_location: '/opt/test1'
    volume_group_size: '5g'
    volume_size_before: '4g'
    volume_size_after: '10g'
    storage_safe_mode: false    

  tasks:
    - include_role:
        name: storage

    - include_tasks: get_unused_disk.yml
      vars:
        min_size: "{{ volume_group_size }}"
        max_return: 1

    - name: Create pool with partition
      include_role:
        name: storage
      vars:
          storage_pools:
            - name: foo
              disks: "{{ unused_disks }}"
              type: partition
              volumes:
                - name: test1
                  type: partition
                  fs_type: 'ext4'
                  size: '4g'
                  mount_point: "{{ mount_location }}"

ansible log

TASK [storage : manage the pools and volumes to match the specified state] ****************************************************************************************************************************************
task path: /root/test/storage/tasks/main-blivet.yml:104
<localhost> ESTABLISH LOCAL CONNECTION FOR USER: root
<localhost> EXEC /bin/sh -c 'echo ~root && sleep 0'
<localhost> EXEC /bin/sh -c '( umask 77 && mkdir -p "` echo /root/.ansible/tmp `"&& mkdir /root/.ansible/tmp/ansible-tmp-1593845989.9086514-186337-143479670088877 && echo ansible-tmp-1593845989.9086514-186337-143479670088877="` echo /root/.ansible/tmp/ansible-tmp-1593845989.9086514-186337-143479670088877 `" ) && sleep 0'
Using module file /root/test/storage/library/blivet.py
<localhost> PUT /root/.ansible/tmp/ansible-local-1856838dp9htp0/tmpghd9odlw TO /root/.ansible/tmp/ansible-tmp-1593845989.9086514-186337-143479670088877/AnsiballZ_blivet.py
<localhost> EXEC /bin/sh -c 'chmod u+x /root/.ansible/tmp/ansible-tmp-1593845989.9086514-186337-143479670088877/ /root/.ansible/tmp/ansible-tmp-1593845989.9086514-186337-143479670088877/AnsiballZ_blivet.py && sleep 0'
<localhost> EXEC /bin/sh -c '/usr/libexec/platform-python /root/.ansible/tmp/ansible-tmp-1593845989.9086514-186337-143479670088877/AnsiballZ_blivet.py && sleep 0'
<localhost> EXEC /bin/sh -c 'rm -f -r /root/.ansible/tmp/ansible-tmp-1593845989.9086514-186337-143479670088877/ > /dev/null 2>&1 && sleep 0'
changed: [localhost] => {
    "actions": [
        {
            "action": "create format",
            "device": "/dev/sdd",
            "fs_type": "disklabel"
        },
        {
            "action": "create device",
            "device": "/dev/sdd1",
            "fs_type": null
        },
        {
            "action": "create format",
            "device": "/dev/sdd1",
            "fs_type": "ext4"
        }
    ],
    "changed": true,
    "crypts": [],
    "invocation": {
        "module_args": {
            "disklabel_type": null,
            "packages_only": false,
            "pools": [
                {
                    "disks": [
                        "sdd"
                    ],
                    "encryption": false,
                    "encryption_cipher": null,
                    "encryption_key_file": null,
                    "encryption_key_size": null,
                    "encryption_luks_version": null,
                    "encryption_passphrase": null,
                    "name": "foo",
                    "raid_level": null,
                    "state": "present",
                    "type": "partition",
                    "volumes": [
                        {
                            "_device": "/dev/sdd1",
                            "_kernel_device": "/dev/sdd1",
                            "_mount_id": "UUID=724bb9aa-9ccb-40d3-bb7b-d15fb76d7004",
                            "_raw_device": "/dev/sdd1",
                            "_raw_kernel_device": "/dev/sdd1",
                            "encryption": false,
                            "encryption_cipher": null,
                            "encryption_key_file": null,
                            "encryption_key_size": null,
                            "encryption_luks_version": null,
                            "encryption_passphrase": null,
                            "fs_create_options": "",
                            "fs_label": "",
                            "fs_overwrite_existing": true,
                            "fs_type": "ext4",
                            "mount_check": 0,
                            "mount_device_identifier": "uuid",
                            "mount_options": "defaults",
                            "mount_passno": 0,
                            "mount_point": "/opt/test1",
                            "name": "test1",
                            "pool": "foo",
                            "raid_chunk_size": null,
                            "raid_device_count": null,
                            "raid_level": null,
                            "raid_metadata_version": null,
                            "raid_spare_count": null,
                            "size": "4g",
                            "state": "present",
                            "type": "partition"
                        }
                    ]
                }
            ],
            "safe_mode": false,
            "use_partitions": null,
            "volumes": []
        }
    },
    "leaves": [
        "/dev/sda1",
        "/dev/sda2",
        "/dev/mapper/rhel_storageqe--62-home",
        "/dev/mapper/rhel_storageqe--62-root",
        "/dev/mapper/rhel_storageqe--62-swap",
        "/dev/sdb",
        "/dev/sdh",
        "/dev/sdi",
        "/dev/sdj",
        "/dev/sdc",
        "/dev/sdk",
        "/dev/sdl1",
        "/dev/sde",
        "/dev/sdf",
        "/dev/sdg",
        "/dev/nvme1n2",
        "/dev/sdd1"
    ],
    "mounts": [
        {
            "dump": 0,
            "fstype": "ext4",
            "opts": "defaults",
            "passno": 0,
            "path": "/opt/test1",
            "src": "UUID=724bb9aa-9ccb-40d3-bb7b-d15fb76d7004",
            "state": "mounted"
        }
    ],
    "packages": [
        "xfsprogs",
        "lvm2",
        "dosfstools",
        "e2fsprogs"
    ],
    "pools": [
        {
            "disks": [
                "sdd"
            ],
            "encryption": false,
            "encryption_cipher": null,
            "encryption_key_file": null,
            "encryption_key_size": null,
            "encryption_luks_version": null,
            "encryption_passphrase": null,
            "name": "foo",
            "raid_level": null,
            "state": "present",
            "type": "partition",
            "volumes": [
                {
                    "_device": "/dev/sdd1",
                    "_kernel_device": "/dev/sdd1",
                    "_mount_id": "UUID=724bb9aa-9ccb-40d3-bb7b-d15fb76d7004",
                    "_raw_device": "/dev/sdd1",
                    "_raw_kernel_device": "/dev/sdd1",
                    "encryption": false,
                    "encryption_cipher": null,
                    "encryption_key_file": null,
                    "encryption_key_size": null,
                    "encryption_luks_version": null,
                    "encryption_passphrase": null,
                    "fs_create_options": "",
                    "fs_label": "",
                    "fs_overwrite_existing": true,
                    "fs_type": "ext4",
                    "mount_check": 0,
                    "mount_device_identifier": "uuid",
                    "mount_options": "defaults",
                    "mount_passno": 0,
                    "mount_point": "/opt/test1",
                    "name": "test1",
                    "pool": "foo",
                    "raid_chunk_size": null,
                    "raid_device_count": null,
                    "raid_level": null,
                    "raid_metadata_version": null,
                    "raid_spare_count": null,
                    "size": "4g",
                    "state": "present",
                    "type": "partition"
                }
            ]
        }
    ],
    "volumes": []
}

TASK [storage : debug] ********************************************************************************************************************************************************************************************
task path: /root/test/storage/tasks/main-blivet.yml:113
ok: [localhost] => {
    "blivet_output": {
        "actions": [
            {
                "action": "create format",
                "device": "/dev/sdd",
                "fs_type": "disklabel"
            },
            {
                "action": "create device",
                "device": "/dev/sdd1",
                "fs_type": null
            },
            {
                "action": "create format",
                "device": "/dev/sdd1",
                "fs_type": "ext4"
            }
        ],
        "changed": true,
        "crypts": [],
        "failed": false,
        "leaves": [
            "/dev/sda1",
            "/dev/sda2",
            "/dev/mapper/rhel_storageqe--62-home",
            "/dev/mapper/rhel_storageqe--62-root",
            "/dev/mapper/rhel_storageqe--62-swap",
            "/dev/sdb",
            "/dev/sdh",
            "/dev/sdi",
            "/dev/sdj",
            "/dev/sdc",
            "/dev/sdk",
            "/dev/sdl1",
            "/dev/sde",
            "/dev/sdf",
            "/dev/sdg",
            "/dev/nvme1n2",
            "/dev/sdd1"
        ],
        "mounts": [
            {
                "dump": 0,
                "fstype": "ext4",
                "opts": "defaults",
                "passno": 0,
                "path": "/opt/test1",
                "src": "UUID=724bb9aa-9ccb-40d3-bb7b-d15fb76d7004",
                "state": "mounted"
            }
        ],
        "packages": [
            "xfsprogs",
            "lvm2",
            "dosfstools",
            "e2fsprogs"
        ],
        "pools": [
            {
                "disks": [
                    "sdd"
                ],
                "encryption": false,
                "encryption_cipher": null,
                "encryption_key_file": null,
                "encryption_key_size": null,
                "encryption_luks_version": null,
                "encryption_passphrase": null,
                "name": "foo",
                "raid_level": null,
                "state": "present",
                "type": "partition",
                "volumes": [
                    {
                        "_device": "/dev/sdd1",
                        "_kernel_device": "/dev/sdd1",
                        "_mount_id": "UUID=724bb9aa-9ccb-40d3-bb7b-d15fb76d7004",
                        "_raw_device": "/dev/sdd1",
                        "_raw_kernel_device": "/dev/sdd1",
                        "encryption": false,
                        "encryption_cipher": null,
                        "encryption_key_file": null,
                        "encryption_key_size": null,
                        "encryption_luks_version": null,
                        "encryption_passphrase": null,
                        "fs_create_options": "",
                        "fs_label": "",
                        "fs_overwrite_existing": true,
                        "fs_type": "ext4",
                        "mount_check": 0,
                        "mount_device_identifier": "uuid",
                        "mount_options": "defaults",
                        "mount_passno": 0,
                        "mount_point": "/opt/test1",
                        "name": "test1",
                        "pool": "foo",
                        "raid_chunk_size": null,
                        "raid_device_count": null,
                        "raid_level": null,
                        "raid_metadata_version": null,
                        "raid_spare_count": null,
                        "size": "4g",
                        "state": "present",
                        "type": "partition"
                    }
                ]
            }
        ],
        "volumes": []
    }
}

TASK [storage : set the list of pools for test verification] ******************************************************************************************************************************************************
task path: /root/test/storage/tasks/main-blivet.yml:116
ok: [localhost] => {
    "ansible_facts": {
        "_storage_pools_list": [
            {
                "disks": [
                    "sdd"
                ],
                "encryption": false,
                "encryption_cipher": null,
                "encryption_key_file": null,
                "encryption_key_size": null,
                "encryption_luks_version": null,
                "encryption_passphrase": null,
                "name": "foo",
                "raid_level": null,
                "state": "present",
                "type": "partition",
                "volumes": [
                    {
                        "_device": "/dev/sdd1",
                        "_kernel_device": "/dev/sdd1",
                        "_mount_id": "UUID=724bb9aa-9ccb-40d3-bb7b-d15fb76d7004",
                        "_raw_device": "/dev/sdd1",
                        "_raw_kernel_device": "/dev/sdd1",
                        "encryption": false,
                        "encryption_cipher": null,
                        "encryption_key_file": null,
                        "encryption_key_size": null,
                        "encryption_luks_version": null,
                        "encryption_passphrase": null,
                        "fs_create_options": "",
                        "fs_label": "",
                        "fs_overwrite_existing": true,
                        "fs_type": "ext4",
                        "mount_check": 0,
                        "mount_device_identifier": "uuid",
                        "mount_options": "defaults",
                        "mount_passno": 0,
                        "mount_point": "/opt/test1",
                        "name": "test1",
                        "pool": "foo",
                        "raid_chunk_size": null,
                        "raid_device_count": null,
                        "raid_level": null,
                        "raid_metadata_version": null,
                        "raid_spare_count": null,
                        "size": "4g",
                        "state": "present",
                        "type": "partition"
                    }
                ]
            }
        ]
    },
    "changed": false
}

TASK [storage : set the list of volumes for test verification] ****************************************************************************************************************************************************
task path: /root/test/storage/tasks/main-blivet.yml:120
ok: [localhost] => {
    "ansible_facts": {
        "_storage_volumes_list": []
    },
    "changed": false
}

TASK [storage : remove obsolete mounts] ***************************************************************************************************************************************************************************
task path: /root/test/storage/tasks/main-blivet.yml:136

TASK [storage : tell systemd to refresh its view of /etc/fstab] ***************************************************************************************************************************************************
task path: /root/test/storage/tasks/main-blivet.yml:147
<localhost> ESTABLISH LOCAL CONNECTION FOR USER: root
<localhost> EXEC /bin/sh -c 'echo ~root && sleep 0'
<localhost> EXEC /bin/sh -c '( umask 77 && mkdir -p "` echo /root/.ansible/tmp `"&& mkdir /root/.ansible/tmp/ansible-tmp-1593846001.2313185-186566-44315424536100 && echo ansible-tmp-1593846001.2313185-186566-44315424536100="` echo /root/.ansible/tmp/ansible-tmp-1593846001.2313185-186566-44315424536100 `" ) && sleep 0'
Using module file /usr/lib/python3.6/site-packages/ansible/modules/system/systemd.py
<localhost> PUT /root/.ansible/tmp/ansible-local-1856838dp9htp0/tmpf6tkcx9w TO /root/.ansible/tmp/ansible-tmp-1593846001.2313185-186566-44315424536100/AnsiballZ_systemd.py
<localhost> EXEC /bin/sh -c 'chmod u+x /root/.ansible/tmp/ansible-tmp-1593846001.2313185-186566-44315424536100/ /root/.ansible/tmp/ansible-tmp-1593846001.2313185-186566-44315424536100/AnsiballZ_systemd.py && sleep 0'
<localhost> EXEC /bin/sh -c '/usr/libexec/platform-python /root/.ansible/tmp/ansible-tmp-1593846001.2313185-186566-44315424536100/AnsiballZ_systemd.py && sleep 0'
<localhost> EXEC /bin/sh -c 'rm -f -r /root/.ansible/tmp/ansible-tmp-1593846001.2313185-186566-44315424536100/ > /dev/null 2>&1 && sleep 0'
ok: [localhost] => {
    "changed": false,
    "invocation": {
        "module_args": {
            "daemon_reexec": false,
            "daemon_reload": true,
            "enabled": null,
            "force": null,
            "masked": null,
            "name": null,
            "no_block": false,
            "scope": null,
            "state": null,
            "user": null
        }
    },
    "name": null,
    "status": {}
}

TASK [storage : set up new/current mounts] ************************************************************************************************************************************************************************
task path: /root/test/storage/tasks/main-blivet.yml:152
<localhost> ESTABLISH LOCAL CONNECTION FOR USER: root
<localhost> EXEC /bin/sh -c 'echo ~root && sleep 0'
<localhost> EXEC /bin/sh -c '( umask 77 && mkdir -p "` echo /root/.ansible/tmp `"&& mkdir /root/.ansible/tmp/ansible-tmp-1593846003.272524-186605-26342796838830 && echo ansible-tmp-1593846003.272524-186605-26342796838830="` echo /root/.ansible/tmp/ansible-tmp-1593846003.272524-186605-26342796838830 `" ) && sleep 0'
Using module file /usr/lib/python3.6/site-packages/ansible/modules/system/mount.py
<localhost> PUT /root/.ansible/tmp/ansible-local-1856838dp9htp0/tmpro_sbuq7 TO /root/.ansible/tmp/ansible-tmp-1593846003.272524-186605-26342796838830/AnsiballZ_mount.py
<localhost> EXEC /bin/sh -c 'chmod u+x /root/.ansible/tmp/ansible-tmp-1593846003.272524-186605-26342796838830/ /root/.ansible/tmp/ansible-tmp-1593846003.272524-186605-26342796838830/AnsiballZ_mount.py && sleep 0'
<localhost> EXEC /bin/sh -c '/usr/libexec/platform-python /root/.ansible/tmp/ansible-tmp-1593846003.272524-186605-26342796838830/AnsiballZ_mount.py && sleep 0'
<localhost> EXEC /bin/sh -c 'rm -f -r /root/.ansible/tmp/ansible-tmp-1593846003.272524-186605-26342796838830/ > /dev/null 2>&1 && sleep 0'
changed: [localhost] => (item={'src': 'UUID=724bb9aa-9ccb-40d3-bb7b-d15fb76d7004', 'path': '/opt/test1', 'fstype': 'ext4', 'opts': 'defaults', 'dump': 0, 'passno': 0, 'state': 'mounted'}) => {
    "ansible_loop_var": "mount_info",
    "changed": true,
    "dump": "0",
    "fstab": "/etc/fstab",
    "fstype": "ext4",
    "invocation": {
        "module_args": {
            "backup": false,
            "boot": true,
            "dump": null,
            "fstab": null,
            "fstype": "ext4",
            "opts": "defaults",
            "passno": null,
            "path": "/opt/test1",
            "src": "UUID=724bb9aa-9ccb-40d3-bb7b-d15fb76d7004",
            "state": "mounted"
        }
    },
    "mount_info": {
        "dump": 0,
        "fstype": "ext4",
        "opts": "defaults",
        "passno": 0,
        "path": "/opt/test1",
        "src": "UUID=724bb9aa-9ccb-40d3-bb7b-d15fb76d7004",
        "state": "mounted"
    },
    "name": "/opt/test1",
    "opts": "defaults",
    "passno": "0",
    "src": "UUID=724bb9aa-9ccb-40d3-bb7b-d15fb76d7004"
}

TASK [storage : tell systemd to refresh its view of /etc/fstab] ***************************************************************************************************************************************************
task path: /root/test/storage/tasks/main-blivet.yml:163
<localhost> ESTABLISH LOCAL CONNECTION FOR USER: root
<localhost> EXEC /bin/sh -c 'echo ~root && sleep 0'
<localhost> EXEC /bin/sh -c '( umask 77 && mkdir -p "` echo /root/.ansible/tmp `"&& mkdir /root/.ansible/tmp/ansible-tmp-1593846004.2413008-186629-241556125438887 && echo ansible-tmp-1593846004.2413008-186629-241556125438887="` echo /root/.ansible/tmp/ansible-tmp-1593846004.2413008-186629-241556125438887 `" ) && sleep 0'
Using module file /usr/lib/python3.6/site-packages/ansible/modules/system/systemd.py
<localhost> PUT /root/.ansible/tmp/ansible-local-1856838dp9htp0/tmp77agcjux TO /root/.ansible/tmp/ansible-tmp-1593846004.2413008-186629-241556125438887/AnsiballZ_systemd.py
<localhost> EXEC /bin/sh -c 'chmod u+x /root/.ansible/tmp/ansible-tmp-1593846004.2413008-186629-241556125438887/ /root/.ansible/tmp/ansible-tmp-1593846004.2413008-186629-241556125438887/AnsiballZ_systemd.py && sleep 0'
<localhost> EXEC /bin/sh -c '/usr/libexec/platform-python /root/.ansible/tmp/ansible-tmp-1593846004.2413008-186629-241556125438887/AnsiballZ_systemd.py && sleep 0'
<localhost> EXEC /bin/sh -c 'rm -f -r /root/.ansible/tmp/ansible-tmp-1593846004.2413008-186629-241556125438887/ > /dev/null 2>&1 && sleep 0'
ok: [localhost] => {
    "changed": false,
    "invocation": {
        "module_args": {
            "daemon_reexec": false,
            "daemon_reload": true,
            "enabled": null,
            "force": null,
            "masked": null,
            "name": null,
            "no_block": false,
            "scope": null,
            "state": null,
            "user": null
        }
    },
    "name": null,
    "status": {}
}

TASK [storage : Manage /etc/crypttab to account for changes we just made] *****************************************************************************************************************************************
task path: /root/test/storage/tasks/main-blivet.yml:171

TASK [storage : Update facts] *************************************************************************************************************************************************************************************
task path: /root/test/storage/tasks/main-blivet.yml:186
<localhost> ESTABLISH LOCAL CONNECTION FOR USER: root
<localhost> EXEC /bin/sh -c 'echo ~root && sleep 0'
<localhost> EXEC /bin/sh -c '( umask 77 && mkdir -p "` echo /root/.ansible/tmp `"&& mkdir /root/.ansible/tmp/ansible-tmp-1593846005.4868796-186672-253555565078477 && echo ansible-tmp-1593846005.4868796-186672-253555565078477="` echo /root/.ansible/tmp/ansible-tmp-1593846005.4868796-186672-253555565078477 `" ) && sleep 0'
Using module file /usr/lib/python3.6/site-packages/ansible/modules/system/setup.py
<localhost> PUT /root/.ansible/tmp/ansible-local-1856838dp9htp0/tmp51gd2aez TO /root/.ansible/tmp/ansible-tmp-1593846005.4868796-186672-253555565078477/AnsiballZ_setup.py
<localhost> EXEC /bin/sh -c 'chmod u+x /root/.ansible/tmp/ansible-tmp-1593846005.4868796-186672-253555565078477/ /root/.ansible/tmp/ansible-tmp-1593846005.4868796-186672-253555565078477/AnsiballZ_setup.py && sleep 0'
<localhost> EXEC /bin/sh -c '/usr/libexec/platform-python /root/.ansible/tmp/ansible-tmp-1593846005.4868796-186672-253555565078477/AnsiballZ_setup.py && sleep 0'
<localhost> EXEC /bin/sh -c 'rm -f -r /root/.ansible/tmp/ansible-tmp-1593846005.4868796-186672-253555565078477/ > /dev/null 2>&1 && sleep 0'
ok: [localhost]
META: ran handlers
META: ran handlers

PLAY RECAP ********************************************************************************************************************************************************************************************************
localhost                  : ok=43   changed=2    unreachable=0    failed=0    skipped=17   rescued=0    ignored=0  

The size: 4g doesn't work for sdd disk

[root@storageqe-62 storage]# lsblk /dev/sdd
NAME   MAJ:MIN RM   SIZE RO TYPE MOUNTPOINT
sdd      8:48   0 111.8G  0 disk 
└─sdd1   8:49   0 111.8G  0 part 
dwlehman commented 4 years ago

Currently, the only support for partition volumes is for a single partition that fills the entire disk (aka "partition pool").

yizhanglinux commented 4 years ago

Found another interesting thing: with the below playbook, the first task will create one partition spanning the entire disk[1], and the second task will remove the encryption layer and create one 4 GB disk partition. So size works for a partition pool when combined with a remove-encryption-layer operation.

---
- hosts: all
  become: true
  vars:
    storage_safe_mode: false
    mount_location: '/opt/test1'
    volume_size: '5g'

  tasks:
    - include_role:
        name: storage

    - include_tasks: get_unused_disk.yml
      vars:
        min_size: "{{ volume_size }}"
        max_return: 1

    - name: Create an encrypted partition volume w/ default fs
      include_role:
        name: storage
      vars:
        storage_pools:
          - name: foo
            type: partition
            disks: "{{ unused_disks }}"
            volumes:
              - name: test1
                type: partition
                mount_point: "{{ mount_location }}"
                size: 4g
                encryption: true
                encryption_passphrase: 'yabbadabbadoo'

    - shell: lsblk

    - name: Remove the encryption layer
      include_role:
        name: storage
      vars:
        storage_pools:
          - name: foo
            type: partition
            disks: "{{ unused_disks }}"
            volumes:
              - name: test1
                type: partition
                mount_point: "{{ mount_location }}"
                size: 4g
                encryption: false
                encryption_passphrase: 'yabbadabbadoo'

    - shell: lsblk

    - name: Clean up
      include_role:
        name: storage
      vars:
        storage_volumes:
          - name: foo
            type: disk
            disks: "{{ unused_disks }}"
            state: absent

The lsblk output from the two runs:

#lsblk 
    "stdout_lines": [
        "NAME                                          MAJ:MIN RM   SIZE RO TYPE  MOUNTPOINT",
        "sdb                                             8:16   0 279.4G  0 disk  ",
        "└─sdb1                                          8:17   0 279.4G  0 part  ",
        "  └─luks-38db2aa2-5ef8-4a0b-96b7-77bf85658f72 253:3    0 279.4G  0 crypt /opt/test1"
    ]

#lsblk 
    "stdout_lines": [
        "NAME   MAJ:MIN RM   SIZE RO TYPE MOUNTPOINT",
        "sdb      8:16   0 279.4G  0 disk ",
        "└─sdb1   8:17   0     4G  0 part /opt/test1"
    ]
yizhanglinux commented 4 years ago

The lsblk output from the two runs:

#lsblk 
    "stdout_lines": [
        "NAME                                          MAJ:MIN RM   SIZE RO TYPE  MOUNTPOINT",
        "sdb                                             8:16   0 279.4G  0 disk  ",
        "└─sdb1                                          8:17   0 279.4G  0 part  ",
        "  └─luks-38db2aa2-5ef8-4a0b-96b7-77bf85658f72 253:3    0 279.4G  0 crypt /opt/test1"
    ]

#lsblk 
    "stdout_lines": [
        "NAME   MAJ:MIN RM   SIZE RO TYPE MOUNTPOINT",
        "sdb      8:16   0 279.4G  0 disk ",
        "└─sdb1   8:17   0     4G  0 part /opt/test1"
    ]

This issue was fixed by the below PR; the partition's size is now the same as the disk's: https://github.com/linux-system-roles/storage/pull/125