From 468e3638fc05fe99a4be963fb11034f9e3206447 Mon Sep 17 00:00:00 2001
From: Jan Pokorny
Date: Mon, 18 Sep 2023 11:56:06 +0200
Subject: [PATCH] feat: Add support for creating shared LVM setups

- feature requested by GFS2
- adds support for creating shared VGs
- a shared LVM setup needs the lvmlockd service with the dlm lock manager
  running
- to test this change, the ha_cluster system role is used to set up a
  degenerate single-node cluster on localhost
- requires a blivet version with shared LVM setup support
  (https://github.com/storaged-project/blivet/pull/1123)

Note: The test is incomplete; part of it is commented out. Running the
complete test results in an LVM crash and corrupts the machine. Until
resolved, this PR should not be merged.
---
 README.md                         |   9 +++
 defaults/main.yml                 |   2 +
 library/blivet.py                 |   3 +-
 tests/collection-requirements.yml |   3 +
 tests/test-verify-pool.yml        |  14 ++++
 tests/tests_lvm_pool_shared.yml   | 113 ++++++++++++++++++++++++++++++
 6 files changed, 143 insertions(+), 1 deletion(-)
 create mode 100644 tests/collection-requirements.yml
 create mode 100644 tests/tests_lvm_pool_shared.yml

diff --git a/README.md b/README.md
index 7f6f5c58..55f39597 100644
--- a/README.md
+++ b/README.md
@@ -53,6 +53,15 @@ keys:
   This specifies the type of pool to manage.
   Valid values for `type`: `lvm`.
 
+- `shared`
+
+  If set to `true`, the role creates or manages a shared volume group. Requires
+  the `lvmlockd` and `dlm` services to be configured and running.
+  Default: `false`
+
+  __WARNING__: Modifying the `shared` value on an existing pool is a
+  destructive operation: the pool itself will be removed as part of the process.
+
 - `disks`
 
   A list which specifies the set of disks to use as backing storage for the pool.
diff --git a/defaults/main.yml b/defaults/main.yml
index 7703c982..755364ae 100644
--- a/defaults/main.yml
+++ b/defaults/main.yml
@@ -27,6 +27,8 @@ storage_pool_defaults:
   raid_chunk_size: null
   raid_metadata_version: null
 
+  shared: false
+
 storage_volume_defaults:
   state: "present"
   type: lvm
diff --git a/library/blivet.py b/library/blivet.py
index 5e03a9d8..db4d398f 100644
--- a/library/blivet.py
+++ b/library/blivet.py
@@ -1527,7 +1527,7 @@ def _create(self):
         if not self._device:
             members = self._manage_encryption(self._create_members())
             try:
-                pool_device = self._blivet.new_vg(name=self._pool['name'], parents=members)
+                pool_device = self._blivet.new_vg(name=self._pool['name'], parents=members, shared=self._pool['shared'])
             except Exception as e:
                 raise BlivetAnsibleError("failed to set up pool '%s': %s" % (self._pool['name'], str(e)))
@@ -1823,6 +1823,7 @@ def run_module():
             raid_spare_count=dict(type='int'),
             raid_metadata_version=dict(type='str'),
             raid_chunk_size=dict(type='str'),
+            shared=dict(type='bool'),
             state=dict(type='str', default='present', choices=['present', 'absent']),
             type=dict(type='str'),
             volumes=dict(type='list', elements='dict', default=list(),
diff --git a/tests/collection-requirements.yml b/tests/collection-requirements.yml
new file mode 100644
index 00000000..f9afb0d9
--- /dev/null
+++ b/tests/collection-requirements.yml
@@ -0,0 +1,3 @@
+---
+collections:
+  - fedora.linux_system_roles
diff --git a/tests/test-verify-pool.yml b/tests/test-verify-pool.yml
index 55efa196..b4a5ff87 100644
--- a/tests/test-verify-pool.yml
+++ b/tests/test-verify-pool.yml
@@ -15,6 +15,20 @@
 # compression
 # deduplication
 
+- name: Get VG shared value status
+  command: vgs --noheadings --binary -o shared {{ storage_test_pool.name }}
+  register: vgs_dump
+  when: storage_test_pool.type == 'lvm' and storage_test_pool.state == 'present'
+  changed_when: false
+
+- name: Verify that VG shared value checks out
+  assert:
+    that: (storage_test_pool.shared | bool) == ('1' in vgs_dump.stdout)
+    msg: >-
+      Shared VG presence ({{ storage_test_pool.shared }})
+      does not match its expected state ({{ '1' in vgs_dump.stdout }})
+  when: storage_test_pool.type == 'lvm' and storage_test_pool.state == 'present'
+
 - name: Verify pool subset
   include_tasks: "test-verify-pool-{{ storage_test_pool_subset }}.yml"
   loop: "{{ _storage_pool_tests }}"
diff --git a/tests/tests_lvm_pool_shared.yml b/tests/tests_lvm_pool_shared.yml
new file mode 100644
index 00000000..0f1df81f
--- /dev/null
+++ b/tests/tests_lvm_pool_shared.yml
@@ -0,0 +1,113 @@
+---
+- hosts: all
+  become: true
+  vars:
+    storage_safe_mode: false
+    storage_use_partitions: true
+    mount_location1: '/opt/test1'
+    volume1_size: '4g'
+
+  # This test requires the ha_cluster system role to run.
+  # To install it manually: ansible-galaxy install linux-system-roles.ha_cluster
+  tasks:
+    - name: Create cluster
+      ansible.builtin.include_role:
+        name: linux-system-roles.ha_cluster
+      vars:
+        ha_cluster_cluster_name: rhel9-1node
+        # Users should vault-encrypt the password
+        ha_cluster_hacluster_password: hapasswd
+        ha_cluster_extra_packages:
+          - dlm
+          - lvm2-lockd
+        ha_cluster_cluster_properties:
+          - attrs:
+              # Don't do this in production
+              - name: stonith-enabled
+                value: 'false'
+        ha_cluster_resource_primitives:
+          - id: dlm
+            agent: 'ocf:pacemaker:controld'
+            instance_attrs:
+              - attrs:
+                  # Don't do this in production
+                  - name: allow_stonith_disabled
+                    value: 'true'
+          - id: lvmlockd
+            agent: 'ocf:heartbeat:lvmlockd'
+        ha_cluster_resource_groups:
+          - id: locking
+            resource_ids:
+              - dlm
+              - lvmlockd
+
+# - include_role:
+#     name: linux-system-roles.storage
+#
+# - name: Get unused disks
+#   include_tasks: get_unused_disk.yml
+#   vars:
+#     max_return: 1
+#
+# - name: >-
+#     Create a shared LVM pool with a volume mounted on
+#     {{ mount_location1 }}
+#   include_role:
+#     name: linux-system-roles.storage
+#   vars:
+#     storage_pools:
+#       - name: vg1
+#         disks: "{{ unused_disks }}"
+#         type: lvm
+#         shared: true
+#         state: present
+#         volumes:
+#           - name: lv1
+#             size: "{{ volume1_size }}"
+#             mount_point: "{{ mount_location1 }}"
+#
+# - include_tasks: verify-role-results.yml
+#
+# - name: Repeat the previous step to verify idempotence
+#   include_role:
+#     name: linux-system-roles.storage
+#   vars:
+#     storage_pools:
+#       - name: vg1
+#         disks: "{{ unused_disks }}"
+#         type: lvm
+#         shared: true
+#         state: present
+#         volumes:
+#           - name: lv1
+#             size: "{{ volume1_size }}"
+#             mount_point: "{{ mount_location1 }}"
+#
+# - include_tasks: verify-role-results.yml
+#
+# - name: >-
+#     Remove the shared LVM pool created above, mounted on
+#     {{ mount_location1 }}
+#   include_role:
+#     name: linux-system-roles.storage
+#   vars:
+#     storage_pools:
+#       - name: vg1
+#         disks: "{{ unused_disks }}"
+#         type: lvm
+#         shared: true
+#         state: absent
+#         volumes:
+#           - name: lv1
+#             size: "{{ volume1_size }}"
+#             mount_point: "{{ mount_location1 }}"
+#
+# - include_tasks: verify-role-results.yml
+
+    - name: Remove cluster
+      ansible.builtin.include_role:
+        name: linux-system-roles.ha_cluster
+      vars:
+        ha_cluster_cluster_name: rhel9-1node
+        ha_cluster_cluster_present: false
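
For reviewers, a minimal sketch (not part of the patch) of how a playbook would
consume the new `shared` option once the `lvmlockd` and `dlm` services are
running, e.g. via the ha_cluster setup above. The disk name is a placeholder
assumption:

```yaml
- hosts: all
  become: true
  tasks:
    - name: Create a shared VG containing one LV
      include_role:
        name: linux-system-roles.storage
      vars:
        storage_pools:
          - name: vg1
            type: lvm
            disks:
              - sdb          # placeholder; use a real unused disk
            shared: true     # the new option; defaults to false
            volumes:
              - name: lv1
                size: 4g
                mount_point: /opt/test1
```

Setting `state: absent` on the same pool definition removes the shared VG
again, as in the commented-out teardown step of the test.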