diff --git a/README.md b/README.md
index 1ba7c835..6bf289d1 100644
--- a/README.md
+++ b/README.md
@@ -13,6 +13,30 @@ As of now, the role supports managing file systems and mount entries on
 Role Variables
 --------------
 
+__NOTE__: Beginning with version 1.3.0, unspecified parameters are interpreted
+differently for existing and non-existing pools/volumes. For new/non-existent
+pools and volumes, any omitted parameters will use the default value as
+described in `defaults/main.yml`. For existing pools and volumes, omitted
+parameters will inherit whatever setting the pool or volume already has.
+This means that to change/override role defaults in an existing pool or volume,
+you must explicitly specify the new values/settings in the role variables.
+
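+For example (a sketch; the disk and mount point names are purely
+illustrative), a volume created with `fs_type: ext4` keeps ext4 when the
+role is re-run without `fs_type`, rather than being reformatted with the
+`xfs` default from `defaults/main.yml`:
+
+```yaml
+storage_pools:
+  - name: foo
+    disks: ['sdb']
+    volumes:
+      - name: test1
+        size: 4g
+        fs_type: ext4  # omit on later runs; the existing ext4 is preserved
+        mount_point: /opt/test1
+```
+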
""" + log.debug("Updating volume settings from device: %r", self._device) + encrypted = "luks" in self._device.type or self._device.format.type == "luks" + if encrypted and "luks" in self._device.type: + luks_fmt = self._device.parents[0].format + elif encrypted: + luks_fmt = self._device.format + + if param_name == 'size': + self._volume['size'] = int(self._device.size.convert_to()) + elif param_name == 'fs_type' and (self._device.format.type or self._device.format.name != get_format(None).name): + self._volume['fs_type'] = self._device.format.type + elif param_name == 'fs_label': + self._volume['fs_label'] = getattr(self._device.format, 'label', "") or "" + elif param_name == 'mount_point': + self._volume['mount_point'] = getattr(self._device.format, 'mountpoint', None) + elif param_name == 'disks': + self._volume['disks'] = [d.name for d in self._device.disks] + elif param_name == 'encryption': + self._volume['encryption'] = encrypted + elif param_name == 'encryption_key_size' and encrypted: + self._volume['encryption_key_size'] = luks_fmt.key_size + elif param_name == 'encryption_key_file' and encrypted: + self._volume['encryption_key_file'] = luks_fmt.key_file + elif param_name == 'encryption_cipher' and encrypted: + self._volume['encryption_cipher'] = luks_fmt.cipher + elif param_name == 'encryption_luks_version' and encrypted: + self._volume['encryption_luks_version'] = luks_fmt.luks_version + else: + return False + + return True + + def _apply_defaults(self): + global volume_defaults + for name, default in volume_defaults.items(): + if name in self._volume: + continue + + default = None if default in ('none', 'None', 'null') else default + + if self._device: + # Apply values from the device if it already exists. + if not self._update_from_device(name): + self._volume[name] = default + else: + self._volume.setdefault(name, default) + def _get_format(self): """ Return a blivet.formats.DeviceFormat instance for this volume. """ fmt = get_format(self._volume['fs_type'], @@ -427,6 +478,8 @@ def manage(self): # look up the device self._look_up_device() + self._apply_defaults() + # schedule destroy if appropriate if not self.ultimately_present: self._destroy() @@ -617,6 +670,23 @@ def _create_raid_members(self, member_names): return members + def _update_from_device(self, param_name): + """ Return True if param_name's value was retrieved from a looked-up device. """ + if param_name == 'raid_level': + self._volume['raid_level'] = self._device.level.name + elif param_name == 'raid_chunk_size': + self._volume['raid_chunk_size'] = str(self._device.chunk_size) + elif param_name == 'raid_device_count': + self._volume['raid_device_count'] = self._device.member_devices + elif param_name == 'raid_spare_count': + self._volume['raid_spare_count'] = self._device.spares + elif param_name == 'raid_metadata_version': + self._volume['raid_metadata_version'] = self._device.metadata_version + else: + return super(BlivetMDRaidVolume, self)._update_from_device(param_name) + + return True + def _create(self): global safe_mode @@ -675,7 +745,8 @@ def _destroy(self): def _get_blivet_volume(blivet_obj, volume, bpool=None): """ Return a BlivetVolume instance appropriate for the volume dict. 
""" - volume_type = volume.get('type', bpool._pool['type'] if bpool else None) + global volume_defaults + volume_type = volume.get('type', bpool._pool['type'] if bpool else volume_defaults['type']) if volume_type not in _BLIVET_VOLUME_TYPES: raise BlivetAnsibleError("Volume '%s' has unknown type '%s'" % (volume['name'], volume_type)) @@ -700,7 +771,7 @@ def required_packages(self): if self.ultimately_present and self.__class__.blivet_device_class is not None: packages.extend(self.__class__.blivet_device_class._packages) - if self._pool['encryption']: + if self._pool.get('encryption'): packages.extend(get_format('luks').packages) return packages @@ -708,7 +779,7 @@ def required_packages(self): @property def ultimately_present(self): """ Should this pool be present when we are finished? """ - return self._pool['state'] == 'present' + return self._pool.get('state', 'present') == 'present' @property def _is_raid(self): @@ -779,6 +850,69 @@ def _look_up_device(self): self._device = None return # TODO: see if we can create this device w/ the specified name + # Apply encryption keys as appropriate + if any(d.encrypted for d in self._device.parents): + passphrase = self._pool.get("encryption_passphrase") + key_file = self._pool.get("encryption_key_file") + for member in self._device.parents: + if member.parents[0].format.type == "luks": + if passphrase: + member.parents[0].format.passphrase = passphrase + member.parents[0].original_format.passphrase = passphrase + if key_file: + member.parents[0].format.key_file = key_file + member.parents[0].original_format.key_file = key_file + + def _update_from_device(self, param_name): + """ Return True if param_name's value was retrieved from a looked-up device. """ + # We wouldn't have the pool device if the member devices weren't unlocked, so we do not + # have to consider the case where the devices are unlocked like we do for volumes. 
+        encrypted = bool(self._device.parents) and all("luks" in d.type for d in self._device.parents)
+        raid = len(self._device.parents) == 1 and hasattr(self._device.parents[0].raw_device, 'level')
+        log.debug("BlivetPool._update_from_device: %s", self._device)
+
+        if param_name == 'disks':
+            self._pool['disks'] = [d.name for d in self._device.disks]
+        elif param_name == 'encryption':
+            self._pool['encryption'] = encrypted
+        elif param_name == 'encryption_key_size' and encrypted:
+            self._pool['encryption_key_size'] = self._device.parents[0].parents[0].format.key_size
+        elif param_name == 'encryption_key_file' and encrypted:
+            self._pool['encryption_key_file'] = self._device.parents[0].parents[0].format.key_file
+        elif param_name == 'encryption_cipher' and encrypted:
+            self._pool['encryption_cipher'] = self._device.parents[0].parents[0].format.cipher
+        elif param_name == 'encryption_luks_version' and encrypted:
+            self._pool['encryption_luks_version'] = self._device.parents[0].parents[0].format.luks_version
+        elif param_name == 'raid_level' and raid:
+            self._pool['raid_level'] = self._device.parents[0].raw_device.level.name
+        elif param_name == 'raid_chunk_size' and raid:
+            self._pool['raid_chunk_size'] = str(self._device.parents[0].raw_device.chunk_size)
+        elif param_name == 'raid_device_count' and raid:
+            self._pool['raid_device_count'] = self._device.parents[0].raw_device.member_devices
+        elif param_name == 'raid_spare_count' and raid:
+            self._pool['raid_spare_count'] = self._device.parents[0].raw_device.spares
+        elif param_name == 'raid_metadata_version' and raid:
+            self._pool['raid_metadata_version'] = self._device.parents[0].raw_device.metadata_version
+        else:
+            return False
+
+        return True
+
+
+    def _apply_defaults(self):
+        global pool_defaults
+        for name, default in pool_defaults.items():
+            if name in self._pool:
+                continue
+
+            default = None if default in ('none', 'None', 'null') else default
+
+            if self._device:
+                if not self._update_from_device(name):
+                    self._pool[name] = default
+            else:
+                self._pool.setdefault(name, default)
+
     def _create_members(self):
         """ Schedule actions as needed to ensure pool member devices exist. """
         members = list()
@@ -826,7 +963,7 @@ def _create_members(self):
 
     def _get_volumes(self):
         """ Set up BlivetVolume instances for this pool's volumes. """
-        for volume in self._pool['volumes']:
+        for volume in self._pool.get('volumes', []):
             bvolume = _get_blivet_volume(self._blivet, volume, self)
             self._blivet_volumes.append(bvolume)
 
@@ -842,6 +979,7 @@ def manage(self):
         # look up the device
         self._look_up_disks()
         self._look_up_device()
+        self._apply_defaults()
 
         # schedule destroy if appropriate, including member type change
         if not self.ultimately_present:
@@ -932,6 +1070,10 @@ def _create(self):
 
 def _get_blivet_pool(blivet_obj, pool):
     """ Return an appropriate BlivetPool instance for the pool dict. """
""" + if 'type' not in pool: + global pool_defaults + pool['type'] = pool_defaults['type'] + if pool['type'] not in _BLIVET_POOL_TYPES: raise BlivetAnsibleError("Pool '%s' has unknown type '%s'" % (pool['name'], pool['type'])) @@ -1147,6 +1286,8 @@ def run_module(): packages_only=dict(type='bool', required=False, default=False), disklabel_type=dict(type='str', required=False, default=None), safe_mode=dict(type='bool', required=False, default=True), + pool_defaults=dict(type='dict', required=False), + volume_defaults=dict(type='dict', required=False), use_partitions=dict(type='bool', required=False, default=True), diskvolume_mkfs_option_map=dict(type='dict', required=False, default={})) @@ -1187,6 +1328,14 @@ def run_module(): global diskvolume_mkfs_option_map diskvolume_mkfs_option_map = module.params['diskvolume_mkfs_option_map'] + global pool_defaults + if 'pool_defaults' in module.params: + pool_defaults = module.params['pool_defaults'] + + global volume_defaults + if 'volume_defaults' in module.params: + volume_defaults = module.params['volume_defaults'] + b = Blivet() b.reset() fstab = FSTab(b) @@ -1215,7 +1364,7 @@ def action_dict(action): module.fail_json(msg="multiple pools with the same name: {0}".format(",".join(duplicates)), **result) for pool in module.params['pools']: - duplicates = find_duplicate_names(pool['volumes']) + duplicates = find_duplicate_names(pool.get('volumes', list())) if duplicates: module.fail_json(msg="multiple volumes in pool '{0}' with the " "same name: {1}".format(pool['name'], ",".join(duplicates)), diff --git a/library/find_unused_disk.py b/library/find_unused_disk.py index a4dcde7b..0a6fc7d5 100644 --- a/library/find_unused_disk.py +++ b/library/find_unused_disk.py @@ -109,7 +109,7 @@ def get_partitions(disk_path): sys_name = get_sys_name(disk_path) partitions = list() for filename in os.listdir(SYS_CLASS_BLOCK + sys_name): - if re.match(sys_name + 'p?\d+$', filename): + if re.match(sys_name + r'p?\d+$', filename): partitions.append(filename) return partitions diff --git a/tasks/main-blivet.yml b/tasks/main-blivet.yml index cdef16cb..f0e2ee0a 100644 --- a/tasks/main-blivet.yml +++ b/tasks/main-blivet.yml @@ -9,90 +9,20 @@ name: "{{ blivet_package_list }}" state: present -## -## This is all to fill in missing defaults in the list of pools (and their volumes). -## -## The pools are easy, as are the standalone volumes. The pool volumes are trickier, -## perhaps because I am new to ansible/yaml/jinja. 
-##
-- name: initialize internal facts
-  set_fact:
-    _storage_pools: []                     # list of pools w/ defaults applied as necessary
-    _storage_volumes: []                   # list of standalone volumes w/ defaults applied as necessary
-    _storage_vol_defaults: []              # list w/ volume default dict for each pool volume
-    _storage_vols_no_defaults: []          # combined list of pool volumes w/o defaults applied
-    _storage_vols_w_defaults: []           # combined list of volumes w/ defaults applied
-    _storage_vol_pools: []                 # combined list of pool name for each volume
-    _storage_vols_no_defaults_by_pool: {}  # dict w/ pool name keys and pool volume list values
-
-- name: Apply defaults to pools and volumes [1/6]
-  set_fact:
-    _storage_pools: "{{ _storage_pools|default([]) }} + [ {{ storage_pool_defaults|combine(pool) }} ]"
-  loop: "{{ storage_pools|default([]) }}"
-  loop_control:
-    loop_var: pool
-  when: storage_pools is defined
-
-- name: Apply defaults to pools and volumes [2/6]
-  set_fact:
-    _storage_vols_no_defaults: "{{ _storage_vols_no_defaults|default([]) }} + [{{ item.1 }}]"
-    _storage_vol_defaults: "{{ _storage_vol_defaults|default([]) }} + [{{ storage_volume_defaults }}]"
-    _storage_vol_pools: "{{ _storage_vol_pools|default([]) }} + ['{{ item.0.name }}']"
-  loop: "{{ _storage_pools|subelements('volumes', skip_missing=true) }}"
-  when: storage_pools is defined
-
-- name: Apply defaults to pools and volumes [3/6]
-  set_fact:
-    _storage_vols_w_defaults: "{{ _storage_vols_w_defaults|default([]) }} + [ {{ item.1|combine(item.0, {'pool': _storage_vol_pools[idx]}) }} ]"
-  loop: "{{ _storage_vols_no_defaults|zip(_storage_vol_defaults)|list }}"
-  loop_control:
-    index_var: idx
-  when: storage_pools is defined
-
-- name: Apply defaults to pools and volumes [4/6]
-  set_fact:
-    # json_query(...) used instead of "|selectattr('pool', 'equalto', item.name)|list"
-    # as that expression wouldn't work with Jinja versions <2.8
-    _storage_vols_no_defaults_by_pool: "{{ _storage_vols_no_defaults_by_pool|default({})|
-                                           combine({item.name: _storage_vols_w_defaults|json_query('[?pool==`\"{}\"`]'.format(item.name))}) }}"
-  loop: "{{ _storage_pools }}"
-  when: storage_pools is defined
-
-- name: Apply defaults to pools and volumes [5/6]
-  set_fact:
-    _storage_pools: "{{ _storage_pools[:idx] }} +
-                     [ {{ pool|combine({'volumes': _storage_vols_no_defaults_by_pool[pool.name]}) }} ] +
-                     {{ _storage_pools[idx+1:] }}"
-  loop: "{{ _storage_pools }}"
-  loop_control:
-    loop_var: pool
-    index_var: idx
-  when: storage_pools is defined
-
-- name: Apply defaults to pools and volumes [6/6]
-  set_fact:
-    _storage_volumes: "{{ _storage_volumes|default([]) }} + [ {{ storage_volume_defaults|combine(volume) }} ]"
-  loop: "{{ storage_volumes|default([]) }}"
-  loop_control:
-    loop_var: volume
-  when: storage_volumes is defined
-
-##
-## End of absurdly long process to inject defaults into user-specified pools and volumes lists.
-##
-
 - debug:
-    var: _storage_pools
+    var: storage_pools
 
 - debug:
-    var: _storage_volumes
+    var: storage_volumes
 
 - name: get required packages
   blivet:
-    pools: "{{ _storage_pools }}"
-    volumes: "{{ _storage_volumes }}"
+    pools: "{{ storage_pools|default([]) }}"
+    volumes: "{{ storage_volumes|default([]) }}"
     use_partitions: "{{ storage_use_partitions }}"
     disklabel_type: "{{ storage_disklabel_type }}"
+    pool_defaults: "{{ storage_pool_defaults }}"
+    volume_defaults: "{{ storage_volume_defaults }}"
     packages_only: true
   register: package_info
 
@@ -101,15 +31,40 @@
     name: "{{ package_info.packages }}"
     state: present
 
-- name: manage the pools and volumes to match the specified state
-  blivet:
-    pools: "{{ _storage_pools }}"
-    volumes: "{{ _storage_volumes }}"
-    use_partitions: "{{ storage_use_partitions }}"
-    disklabel_type: "{{ storage_disklabel_type }}"
-    safe_mode: "{{ storage_safe_mode }}"
-    diskvolume_mkfs_option_map: "{{ __storage_blivet_diskvolume_mkfs_option_map|d(omit) }}"
-  register: blivet_output
+- service_facts:
+
+- set_fact:
+    # For an explanation of the to_json|from_json silliness, see
+    # https://github.com/ansible-collections/community.general/issues/320
+    storage_cryptsetup_services: "{{ ansible_facts.services|to_json|from_json|json_query('*.name')|json_query('[?starts_with(@, `\"systemd-cryptsetup@\"`)]') }}"
+
+- block:
+    - name: Mask the systemd cryptsetup services
+      systemd:
+        name: "{{ item }}"
+        masked: yes
+      loop: "{{ storage_cryptsetup_services }}"
+
+    - name: manage the pools and volumes to match the specified state
+      blivet:
+        pools: "{{ storage_pools|default([]) }}"
+        volumes: "{{ storage_volumes|default([]) }}"
+        use_partitions: "{{ storage_use_partitions }}"
+        disklabel_type: "{{ storage_disklabel_type }}"
+        pool_defaults: "{{ storage_pool_defaults }}"
+        volume_defaults: "{{ storage_volume_defaults }}"
+        safe_mode: "{{ storage_safe_mode }}"
+        diskvolume_mkfs_option_map: "{{ __storage_blivet_diskvolume_mkfs_option_map|d(omit) }}"
      register: blivet_output
+  rescue:
+    - fail:
+        msg: "{{ ansible_failed_result }}"
+  always:
+    - name: Unmask the systemd cryptsetup services
+      systemd:
+        name: "{{ item }}"
+        masked: no
+      loop: "{{ storage_cryptsetup_services }}"
 
 - debug:
     var: blivet_output
diff --git a/tests/run_blivet.yml b/tests/run_blivet.yml
index 128fde2d..cf09d48e 100644
--- a/tests/run_blivet.yml
+++ b/tests/run_blivet.yml
@@ -1,7 +1,12 @@
 ---
+- include_vars:
+    file: roles/linux-system-roles.storage/defaults/main.yml
+
 - name: test lvm and xfs package deps
   blivet:
     packages_only: "{{ packages_only }}"
     pools: "{{ storage_pools|default([]) }}"
     volumes: "{{ storage_volumes|default([]) }}"
+    pool_defaults: "{{ storage_pool_defaults }}"
+    volume_defaults: "{{ storage_volume_defaults }}"
   register: blivet_output
diff --git a/tests/test-verify-volume-encryption.yml b/tests/test-verify-volume-encryption.yml
index a7214736..39a34108 100644
--- a/tests/test-verify-volume-encryption.yml
+++ b/tests/test-verify-volume-encryption.yml
@@ -16,7 +16,7 @@
 - name: Verify the presence/absence of the LUKS device node
   assert:
     that: "{{ storage_test_luks_dev.stat.exists and storage_test_luks_dev.stat.isblk
-              if _storage_test_volume_present or storage_test_volume.type == 'disk'
+              if _storage_test_volume_present
               else not storage_test_luks_dev.stat.exists }}"
     msg: "Incorrect device node presence for volume {{ storage_test_volume.name }}"
 
@@ -43,19 +43,19 @@
   assert:
     that: "{{ luks_dump.stdout|regex_search('^\\s+Version: ' + storage_test_volume.encryption_luks_version + '$') }}"
    msg: "Wrong LUKS version for volume {{ storage_test_volume.name }}"
-  when: _storage_test_volume_present and storage_test_volume.encryption_luks_version
+  when: _storage_test_volume_present and storage_test_volume.encryption and storage_test_volume.encryption_luks_version
 
 - name: Check LUKS key size
   assert:
     that: "{{ luks_dump.stdout|regex_search('^\\s+Key: ' + storage_test_volume.encryption_key_size|string + ' bits$') }}"
     msg: "Wrong key size for volume {{ storage_test_volume.name }}"
-  when: _storage_test_volume_present and storage_test_volume.encryption_key_size
+  when: _storage_test_volume_present and storage_test_volume.encryption and storage_test_volume.encryption_key_size
 
 - name: Check LUKS cipher
   assert:
     that: "{{ luks_dump.stdout|regex_search('^\\s+Cipher: ' + storage_test_volume.encryption_cipher + '$') }}"
     msg: "Wrong key size for volume {{ storage_test_volume.name }}"
-  when: _storage_test_volume_present and storage_test_volume.encryption_cipher
+  when: _storage_test_volume_present and storage_test_volume.encryption and storage_test_volume.encryption_cipher
 
 - set_fact:
     _storage_test_expected_crypttab_entries: "{{ (storage_test_volume.encryption and _storage_test_volume_present)|ternary(1, 0) }}"
diff --git a/tests/tests_change_fs.yml b/tests/tests_change_fs.yml
index f6f46359..236891c5 100644
--- a/tests/tests_change_fs.yml
+++ b/tests/tests_change_fs.yml
@@ -45,6 +45,25 @@
 
     - include_tasks: verify-role-results.yml
 
+    - name: Re-run the role on the same volume without specifying fs_type
+      include_role:
+        name: linux-system-roles.storage
+      vars:
+        storage_pools:
+          - name: foo
+            disks: "{{ unused_disks }}"
+            volumes:
+              - name: test1
+                size: "{{ volume_size }}"
+                mount_point: "{{ mount_location }}"
+
+    - name: Verify the output of the re-run without specifying fs_type
+      assert:
+        that: not blivet_output.changed and blivet_output.pools[0].volumes[0].fs_type == fs_after
+        msg: "Failed to preserve omitted fs_type on existing lvm volume"
+
+    - include_tasks: verify-role-results.yml
+
     - name: Repeat the previous invocation to verify idempotence
       include_role:
         name: linux-system-roles.storage
diff --git a/tests/tests_create_disk_then_remove.yml b/tests/tests_create_disk_then_remove.yml
index 79bb44f8..cf58ef11 100644
--- a/tests/tests_create_disk_then_remove.yml
+++ b/tests/tests_create_disk_then_remove.yml
@@ -21,11 +21,12 @@
           - name: test1
             type: disk
            disks: "{{ unused_disks }}"
+            fs_type: ext4
             mount_point: "{{ mount_location }}"
 
     - include_tasks: verify-role-results.yml
 
-    - name: Repeat the previous invocation to verify idempotence
+    - name: Repeat the previous invocation minus fs_type to verify idempotence
       include_role:
         name: linux-system-roles.storage
       vars:
@@ -35,6 +36,10 @@
             disks: "{{ unused_disks }}"
             mount_point: "{{ mount_location }}"
 
+    - assert:
+        that: not blivet_output.changed and blivet_output.volumes[0].fs_type == 'ext4'
+        msg: "File system not preserved on existing disk volume."
+
     - include_tasks: verify-role-results.yml
 
     - name: Remove the disk device created above
diff --git a/tests/tests_create_partition_volume_then_remove.yml b/tests/tests_create_partition_volume_then_remove.yml
index 5abcce5f..351b022b 100644
--- a/tests/tests_create_partition_volume_then_remove.yml
+++ b/tests/tests_create_partition_volume_then_remove.yml
@@ -24,11 +24,12 @@
             volumes:
               - name: test1
                 type: partition
+                fs_type: ext4
                 mount_point: "{{ mount_location }}"
 
     - include_tasks: verify-role-results.yml
 
-    - name: Repeat the previous invocation to verify idempotence
+    - name: Repeat the previous invocation minus fs_type to verify idempotence
       include_role:
         name: linux-system-roles.storage
       vars:
@@ -41,6 +42,10 @@
                 type: partition
                 mount_point: "{{ mount_location }}"
 
+    - assert:
+        that: not blivet_output.changed and blivet_output.pools[0].volumes[0].fs_type == 'ext4'
+        msg: "File system not preserved on existing partition volume."
+
     - include_tasks: verify-role-results.yml
 
     - name: Remove the partition created above
diff --git a/tests/tests_luks.yml b/tests/tests_luks.yml
index 9d7d55e3..63cbd8d6 100644
--- a/tests/tests_luks.yml
+++ b/tests/tests_luks.yml
@@ -426,6 +426,27 @@
 
     - include_tasks: verify-role-results.yml
 
+    - name: Verify preservation of encryption settings on existing LVM volume
+      include_role:
+        name: linux-system-roles.storage
+      vars:
+        storage_safe_mode: false
+        storage_pools:
+          - name: foo
+            type: lvm
+            disks: "{{ unused_disks }}"
+            volumes:
+              - name: test1
+                mount_point: "{{ mount_location }}"
+                size: 4g
+
+    - assert:
+        that: not blivet_output.changed and
+              blivet_output.pools[0].volumes[0].encryption and
+              blivet_output.pools[0].volumes[0].encryption_luks_version == 'luks1'
+
+    - include_tasks: verify-role-results.yml
+
     - import_tasks: create-test-file.yml
 
     - name: Test for correct handling of safe_mode
diff --git a/tests/tests_luks_pool.yml b/tests/tests_luks_pool.yml
index 25f62074..84646cb7 100644
--- a/tests/tests_luks_pool.yml
+++ b/tests/tests_luks_pool.yml
@@ -223,12 +223,13 @@
           - name: foo
             type: lvm
             disks: "{{ unused_disks }}"
-            encryption: true
-            encryption_password: 'yabbadabbadoo'
             volumes:
               - name: test1
                 mount_point: "{{ mount_location_2 }}"
-                size: 4g
+
+    - assert:
+        that: blivet_output.pools[0].encryption
+        msg: "Failed to implicitly preserve encryption on existing pool."
 
     - import_tasks: verify-data-preservation.yml
       vars:
diff --git a/tests/tests_raid_pool_options.yml b/tests/tests_raid_pool_options.yml
index ecd655e3..d960c0ce 100644
--- a/tests/tests_raid_pool_options.yml
+++ b/tests/tests_raid_pool_options.yml
@@ -46,6 +46,36 @@
 
     - include_tasks: verify-role-results.yml
 
+    - name: Repeat the previous invocation minus the pool raid options
+      include_role:
+        name: linux-system-roles.storage
+      vars:
+        storage_pools:
+          - name: vg1
+            disks: "{{ unused_disks }}"
+            type: lvm
+            state: present
+            volumes:
+              - name: lv1
+                size: "{{ volume1_size }}"
+                mount_point: "{{ mount_location1 }}"
+              - name: lv2
+                size: "{{ volume2_size }}"
+                mount_point: "{{ mount_location2 }}"
+              - name: lv3
+                size: "{{ volume3_size }}"
+                mount_point: "{{ mount_location3 }}"
+
+    - assert:
+        that: not blivet_output.changed and
+              blivet_output.pools[0].raid_level == 'raid1' and
+              blivet_output.pools[0].raid_device_count == 2 and
+              blivet_output.pools[0].raid_spare_count == 1 and
+              blivet_output.pools[0].raid_metadata_version == '1.0'
+        msg: "Failure to preserve RAID settings for preexisting pool."
+
+    - include_tasks: verify-role-results.yml
+
     - name: Remove the pool created above
       include_role:
         name: linux-system-roles.storage
diff --git a/tests/tests_raid_volume_options.yml b/tests/tests_raid_volume_options.yml
index 0530fd4f..7e66943f 100644
--- a/tests/tests_raid_volume_options.yml
+++ b/tests/tests_raid_volume_options.yml
@@ -32,6 +32,27 @@
 
     - include_tasks: verify-role-results.yml
 
+    - name: Re-run the same invocation without the RAID params
+      include_role:
+        name: linux-system-roles.storage
+      vars:
+        storage_volumes:
+          - name: test1
+            type: raid
+            disks: "{{ unused_disks }}"
+            mount_point: "{{ mount_location }}"
+            state: present
+
+    - assert:
+        that: not blivet_output.changed and
+              blivet_output.volumes[0].raid_level == 'raid1' and
+              blivet_output.volumes[0].raid_device_count == 2 and
+              blivet_output.volumes[0].raid_spare_count == 1 and
+              blivet_output.volumes[0].raid_metadata_version == '1.0'
+        msg: "Failure to preserve RAID settings for preexisting volume."
+
+    - include_tasks: verify-role-results.yml
+
     - name: Remove the disk device created above
       include_role:
         name: linux-system-roles.storage
diff --git a/tests/verify-role-results.yml b/tests/verify-role-results.yml
index 06f96f4a..c99370bc 100644
--- a/tests/verify-role-results.yml
+++ b/tests/verify-role-results.yml
@@ -25,6 +25,7 @@
   command: cat /etc/crypttab
   register: storage_test_crypttab
   changed_when: false
+  failed_when: false
 
 #
 # Verify pools and the volumes they contain.