diff --git a/library/blivet.py b/library/blivet.py
index 0daef053..648d717e 100644
--- a/library/blivet.py
+++ b/library/blivet.py
@@ -704,7 +704,7 @@ def _type_check(self):  # pylint: disable=no-self-use
 
     def _look_up_disks(self):
         """ Look up the pool's disks in blivet's device tree. """
-        if not self._pool['disks']:
+        if not self._device and not self._pool['disks']:
             raise BlivetAnsibleError("no disks specified for pool '%s'" % self._pool['name'])
         elif not isinstance(self._pool['disks'], list):
             raise BlivetAnsibleError("pool disks must be specified as a list")
@@ -715,7 +715,7 @@ def _look_up_disks(self):
             if device is not None:  # XXX fail if any disk isn't resolved?
                 disks.append(device)
 
-        if self._pool['disks'] and not disks:
+        if self._pool['disks'] and not self._device and not disks:
             raise BlivetAnsibleError("unable to resolve any disks specified for pool '%s' (%s)" % (self._pool['name'], self._pool['disks']))
 
         self._disks = disks
@@ -820,9 +820,9 @@ def _manage_volumes(self):
     def manage(self):
         """ Schedule actions to configure this pool according to the yaml input. """
         # look up the device
-        self._look_up_disks()
         self._look_up_device()
         self._apply_defaults()
+        self._look_up_disks()
 
         # schedule destroy if appropriate, including member type change
         if not self.ultimately_present or self._member_management_is_destructive():
diff --git a/tests/test-verify-pool-members.yml b/tests/test-verify-pool-members.yml
index 791f966c..bc6180bd 100644
--- a/tests/test-verify-pool-members.yml
+++ b/tests/test-verify-pool-members.yml
@@ -24,7 +24,7 @@
   assert:
     that: "{{ ansible_lvm.pvs|dict2items|json_query('[?value.vg==`\"{}\"`]'.format(storage_test_pool.name))|length == _storage_test_expected_pv_count|int }}"
     msg: "Unexpected PV count for pool {{ storage_test_pool.name }}"
-  when: storage_test_pool.type == 'lvm'
+  when: storage_test_pool.type == 'lvm' and storage_test_pool.disks
 
 - set_fact:
     _storage_test_expected_pv_type: "{{ 'crypt' if storage_test_pool.encryption else 'disk' }}"
diff --git a/tests/tests_existing_lvm_pool.yml b/tests/tests_existing_lvm_pool.yml
new file mode 100644
index 00000000..ca1a7fb2
--- /dev/null
+++ b/tests/tests_existing_lvm_pool.yml
@@ -0,0 +1,54 @@
+---
+- hosts: all
+  become: true
+  vars:
+    mount_location: '/opt/test1'
+    volume_group_size: '5g'
+    volume_size: '4g'
+    pool_name: foo
+
+  tasks:
+    - include_role:
+        name: storage
+
+    - include_tasks: get_unused_disk.yml
+      vars:
+        min_size: "{{ volume_group_size }}"
+        max_return: 1
+
+    - name: Create one LVM logical volume under one volume group
+      include_role:
+        name: storage
+      vars:
+        storage_pools:
+          - name: "{{ pool_name }}"
+            disks: "{{ unused_disks }}"
+            volumes:
+              - name: test1
+                size: "{{ volume_size }}"
+
+    - include_tasks: verify-role-results.yml
+
+    - name: Create another volume in the existing pool, identified only by name.
+      include_role:
+        name: storage
+      vars:
+        storage_pools:
+          - name: "{{ pool_name }}"
+            volumes:
+              - name: newvol
+                size: '2 GiB'
+                fs_type: ext4
+                fs_label: newvol
+
+    - include_tasks: verify-role-results.yml
+
+    - name: Clean up.
+      include_role:
+        name: storage
+      vars:
+        storage_pools:
+          - name: "{{ pool_name }}"
+            state: absent
+
+    - include_tasks: verify-role-results.yml