From 1ae328d8f3cff0e3ff6f9733fc39d718443ed246 Mon Sep 17 00:00:00 2001 From: Franziska Kunsmann Date: Fri, 30 Jul 2021 15:44:34 +0200 Subject: [PATCH] items/zfs_*: improve --- items/zfs_dataset.py | 10 ----- items/zfs_pool.py | 97 +++++++++++++++++--------------------------- 2 files changed, 38 insertions(+), 69 deletions(-) diff --git a/items/zfs_dataset.py b/items/zfs_dataset.py index 6c38538..badf217 100644 --- a/items/zfs_dataset.py +++ b/items/zfs_dataset.py @@ -120,16 +120,6 @@ class ZFSDataset(Item): # parent only. yield item.id - # XXX This populates 'needs', not 'needed_by'. We have opened - # an issue: https://github.com/bundlewrap/bundlewrap/issues/648 -# elif self.attributes.get('mountpoint'): -# for item_type in ['directory', 'file', 'git_deploy']: -# if ( -# item.ITEM_TYPE_NAME == item_type and -# item.name.startswith('{}:{}'.format(item_type, self.attributes['mountpoint'])) -# ): -# yield item.id - if not pool_item_found: raise BundleError(_( diff --git a/items/zfs_pool.py b/items/zfs_pool.py index a16ecea..82abdcb 100644 --- a/items/zfs_pool.py +++ b/items/zfs_pool.py @@ -6,52 +6,9 @@ from bundlewrap.items import Item from bundlewrap.utils.text import mark_for_translation as _ -def prepare_blockdevice(node, device): - # To increase our chances of success, we run partprobe beforehand to - # make the kernel re-scan all devices. - node.run("partprobe", may_fail=True) - - # Try to find out if the device already contains some filesystem. - # Please note that there is no 100% reliable way to do this. - res = node.run("lsblk -rndo fstype {}".format(quote(device))) - detected = res.stdout.decode('UTF-8').strip() - if detected != "": - raise Exception(_("Device {} to be used for ZFS, but it is not empty! Has '{}'.").format( - device, detected)) - - res = node.run("lsblk -rndo type {}".format(device)) - if res.stdout.decode('UTF-8').strip() == "disk": - # We create a new partition on a raw disk. That partition will - # be used for ZFS. 
Yes, this is on purpose. No, creating a pool - # on raw disks does not work properly on Linux. - node.run("parted -s {} mklabel gpt".format(quote(device))) - node.run("parted -s {} mkpart -a optimal primary 0% 100%".format(quote(device))) - node.run("partprobe") - - # Simply append a "1" to get to the first partition. - # - # XXX I know that this fails if you're using /dev/disk/by-*. - # Yes, this is a problem if your device names are not - # predictable. Yes, we could use "lsblk" to try to find the - # first partition ... but "lsblk" still reports it as - # "/dev/vdb1" instead of "/dev/disk/by-foo/bar-part1". - # - # This is an unsolved problem. Please configure your VMs to use - # predictable device names. - if device.find('nvme') != -1: # NVME Devices have the partitions with the prefix pX - partition = "{}p1".format(device) - - else: - partition = "{}1".format(device) - - return partition - else: - return device - - class ZFSPool(Item): """ - Creates ZFS pools and the required partitions. + Creates ZFS pools. """ BUNDLE_ATTRIBUTE_NAME = "zfs_pools" ITEM_ATTRIBUTES = { @@ -59,20 +16,24 @@ class ZFSPool(Item): 'autotrim': None, 'autoreplace': None, 'autoexpand': None, + 'ashift': None, } ITEM_TYPE_NAME = "zfs_pool" def __repr__(self): - return "<zfs_pool:{} autoexpand:{} autoreplace:{} autotrim:{} config:{}>".format( + return "<zfs_pool:{} autoexpand:{} autoreplace:{} autotrim:{} ashift:{} config:{}>".format( self.name, self.attributes['autoexpand'], self.attributes['autoreplace'], self.attributes['autotrim'], + self.attributes['ashift'], self.attributes['config'], ) def cdict(self): ret = {} + # ashift can only be set at pool creation, that's why it's missing + # here. 
for i in {'autoexpand', 'autoreplace', 'autotrim'}: if self.attributes.get(i): ret[i] = self.attributes[i] @@ -94,28 +55,46 @@ class ZFSPool(Item): cmdline.append(option['type']) if option['type'] == 'log' and len(option['devices']) > 1: cmdline.append('mirror') + for device in sorted(option['devices']): - cmdline.append(quote(prepare_blockdevice(node, device))) + res = node.run("lsblk -rndo fstype {}".format(quote(device))) + detected = res.stdout.decode('UTF-8').strip() + if detected != "": + raise BundleError(_("Node {}, ZFSPool {}: Device {} to be used for ZFS, but it is not empty! Has '{}'.").format(self.node.name, self.name, device, detected)) - self.run('zpool create {} {}'.format(quote(self.name), ' '.join(cmdline))) + cmdline.append(quote(device)) - for attr in {'autoexpand', 'autoreplace', 'autotrim'}: - if attr in status.keys_to_fix: - state_str = 'on' if status.cdict[attr] else 'off' - self.run('zpool set {}={} {}'.format(attr, state_str, quote(self.name))) + options = set() + if self.attributes['ashift']: + options.add('-o ashift={}'.format(self.attributes['ashift'])) + + self.run('zpool create {} {} {}'.format( + ' '.join(sorted(options)), + quote(self.name), + ' '.join(cmdline), + )) + + for attr in status.keys_to_fix: + state_str = 'on' if status.cdict[attr] else 'off' + self.run('zpool set {}={} {}'.format(attr, state_str, quote(self.name))) def sdict(self): status_result = self.run('zpool list {}'.format(quote(self.name)), may_fail=True) if status_result.return_code != 0: return {} - autoexpand_state = self.run('zpool get autoexpand -H -o value {}'.format(quote(self.name)), may_fail=True).stdout.decode().strip() - autoreplace_state = self.run('zpool get autoreplace -H -o value {}'.format(quote(self.name)), may_fail=True).stdout.decode().strip() - autotrim_state = self.run('zpool get autotrim -H -o value {}'.format(quote(self.name)), may_fail=True).stdout.decode().strip() + pool_status = {} + for line in self.run('zpool get all -H -o all 
{}'.format(quote(self.name)), may_fail=True).stdout.decode().splitlines(): + try: + pname, prop, value, source = line.split() + pool_status[prop.strip()] = value.strip() + except (IndexError, ValueError): + continue + return { - 'autoexpand': (autoexpand_state == 'on'), - 'autoreplace': (autoreplace_state == 'on'), - 'autotrim': (autotrim_state == 'on'), + 'autoexpand': (pool_status.get('autoexpand') == 'on'), + 'autoreplace': (pool_status.get('autoreplace') == 'on'), + 'autotrim': (pool_status.get('autotrim') == 'on'), } def test(self): @@ -153,14 +132,14 @@ class ZFSPool(Item): def validate_attributes(cls, bundle, item_id, attributes): if not isinstance(attributes['config'], list): raise BundleError(_( - "{item} on node {node} 'config'" + "{item} on node {node}: option 'config' must be a list" ).format( item=item_id, node=bundle.node.name, )) for config in attributes['config']: - if not config.get('type', '') in {'', 'mirror', 'raidz', 'raidz2', 'raidz3', 'cache', 'log'}: + if config.get('type', None) not in {None, 'mirror', 'raidz', 'raidz2', 'raidz3', 'cache', 'log'}: raise BundleError(_( "{item} on node {node} has invalid type '{type}', " "must be one of (unset), 'mirror', 'raidz', 'raidz2', "