From cee2a41771fc4d85dadf9077ce72824de6ea8cf3 Mon Sep 17 00:00:00 2001
From: Franziska Kunsmann
Date: Sat, 17 Jul 2021 18:09:35 +0200
Subject: [PATCH] items/zfs_pool: rewrite item to support all kinds of zfs
 pools

---
 bundles/smartd/metadata.py  |  14 ++--
 bundles/zfs/items.py        |  12 +--
 items/zfs_pool.py           | 153 ++++++++++++++++++------------------
 nodes/home/nas.py           |  35 ++++++---
 nodes/htz-cloud/influxdb.py |   6 +-
 nodes/htz-cloud/luther.py   |   6 +-
 nodes/htz-cloud/pleroma.py  |   6 +-
 nodes/htz-cloud/sewfile.py  |   6 +-
 nodes/ovh/icinga2.py        |   6 +-
 nodes/rx300.py              |   7 +-
 10 files changed, 134 insertions(+), 117 deletions(-)

diff --git a/bundles/smartd/metadata.py b/bundles/smartd/metadata.py
index 8d43d94..f1e91e7 100644
--- a/bundles/smartd/metadata.py
+++ b/bundles/smartd/metadata.py
@@ -23,13 +23,13 @@ defaults = {
 def zfs_disks_to_metadata(metadata):
     disks = set()
 
-    for _, config in metadata.get('zfs/pools', {}).items():
-        if 'device' in config:
-            disks.add(config['device'])
-        else:
-            for t in {'mirror', 'raidz', 'raidz2', 'raidz3'}:
-                for device in config.get(t, set()):
-                    disks.add(device)
+    for config in metadata.get('zfs/pools', {}).values():
+        for option in config:
+            if option.get('type', '') in {'log', 'cache'}:
+                continue
+
+            for disk in option['devices']:
+                disks.add(disk)
 
     return {
         'smartd': {
diff --git a/bundles/zfs/items.py b/bundles/zfs/items.py
index 86ab376..6eabb96 100644
--- a/bundles/zfs/items.py
+++ b/bundles/zfs/items.py
@@ -79,16 +79,12 @@ svc_systemd = {
 zfs_datasets = node.metadata.get('zfs/datasets', {})
 
 for name, attrs in node.metadata.get('zfs/pools', {}).items():
-    zfs_pools[name] = attrs
+    zfs_pools[name] = {
+        'config': attrs,
+    }
 
     if node.os_version[0] > 10:
-        actions[f'pool_{name}_enable_trim'] = {
-            'command': f'zpool set autotrim=on {name}',
-            'unless': f'zpool get autotrim -H -o value {name} | grep -q on',
-            'needs': [
-                f'zfs_pool:{name}'
-            ]
-        }
+        zfs_pools[name]['autotrim'] = True
 
 directories = {
     "/etc/zfs-snapshot-backup-pre.d": {
diff --git a/items/zfs_pool.py b/items/zfs_pool.py
index e8caca0..a16ecea 100644
--- a/items/zfs_pool.py
+++ b/items/zfs_pool.py
@@ -6,43 +6,6 @@ from bundlewrap.items import Item
 from bundlewrap.utils.text import mark_for_translation as _
 
 
-def create_mirrors(node, path, mirrors):
-    cmd = ""
-    for devices in mirrors:
-        actual_targets = []
-        for device in devices:
-            actual_targets.append(quote(prepare_blockdevice(node, device)))
-        cmd += "mirror {} ".format(" ".join(actual_targets))
-
-    node.run("zpool create {} {}".format(quote(path), cmd))
-    node.run("zfs unmount {}".format(quote(path)))
-
-
-def create_raidz(node, path, devices, raid='raidz'):
-    cmd = ""
-    actual_targets = []
-    for device in devices:
-        actual_targets.append(quote(prepare_blockdevice(node, device)))
-    cmd += "{} {} ".format(raid, " ".join(actual_targets))
-
-    node.run("zpool create {} {}".format(quote(path), cmd))
-    node.run("zfs unmount {}".format(quote(path)))
-
-
-def create_single(node, path, device):
-    actual_target = prepare_blockdevice(node, device)
-    node.run("zpool create {} {}".format(quote(path), quote(actual_target)))
-    node.run("zfs unmount {}".format(quote(path)))
-
-
-def does_exist(node, path):
-    status_result = node.run(
-        "zpool list {}".format(quote(path)),
-        may_fail=True,
-    )
-    return status_result.return_code == 0
-
-
 def prepare_blockdevice(node, device):
     # To increase our chances of success, we run partprobe beforehand to
     # make the kernel re-scan all devices.
@@ -92,53 +55,68 @@ class ZFSPool(Item):
     """
     BUNDLE_ATTRIBUTE_NAME = "zfs_pools"
     ITEM_ATTRIBUTES = {
-        'device': None,
-        'mirrors': None,
-        'raidz': None,
-        'raidz2': None,
-        'raidz3': None,
+        'config': None,
+        'autotrim': None,
+        'autoreplace': None,
+        'autoexpand': None,
     }
     ITEM_TYPE_NAME = "zfs_pool"
 
     def __repr__(self):
-        return "<zfs_pool:{} device:{} mirrors:{} raidz:{}>".format(
+        return "<zfs_pool:{} autoexpand:{} autoreplace:{} autotrim:{} config:{}>".format(
             self.name,
-            self.attributes['device'],
-            self.attributes['mirrors'],
-            self.attributes['raidz'],
+            self.attributes['autoexpand'],
+            self.attributes['autoreplace'],
+            self.attributes['autotrim'],
+            self.attributes['config'],
         )
 
     def cdict(self):
-        return {}
+        ret = {}
+        for i in {'autoexpand', 'autoreplace', 'autotrim'}:
+            if self.attributes.get(i):
+                ret[i] = self.attributes[i]
+        return ret
 
     @property
     def devices_used(self):
-        devices = []
-        if self.attributes['device'] is not None:
-            devices.append(self.attributes['device'])
-        if self.attributes['mirrors'] is not None:
-            for mirror in self.attributes['mirrors']:
-                devices.extend(mirror)
-        if self.attributes['raidz'] is not None:
-            devices.extend(self.attributes['raidz'])
-        return devices
+        devices = set()
+        for option in self.attributes['config']:
+            for device in option['devices']:
+                devices.add(device)
+        return sorted(devices)
 
     def fix(self, status):
         if status.must_be_created:
-            if self.attributes['device'] is not None:
-                create_single(self.node, self.name, self.attributes['device'])
-            elif self.attributes['mirrors'] is not None:
-                create_mirrors(self.node, self.name, self.attributes['mirrors'])
-            elif self.attributes['raidz'] is not None:
-                create_raidz(self.node, self.name, self.attributes['raidz'])
-            elif self.attributes['raidz2'] is not None:
-                create_raidz(self.node, self.name, self.attributes['raidz'], 'raidz2')
-            elif self.attributes['raidz2'] is not None:
-                create_raidz(self.node, self.name, self.attributes['raidz'], 'raidz3')
+            cmdline = []
+            for option in self.attributes['config']:
+                if option.get('type'):
+                    cmdline.append(option['type'])
+                    if option['type'] == 'log' and len(option['devices']) > 1:
+                        cmdline.append('mirror')
+                for device in sorted(option['devices']):
+                    cmdline.append(quote(prepare_blockdevice(self.node, device)))
+
+            self.run('zpool create {} {}'.format(quote(self.name), ' '.join(cmdline)))
+
+        for attr in {'autoexpand', 'autoreplace', 'autotrim'}:
+            if attr in status.keys_to_fix:
+                state_str = 'on' if status.cdict[attr] else 'off'
+                self.run('zpool set {}={} {}'.format(attr, state_str, quote(self.name)))
 
     def sdict(self):
-        # We don't care about the device if the pool already exists.
-        return {} if does_exist(self.node, self.name) else None
+        status_result = self.run('zpool list {}'.format(quote(self.name)), may_fail=True)
+        if status_result.return_code != 0:
+            return None
+
+        autoexpand_state = self.run('zpool get autoexpand -H -o value {}'.format(quote(self.name)), may_fail=True).stdout.decode().strip()
+        autoreplace_state = self.run('zpool get autoreplace -H -o value {}'.format(quote(self.name)), may_fail=True).stdout.decode().strip()
+        autotrim_state = self.run('zpool get autotrim -H -o value {}'.format(quote(self.name)), may_fail=True).stdout.decode().strip()
+        return {
+            'autoexpand': (autoexpand_state == 'on'),
+            'autoreplace': (autoreplace_state == 'on'),
+            'autotrim': (autotrim_state == 'on'),
+        }
 
     def test(self):
         duplicate_devices = [
@@ -173,15 +151,40 @@ class ZFSPool(Item):
 
     @classmethod
     def validate_attributes(cls, bundle, item_id, attributes):
-        device_config = []
-        for key in ('device', 'mirrors', 'raidz', 'raidz2', 'raidz3'):
-            device_config.append(attributes.get(key))
-        device_config = [key for key in device_config if key is not None]
-        if len(device_config) != 1:
+        if not isinstance(attributes['config'], list):
             raise BundleError(_(
-                "{item} on node {node} must have exactly one of "
-                "'device', 'mirrors', 'raidz', 'raidz2' or 'raidz3'"
+                "{item} on node {node}: 'config' must be a list"
             ).format(
                 item=item_id,
                 node=bundle.node.name,
             ))
+
+        for config in attributes['config']:
+            if not config.get('type', '') in {'', 'mirror', 'raidz', 'raidz2', 'raidz3', 'cache', 'log'}:
+                raise BundleError(_(
+                    "{item} on node {node} has invalid type '{type}', "
+                    "must be one of (unset), 'mirror', 'raidz', 'raidz2', "
+                    "'raidz3', 'cache', 'log'"
+                ).format(
+                    item=item_id,
+                    node=bundle.node.name,
+                    type=config['type'],
+                ))
+
+            if not config.get('devices', set()):
+                raise BundleError(_(
+                    "{item} on node {node} uses no devices!"
+                ).format(
+                    item=item_id,
+                    node=bundle.node.name,
+                ))
+
+            if config.get('type') == 'log':
+                if not 0 < len(config['devices']) < 3:
+                    raise BundleError(_(
+                        "{item} on node {node} type 'log' must use exactly "
+                        "one or two devices"
+                    ).format(
+                        item=item_id,
+                        node=bundle.node.name,
+                    ))
diff --git a/nodes/home/nas.py b/nodes/home/nas.py
index f3314ac..c0a6b24 100644
--- a/nodes/home/nas.py
+++ b/nodes/home/nas.py
@@ -197,16 +197,33 @@ nodes['home.nas'] = {
         },
         'pools': {
             # Configured manually. Don't touch!
-            'storage': {
-                'raidz2': {
-                    '/dev/disk/by-id/ata-WDC_WD6003FFBX-68MU3N0_V8GE15GR',
-                    '/dev/disk/by-id/ata-WDC_WD6003FFBX-68MU3N0_V8HJ406R',
-                    '/dev/disk/by-id/ata-WDC_WD6003FFBX-68MU3N0_V8HJBTLR',
-                    '/dev/disk/by-id/ata-WDC_WD6003FFBX-68MU3N0_V8HJGN6R',
-                    '/dev/disk/by-id/ata-WDC_WD6003FFBX-68MU3N0_V8HJU4NR',
-                    '/dev/disk/by-id/ata-WDC_WD6003FFBX-68MU3N0_V8J8ZKRR',
+            'storage': [
+                {
+                    'type': 'raidz2',
+                    'devices': {
+                        '/dev/disk/by-id/ata-WDC_WD6003FFBX-68MU3N0_V8GE15GR',
+                        '/dev/disk/by-id/ata-WDC_WD6003FFBX-68MU3N0_V8HJ406R',
+                        '/dev/disk/by-id/ata-WDC_WD6003FFBX-68MU3N0_V8HJBTLR',
+                        '/dev/disk/by-id/ata-WDC_WD6003FFBX-68MU3N0_V8HJGN6R',
+                        '/dev/disk/by-id/ata-WDC_WD6003FFBX-68MU3N0_V8HJU4NR',
+                        '/dev/disk/by-id/ata-WDC_WD6003FFBX-68MU3N0_V8J8ZKRR',
+                    },
                 },
-            },
+                {
+                    'type': 'log',
+                    'devices': {
+                        '/dev/disk/by-id/ata-TS64GSSD370_B807810503-part1',
+                        '/dev/disk/by-id/ata-TS64GSSD370_B807810527-part1',
+                    },
+                },
+                {
+                    'type': 'cache',
+                    'devices': {
+                        '/dev/disk/by-id/ata-TS64GSSD370_B807810503-part2',
+                        '/dev/disk/by-id/ata-TS64GSSD370_B807810527-part2',
+                    },
+                },
+            ]
         },
         'datasets': {
             'storage/backups': {},
diff --git a/nodes/htz-cloud/influxdb.py b/nodes/htz-cloud/influxdb.py
index b1f5e1a..4b4a5e4 100644
--- a/nodes/htz-cloud/influxdb.py
+++ b/nodes/htz-cloud/influxdb.py
@@ -57,9 +57,9 @@ nodes['htz-cloud.influxdb'] = {
         },
         'zfs': {
             'pools': {
-                'tank': {
-                    'device': '/dev/sdb',
-                },
+                'tank': [{
+                    'devices': {'/dev/sdb'},
+                }],
             },
         },
     },
diff --git a/nodes/htz-cloud/luther.py b/nodes/htz-cloud/luther.py
index f601e4f..e20b33b 100644
--- a/nodes/htz-cloud/luther.py
+++ b/nodes/htz-cloud/luther.py
@@ -80,9 +80,9 @@ nodes['htz-cloud.luther'] = {
         },
         'zfs': {
             'pools': {
-                'tank': {
-                    'device': '/dev/sdb',
-                },
+                'tank': [{
+                    'devices': {'/dev/sdb'},
+                }],
             },
             'datasets': {
                 'tank/luther-website': {
diff --git a/nodes/htz-cloud/pleroma.py b/nodes/htz-cloud/pleroma.py
index 5e3582d..6c0833f 100644
--- a/nodes/htz-cloud/pleroma.py
+++ b/nodes/htz-cloud/pleroma.py
@@ -66,9 +66,9 @@ nodes['htz-cloud.pleroma'] = {
         },
         'zfs': {
             'pools': {
-                'tank': {
-                    'device': '/dev/sdb',
-                },
+                'tank': [{
+                    'devices': {'/dev/sdb'},
+                }],
             },
         },
     },
diff --git a/nodes/htz-cloud/sewfile.py b/nodes/htz-cloud/sewfile.py
index f0b0a16..4ae4b4f 100644
--- a/nodes/htz-cloud/sewfile.py
+++ b/nodes/htz-cloud/sewfile.py
@@ -69,9 +69,9 @@ nodes['htz-cloud.sewfile'] = {
         },
         'zfs': {
             'pools': {
-                'tank': {
-                    'device': '/dev/sdb',
-                },
+                'tank': [{
+                    'devices': {'/dev/sdb'},
+                }],
             },
             'datasets': {
                 'tank/mysql': {
diff --git a/nodes/ovh/icinga2.py b/nodes/ovh/icinga2.py
index e6df61c..6003d4b 100644
--- a/nodes/ovh/icinga2.py
+++ b/nodes/ovh/icinga2.py
@@ -133,9 +133,9 @@ nodes['ovh.icinga2'] = {
         },
         'zfs': {
             'pools': {
-                'tank': {
-                    'device': '/dev/sdb',
-                },
+                'tank': [{
+                    'devices': {'/dev/sdb'},
+                }],
             },
         },
         'vm': {
diff --git a/nodes/rx300.py b/nodes/rx300.py
index 02fa5a6..8960afe 100644
--- a/nodes/rx300.py
+++ b/nodes/rx300.py
@@ -310,14 +310,15 @@ nodes['rx300'] = {
             'zfs_arc_max_gb': 16,
         },
         'pools': {
-            'tank': {
-                'raidz': {
+            'tank': [{
+                'type': 'raidz',
+                'devices': {
                     '/dev/sda',
                     '/dev/sdb',
                     '/dev/sdc',
                     '/dev/sdd',
                 },
-            },
+            }],
         },
         'datasets': {
             'tank/libvirt': {
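
Usage note (editorial, not part of the patch): after this rewrite, a pool is described as a list of vdev groups, each with an optional 'type' (unset for plain striping, or 'mirror', 'raidz', 'raidz2', 'raidz3', 'log', 'cache') and a set of 'devices'. A minimal sketch of the new metadata shape, using hypothetical node and device names:

    nodes['example.node'] = {
        'metadata': {
            'zfs': {
                'pools': {
                    # plain striped pool: one group, 'type' left unset
                    'tank': [{
                        'devices': {'/dev/sdb', '/dev/sdc'},
                    }],
                    # mirrored data vdev plus a mirrored log vdev
                    'fast': [
                        {
                            'type': 'mirror',
                            'devices': {'/dev/nvme0n1', '/dev/nvme1n1'},
                        },
                        {
                            'type': 'log',
                            # two 'log' devices are mirrored automatically by fix()
                            'devices': {'/dev/sde1', '/dev/sdf1'},
                        },
                    ],
                },
            },
        },
    }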
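For the hypothetical 'fast' pool above, ZFSPool.fix() would assemble roughly the following command (a sketch, assuming prepare_blockdevice() hands back each device path unchanged; devices within a group are emitted in sorted order):

    zpool create fast mirror /dev/nvme0n1 /dev/nvme1n1 log mirror /dev/sde1 /dev/sdf1

The 'autoexpand', 'autoreplace' and 'autotrim' properties are not part of the create call; sdict() and cdict() compare them separately, and fix() applies any drift afterwards via 'zpool set'.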