diff --git a/bundles/smartd/metadata.py b/bundles/smartd/metadata.py index d44a5fc..72bbee7 100644 --- a/bundles/smartd/metadata.py +++ b/bundles/smartd/metadata.py @@ -24,7 +24,7 @@ def zfs_disks_to_metadata(metadata): disks = set() for config in metadata.get('zfs/pools', {}).values(): - for option in config: + for option in config['when_creating']['config']: if option.get('type', '') in {'log', 'cache'}: continue diff --git a/bundles/zfs/items.py b/bundles/zfs/items.py index 6eabb96..a990f6a 100644 --- a/bundles/zfs/items.py +++ b/bundles/zfs/items.py @@ -79,12 +79,10 @@ svc_systemd = { zfs_datasets = node.metadata.get('zfs/datasets', {}) for name, attrs in node.metadata.get('zfs/pools', {}).items(): - zfs_pools[name] = { - 'config': attrs, - } + zfs_pools[name] = attrs if node.os_version[0] > 10: - zfs_pools[name]['autotrim'] = True + zfs_pools[name]['autotrim'] = attrs.get('autotrim', True) directories = { "/etc/zfs-snapshot-backup-pre.d": { diff --git a/items/zfs_dataset.py b/items/zfs_dataset.py deleted file mode 100644 index badf217..0000000 --- a/items/zfs_dataset.py +++ /dev/null @@ -1,142 +0,0 @@ -from pipes import quote - -from bundlewrap.exceptions import BundleError -from bundlewrap.items import Item -from bundlewrap.utils.text import mark_for_translation as _ - - -def create(node, path, options): - option_list = [] - for option, value in sorted(options.items()): - # We must exclude the 'mounted' property here because it's a - # read-only "informational" property. - if option != 'mounted' and value is not None: - option_list.append("-o {}={}".format(quote(option), quote(value))) - option_args = " ".join(option_list) - - node.run( - "zfs create {} {}".format( - option_args, - quote(path), - ), - may_fail=True, - ) - - if options['mounted'] == 'no': - set_option(node, path, 'mounted', 'no') - - -def does_exist(node, path): - status_result = node.run( - "zfs list {}".format(quote(path)), - may_fail=True, - ) - return status_result.return_code == 0 - - -def get_option(node, path, option): - cmd = "zfs get -Hp -o value {} {}".format(quote(option), quote(path)) - # We always expect this to succeed since we don't call this function - # if we have already established that the dataset does not exist. - status_result = node.run(cmd) - return status_result.stdout.decode('utf-8').strip() - -def set_option(node, path, option, value): - if option == 'mounted': - # 'mounted' is a read-only property that can not be altered by - # 'set'. We need to call 'zfs mount tank/foo'. - node.run( - "zfs {} {}".format( - "mount" if value == 'yes' else "unmount", - quote(path), - ), - may_fail=True, - ) - else: - node.run( - "zfs set {}={} {}".format( - quote(option), - quote(value), - quote(path), - ), - may_fail=True, - ) - - -class ZFSDataset(Item): - """ - Creates ZFS datasets and manages their options. 
-    """
-    BUNDLE_ATTRIBUTE_NAME = "zfs_datasets"
-    ITEM_ATTRIBUTES = {
-        'atime': None,
-        'acltype': None,
-        'compression': None,
-        'mountpoint': None,
-        'quota': None,
-        'recordsize': None,
-        'dedup': None,
-    }
-    ITEM_TYPE_NAME = "zfs_dataset"
-
-    def __repr__(self):
-        return f"<ZFSDataset name:{self.name}>"
-
-    def cdict(self):
-        cdict = {}
-        for option, value in self.attributes.items():
-            if option == 'mountpoint' and value is None:
-                value = "none"
-            if value is not None:
-                cdict[option] = value
-        cdict['mounted'] = 'no' if cdict.get('mountpoint') in (None, "none") else 'yes'
-        return cdict
-
-    def fix(self, status):
-        if status.must_be_created:
-            create(self.node, self.name, status.cdict)
-        else:
-            for option in status.keys_to_fix:
-                set_option(self.node, self.name, option, status.cdict[option])
-
-    def get_auto_deps(self, items):
-        pool = self.name.split("/")[0]
-        pool_item = "zfs_pool:{}".format(pool)
-        pool_item_found = False
-
-        for item in items:
-            if item.ITEM_TYPE_NAME == "zfs_pool" and item.name == pool:
-                # Add dependency to the pool this dataset resides on.
-                pool_item_found = True
-                yield pool_item
-            elif (
-                item.ITEM_TYPE_NAME == "zfs_dataset" and
-                self.name != item.name and
-                self.name.startswith(item.name + "/")
-            ):
-                # Find all other datasets that are parents of this
-                # dataset.
-                # XXX Could be optimized by finding the "largest"
-                # parent only.
-                yield item.id
-
-
-        if not pool_item_found:
-            raise BundleError(_(
-                "ZFS dataset {dataset} resides on pool {pool} but item "
-                "{dep} does not exist"
-            ).format(
-                dataset=self.name,
-                pool=pool,
-                dep=pool_item,
-            ))
-
-    def sdict(self):
-        if not does_exist(self.node, self.name):
-            return None
-
-        sdict = {}
-        for option, value in self.attributes.items():
-            sdict[option] = get_option(self.node, self.name, option)
-        sdict['mounted'] = get_option(self.node, self.name, 'mounted')
-        return sdict
diff --git a/items/zfs_pool.py b/items/zfs_pool.py
deleted file mode 100644
index 82abdcb..0000000
--- a/items/zfs_pool.py
+++ /dev/null
@@ -1,169 +0,0 @@
-from collections import Counter
-from pipes import quote
-
-from bundlewrap.exceptions import BundleError
-from bundlewrap.items import Item
-from bundlewrap.utils.text import mark_for_translation as _
-
-
-class ZFSPool(Item):
-    """
-    Creates ZFS pools.
-    """
-    BUNDLE_ATTRIBUTE_NAME = "zfs_pools"
-    ITEM_ATTRIBUTES = {
-        'config': None,
-        'autotrim': None,
-        'autoreplace': None,
-        'autoexpand': None,
-        'ashift': None,
-    }
-    ITEM_TYPE_NAME = "zfs_pool"
-
-    def __repr__(self):
-        return "<ZFSPool name:{} autoexpand:{} autoreplace:{} autotrim:{} ashift:{} config:{}>".format(
-            self.name,
-            self.attributes['autoexpand'],
-            self.attributes['autoreplace'],
-            self.attributes['autotrim'],
-            self.attributes['ashift'],
-            self.attributes['config'],
-        )
-
-    def cdict(self):
-        ret = {}
-        # ashift can only be set at pool creation, that's why it's missing
-        # here.
-        for i in {'autoexpand', 'autoreplace', 'autotrim'}:
-            if self.attributes.get(i):
-                ret[i] = self.attributes[i]
-        return ret
-
-    @property
-    def devices_used(self):
-        devices = set()
-        for option in self.attributes['config']:
-            for device in option['devices']:
-                devices.add(device)
-        return sorted(devices)
-
-    def fix(self, status):
-        if status.must_be_created:
-            cmdline = []
-            for option in self.attributes['config']:
-                if option.get('type'):
-                    cmdline.append(option['type'])
-                    if option['type'] == 'log' and len(option['devices']) > 1:
-                        cmdline.append('mirror')
-
-                for device in sorted(option['devices']):
-                    res = self.node.run("lsblk -rndo fstype {}".format(quote(device)))
-                    detected = res.stdout.decode('UTF-8').strip()
-                    if detected != "":
-                        raise BundleError(_("Node {}, ZFSPool {}: Device {} is to be used for ZFS, but it is not empty! Found '{}'.").format(self.node.name, self.name, device, detected))
-
-                    cmdline.append(quote(device))
-
-            options = set()
-            if self.attributes['ashift']:
-                options.add('-o ashift={}'.format(self.attributes['ashift']))
-
-            self.run('zpool create {} {} {}'.format(
-                ' '.join(sorted(options)),
-                quote(self.name),
-                ' '.join(cmdline),
-            ))
-
-        for attr in status.keys_to_fix:
-            state_str = 'on' if status.cdict[attr] else 'off'
-            self.run('zpool set {}={} {}'.format(attr, state_str, quote(self.name)))
-
-    def sdict(self):
-        status_result = self.run('zpool list {}'.format(quote(self.name)), may_fail=True)
-        if status_result.return_code != 0:
-            return {}
-
-        pool_status = {}
-        for line in self.run('zpool get all -H -o all {}'.format(quote(self.name)), may_fail=True).stdout.decode().splitlines():
-            try:
-                pname, prop, value, source = line.split()
-                pool_status[prop.strip()] = value.strip()
-            except (IndexError, ValueError):
-                continue
-
-        return {
-            'autoexpand': (pool_status.get('autoexpand') == 'on'),
-            'autoreplace': (pool_status.get('autoreplace') == 'on'),
-            'autotrim': (pool_status.get('autotrim') == 'on'),
-        }
-
-    def test(self):
-        duplicate_devices = [
-            item for item, count in Counter(self.devices_used).items() if count > 1
-        ]
-        if duplicate_devices:
-            raise BundleError(_(
-                "{item} on node {node} uses {devices} more than once as an underlying device"
-            ).format(
-                item=self.id,
-                node=self.node.name,
-                devices=_(" and ").join(duplicate_devices),
-            ))
-
-        # Have a look at all other ZFS pools on this node and check if
-        # multiple pools try to use the same device.
- for item in self.node.items: - if ( - item.ITEM_TYPE_NAME == "zfs_pool" and - item.name != self.name and - set(item.devices_used).intersection(set(self.devices_used)) - ): - raise BundleError(_( - "Both the ZFS pools {self} and {other} on node {node} " - "try to use {devices} as the underlying storage device" - ).format( - self=self.name, - other=item.name, - node=self.node.name, - devices=_(" and ").join(set(item.devices_used).intersection(set(self.devices_used))), - )) - - @classmethod - def validate_attributes(cls, bundle, item_id, attributes): - if not isinstance(attributes['config'], list): - raise BundleError(_( - "{item} on node {node}: option 'config' must be a list" - ).format( - item=item_id, - node=bundle.node.name, - )) - - for config in attributes['config']: - if config.get('type', None) not in {None, 'mirror', 'raidz', 'raidz2', 'raidz3', 'cache', 'log'}: - raise BundleError(_( - "{item} on node {node} has invalid type '{type}', " - "must be one of (unset), 'mirror', 'raidz', 'raidz2', " - "'raidz3', 'cache', 'log'" - ).format( - item=item_id, - node=bundle.node.name, - type=config['type'], - )) - - if not config.get('devices', set()): - raise BundleError(_( - "{item} on node {node} uses no devices!" - ).format( - item=item_id, - node=bundle.node.name, - )) - - if config.get('type') == 'log': - if not 0 < len(config['devices']) < 3: - raise BundleError(_( - "{item} on node {node} type 'log' must use exactly " - "one or two devices" - ).format( - item=item_id, - node=bundle.node.name, - )) diff --git a/nodes/home/nas.py b/nodes/home/nas.py index fb4053e..ef4c0ec 100644 --- a/nodes/home/nas.py +++ b/nodes/home/nas.py @@ -204,34 +204,38 @@ nodes['home.nas'] = { 'zfs_arc_max_gb': 8, }, 'pools': { - # Configured manually. Don't touch! - 'storage': [ - { - 'type': 'raidz2', - 'devices': { - '/dev/disk/by-id/ata-WDC_WD6003FFBX-68MU3N0_V8GE15GR', - '/dev/disk/by-id/ata-WDC_WD6003FFBX-68MU3N0_V8HJ406R', - '/dev/disk/by-id/ata-WDC_WD6003FFBX-68MU3N0_V8HJBTLR', - '/dev/disk/by-id/ata-WDC_WD6003FFBX-68MU3N0_V8HJGN6R', - '/dev/disk/by-id/ata-WDC_WD6003FFBX-68MU3N0_V8HJU4NR', - '/dev/disk/by-id/ata-WDC_WD6003FFBX-68MU3N0_V8J8ZKRR', - }, + 'storage': { + 'when_creating': { + 'config': [ + { + 'type': 'raidz2', + 'devices': { + '/dev/disk/by-id/ata-WDC_WD6003FFBX-68MU3N0_V8GE15GR', + '/dev/disk/by-id/ata-WDC_WD6003FFBX-68MU3N0_V8HJ406R', + '/dev/disk/by-id/ata-WDC_WD6003FFBX-68MU3N0_V8HJBTLR', + '/dev/disk/by-id/ata-WDC_WD6003FFBX-68MU3N0_V8HJGN6R', + '/dev/disk/by-id/ata-WDC_WD6003FFBX-68MU3N0_V8HJU4NR', + '/dev/disk/by-id/ata-WDC_WD6003FFBX-68MU3N0_V8J8ZKRR', + }, + }, + { + 'type': 'log', + 'devices': { + '/dev/disk/by-id/ata-TS64GSSD370_B807810503-part1', + '/dev/disk/by-id/ata-TS64GSSD370_B807810527-part1', + }, + }, + { + 'type': 'cache', + 'devices': { + '/dev/disk/by-id/ata-TS64GSSD370_B807810503-part2', + '/dev/disk/by-id/ata-TS64GSSD370_B807810527-part2', + }, + }, + ], + 'ashift': 12, }, - { - 'type': 'log', - 'devices': { - '/dev/disk/by-id/ata-TS64GSSD370_B807810503-part1', - '/dev/disk/by-id/ata-TS64GSSD370_B807810527-part1', - }, - }, - { - 'type': 'cache', - 'devices': { - '/dev/disk/by-id/ata-TS64GSSD370_B807810503-part2', - '/dev/disk/by-id/ata-TS64GSSD370_B807810527-part2', - }, - }, - ] + }, }, 'datasets': { 'storage/backups': {}, diff --git a/nodes/htz-cloud/influxdb.py b/nodes/htz-cloud/influxdb.py index 4b4a5e4..f3eea5c 100644 --- a/nodes/htz-cloud/influxdb.py +++ b/nodes/htz-cloud/influxdb.py @@ -57,9 +57,13 @@ nodes['htz-cloud.influxdb'] = { }, 'zfs': { 'pools': { 
- 'tank': [{ - 'devices': {'/dev/sdb'}, - }], + 'tank': { + 'when_creating': { + 'config': [{ + 'devices': {'/dev/sdb'}, + }], + }, + }, }, }, }, diff --git a/nodes/htz-cloud/luther.py b/nodes/htz-cloud/luther.py index 7bc79fd..c424ade 100644 --- a/nodes/htz-cloud/luther.py +++ b/nodes/htz-cloud/luther.py @@ -80,9 +80,13 @@ nodes['htz-cloud.luther'] = { }, 'zfs': { 'pools': { - 'tank': [{ - 'devices': {'/dev/sdb'}, - }], + 'tank': { + 'when_creating': { + 'config': [{ + 'devices': {'/dev/sdb'}, + }], + }, + }, }, 'datasets': { 'tank/luther-website': { diff --git a/nodes/htz-cloud/pleroma.py b/nodes/htz-cloud/pleroma.py index 6c0833f..dea0500 100644 --- a/nodes/htz-cloud/pleroma.py +++ b/nodes/htz-cloud/pleroma.py @@ -66,9 +66,13 @@ nodes['htz-cloud.pleroma'] = { }, 'zfs': { 'pools': { - 'tank': [{ - 'devices': {'/dev/sdb'}, - }], + 'tank': { + 'when_creating': { + 'config': [{ + 'devices': {'/dev/sdb'}, + }], + }, + }, }, }, }, diff --git a/nodes/htz-cloud/sewfile.py b/nodes/htz-cloud/sewfile.py index b187d1b..95cdcf3 100644 --- a/nodes/htz-cloud/sewfile.py +++ b/nodes/htz-cloud/sewfile.py @@ -69,9 +69,13 @@ nodes['htz-cloud.sewfile'] = { }, 'zfs': { 'pools': { - 'tank': [{ - 'devices': {'/dev/sdb'}, - }], + 'tank': { + 'when_creating': { + 'config': [{ + 'devices': {'/dev/sdb'}, + }], + }, + }, }, 'datasets': { 'tank/mysql': { diff --git a/nodes/ovh/icinga2.py b/nodes/ovh/icinga2.py index 6003d4b..9232c24 100644 --- a/nodes/ovh/icinga2.py +++ b/nodes/ovh/icinga2.py @@ -133,9 +133,15 @@ nodes['ovh.icinga2'] = { }, 'zfs': { 'pools': { - 'tank': [{ - 'devices': {'/dev/sdb'}, - }], + 'tank': { + 'when_creating': { + 'config': [{ + 'devices': { + '/dev/sdb' + }, + }], + }, + }, }, }, 'vm': { diff --git a/nodes/rx300.py b/nodes/rx300.py index 5966639..a15cbec 100644 --- a/nodes/rx300.py +++ b/nodes/rx300.py @@ -481,15 +481,20 @@ nodes['rx300'] = { 'zfs_arc_max_gb': 16, }, 'pools': { - 'tank': [{ - 'type': 'raidz', - 'devices': { - '/dev/sda', - '/dev/sdb', - '/dev/sdc', - '/dev/sdd', + 'tank': { + 'when_creating': { + 'config': [{ + 'type': 'raidz', + 'devices': { + '/dev/sda', + '/dev/sdb', + '/dev/sdc', + '/dev/sdd', + }, + }], + 'ashift': 12, }, - }], + }, }, 'datasets': { 'tank/libvirt': { diff --git a/requirements.txt b/requirements.txt index 39bf1dc..84693d2 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,3 +1,3 @@ -bundlewrap>=4.9.0 +bundlewrap~=4.11.2 PyNaCl bundlewrap-pass