from collections import Counter
from shlex import quote

from bundlewrap.exceptions import BundleError
from bundlewrap.items import Item
from bundlewrap.utils.text import mark_for_translation as _


def prepare_blockdevice(node, device):
    """Prepare *device* on *node* for use by ZFS and return the path to use.

    If the device is a whole disk, create a single GPT partition spanning
    it and return that partition's path; otherwise return the device path
    unchanged.

    Raises BundleError if the device already contains a filesystem.
    """
    # To increase our chances of success, we run partprobe beforehand to
    # make the kernel re-scan all devices.
    node.run("partprobe", may_fail=True)

    # Try to find out if the device already contains some filesystem.
    # Please note that there is no 100% reliable way to do this.
    res = node.run("lsblk -rndo fstype {}".format(quote(device)))
    detected = res.stdout.decode('UTF-8').strip()
    if detected != "":
        raise BundleError(_(
            "Device {} to be used for ZFS, but it is not empty! Has '{}'."
        ).format(device, detected))

    res = node.run("lsblk -rndo type {}".format(quote(device)))
    if res.stdout.decode('UTF-8').strip() == "disk":
        # We create a new partition on a raw disk. That partition will
        # be used for ZFS. Yes, this is on purpose. No, creating a pool
        # on raw disks does not work properly on Linux.
        node.run("parted -s {} mklabel gpt".format(quote(device)))
        node.run("parted -s {} mkpart -a optimal primary 0% 100%".format(quote(device)))
        node.run("partprobe")

        # Simply append a "1" to get to the first partition.
        #
        # XXX I know that this fails if you're using /dev/disk/by-*.
        # Yes, this is a problem if your device names are not
        # predictable. Yes, we could use "lsblk" to try to find the
        # first partition ... but "lsblk" still reports it as
        # "/dev/vdb1" instead of "/dev/disk/by-foo/bar-part1".
        #
        # This is an unsolved problem. Please configure your VMs to use
        # predictable device names.
        if 'nvme' in device:
            # NVMe devices name their partitions with a "p" prefix,
            # e.g. /dev/nvme0n1p1.
            partition = "{}p1".format(device)
        else:
            partition = "{}1".format(device)
        return partition
    else:
        return device


class ZFSPool(Item):
    """
    Creates ZFS pools and the required partitions.
    """
    BUNDLE_ATTRIBUTE_NAME = "zfs_pools"
    ITEM_ATTRIBUTES = {
        'config': None,
        'autotrim': None,
        'autoreplace': None,
        'autoexpand': None,
    }
    ITEM_TYPE_NAME = "zfs_pool"

    def __repr__(self):
        # NOTE(review): the original format string had lost its
        # placeholders ("".format(...)); reconstructed so debug output
        # actually identifies the pool.
        return "<ZFSPool name:{} autoexpand:{} autoreplace:{} autotrim:{} config:{}>".format(
            self.name,
            self.attributes['autoexpand'],
            self.attributes['autoreplace'],
            self.attributes['autotrim'],
            self.attributes['config'],
        )

    def cdict(self):
        """Return the desired state: only the pool options explicitly set."""
        ret = {}
        # Only truthy options are part of the desired state; unset or
        # False options are left unmanaged.
        for i in {'autoexpand', 'autoreplace', 'autotrim'}:
            if self.attributes.get(i):
                ret[i] = self.attributes[i]
        return ret

    @property
    def devices_used(self):
        """Return a sorted list of all block devices this pool uses."""
        devices = set()
        for option in self.attributes['config']:
            for device in option['devices']:
                devices.add(device)
        return sorted(devices)

    def fix(self, status):
        """Create the pool if missing, then fix any drifted pool options."""
        if status.must_be_created:
            cmdline = []
            for option in self.attributes['config']:
                if option.get('type'):
                    cmdline.append(option['type'])
                    # A mirrored log needs an explicit 'mirror' keyword
                    # after 'log' on the zpool command line.
                    if option['type'] == 'log' and len(option['devices']) > 1:
                        cmdline.append('mirror')
                for device in sorted(option['devices']):
                    # The original referenced an undefined name "node"
                    # here; the item's node is self.node.
                    cmdline.append(quote(prepare_blockdevice(self.node, device)))

            self.run('zpool create {} {}'.format(quote(self.name), ' '.join(cmdline)))

        for attr in {'autoexpand', 'autoreplace', 'autotrim'}:
            if attr in status.keys_to_fix:
                state_str = 'on' if status.cdict[attr] else 'off'
                self.run('zpool set {}={} {}'.format(attr, state_str, quote(self.name)))

    def sdict(self):
        """Return the actual state of the pool, or {} if it does not exist."""
        status_result = self.run('zpool list {}'.format(quote(self.name)), may_fail=True)
        if status_result.return_code != 0:
            return {}

        autoexpand_state = self.run(
            'zpool get autoexpand -H -o value {}'.format(quote(self.name)),
            may_fail=True,
        ).stdout.decode().strip()
        autoreplace_state = self.run(
            'zpool get autoreplace -H -o value {}'.format(quote(self.name)),
            may_fail=True,
        ).stdout.decode().strip()
        autotrim_state = self.run(
            'zpool get autotrim -H -o value {}'.format(quote(self.name)),
            may_fail=True,
        ).stdout.decode().strip()

        return {
            'autoexpand': (autoexpand_state == 'on'),
            'autoreplace': (autoreplace_state == 'on'),
            'autotrim': (autotrim_state == 'on'),
        }

    def test(self):
        """Check this pool's config against itself and other pools on the node."""
        duplicate_devices = [
            item for item, count in Counter(self.devices_used).items() if count > 1
        ]
        if duplicate_devices:
            raise BundleError(_(
                "{item} on node {node} uses {devices} more than once as an underlying device"
            ).format(
                item=self.id,
                node=self.node.name,
                devices=_(" and ").join(duplicate_devices),
            ))

        # Have a look at all other ZFS pools on this node and check if
        # multiple pools try to use the same device.
        for item in self.node.items:
            if (
                item.ITEM_TYPE_NAME == "zfs_pool" and
                item.name != self.name and
                set(item.devices_used).intersection(set(self.devices_used))
            ):
                raise BundleError(_(
                    "Both the ZFS pools {self} and {other} on node {node} "
                    "try to use {devices} as the underlying storage device"
                ).format(
                    self=self.name,
                    other=item.name,
                    node=self.node.name,
                    devices=_(" and ").join(
                        set(item.devices_used).intersection(set(self.devices_used))
                    ),
                ))

    @classmethod
    def validate_attributes(cls, bundle, item_id, attributes):
        """Validate the bundle-supplied attributes before the item is used."""
        if not isinstance(attributes['config'], list):
            # The original message was truncated; state the actual
            # constraint that was violated.
            raise BundleError(_(
                "{item} on node {node} 'config' must be a list"
            ).format(
                item=item_id,
                node=bundle.node.name,
            ))

        for config in attributes['config']:
            if not config.get('type', '') in {
                '', 'mirror', 'raidz', 'raidz2', 'raidz3', 'cache', 'log',
            }:
                raise BundleError(_(
                    "{item} on node {node} has invalid type '{type}', "
                    "must be one of (unset), 'mirror', 'raidz', 'raidz2', "
                    "'raidz3', 'cache', 'log'"
                ).format(
                    item=item_id,
                    node=bundle.node.name,
                    type=config['type'],
                ))

            if not config.get('devices', set()):
                raise BundleError(_(
                    "{item} on node {node} uses no devices!"
                ).format(
                    item=item_id,
                    node=bundle.node.name,
                ))

            if config.get('type') == 'log':
                # ZFS allows a log vdev of one device, or two mirrored.
                if not 0 < len(config['devices']) < 3:
                    raise BundleError(_(
                        "{item} on node {node} type 'log' must use exactly "
                        "one or two devices"
                    ).format(
                        item=item_id,
                        node=bundle.node.name,
                    ))