items/zfs_*: improve
All checks were successful
kunsi/bundlewrap/pipeline/head This commit looks good
parent 9f8878ae8f
commit 1ae328d8f3
2 changed files with 38 additions and 69 deletions
@@ -120,16 +120,6 @@ class ZFSDataset(Item):
                 # parent only.
                 yield item.id
 
-            # XXX This populates 'needs', not 'needed_by'. We have opened
-            # an issue: https://github.com/bundlewrap/bundlewrap/issues/648
-            # elif self.attributes.get('mountpoint'):
-            #     for item_type in ['directory', 'file', 'git_deploy']:
-            #         if (
-            #             item.ITEM_TYPE_NAME == item_type and
-            #             item.name.startswith('{}:{}'.format(item_type, self.attributes['mountpoint']))
-            #         ):
-            #             yield item.id
-
 
         if not pool_item_found:
             raise BundleError(_(
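The block removed above was only ever a note to self: yielding from get_auto_deps() populates the dataset's own 'needs', while the intent was the reverse, making directory, file and git_deploy items below the mountpoint depend on the dataset (hence the linked bundlewrap issue). Until that is possible, the dependency can be declared from the consuming item's side in a bundle; a minimal, hypothetical sketch (path and dataset name are made up):

    # Hypothetical bundle snippet: the directory waits for the dataset explicitly,
    # because the dataset item cannot push a 'needed_by' from get_auto_deps().
    directories = {
        '/mnt/data/www': {
            'needs': [
                'zfs_dataset:tank/data',
            ],
        },
    }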
@@ -6,52 +6,9 @@ from bundlewrap.items import Item
 from bundlewrap.utils.text import mark_for_translation as _
 
 
-def prepare_blockdevice(node, device):
-    # To increase our chances of success, we run partprobe beforehand to
-    # make the kernel re-scan all devices.
-    node.run("partprobe", may_fail=True)
-
-    # Try to find out if the device already contains some filesystem.
-    # Please note that there is no 100% reliable way to do this.
-    res = node.run("lsblk -rndo fstype {}".format(quote(device)))
-    detected = res.stdout.decode('UTF-8').strip()
-    if detected != "":
-        raise Exception(_("Device {} to be used for ZFS, but it is not empty! Has '{}'.").format(
-            device, detected))
-
-    res = node.run("lsblk -rndo type {}".format(device))
-    if res.stdout.decode('UTF-8').strip() == "disk":
-        # We create a new partition on a raw disk. That partition will
-        # be used for ZFS. Yes, this is on purpose. No, creating a pool
-        # on raw disks does not work properly on Linux.
-        node.run("parted -s {} mklabel gpt".format(quote(device)))
-        node.run("parted -s {} mkpart -a optimal primary 0% 100%".format(quote(device)))
-        node.run("partprobe")
-
-        # Simply append a "1" to get to the first partition.
-        #
-        # XXX I know that this fails if you're using /dev/disk/by-*.
-        # Yes, this is a problem if your device names are not
-        # predictable. Yes, we could use "lsblk" to try to find the
-        # first partition ... but "lsblk" still reports it as
-        # "/dev/vdb1" instead of "/dev/disk/by-foo/bar-part1".
-        #
-        # This is an unsolved problem. Please configure your VMs to use
-        # predictable device names.
-        if device.find('nvme') != -1:  # NVME devices have the partitions with the prefix pX
-            partition = "{}p1".format(device)
-
-        else:
-            partition = "{}1".format(device)
-
-        return partition
-    else:
-        return device
-
-
 class ZFSPool(Item):
     """
-    Creates ZFS pools and the required partitions.
+    Creates ZFS pools.
     """
     BUNDLE_ATTRIBUTE_NAME = "zfs_pools"
     ITEM_ATTRIBUTES = {
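The partition-naming caveat in the removed helper is worth spelling out: appending "1" (or "p1" for NVMe) only works for kernel names like /dev/vdb, while the stable /dev/disk/by-id and /dev/disk/by-path links name their partitions with a "-partN" suffix. This commit sidesteps the whole problem by no longer partitioning at all, but for reference, a hedged sketch of a suffix helper covering both conventions (not part of the commit) could look like this:

    def first_partition(device):
        # Sketch only: udev by-id/by-path links use "<link>-part1",
        # NVMe namespaces use "<dev>p1", other kernel names use "<dev>1".
        if device.startswith('/dev/disk/by-'):
            return '{}-part1'.format(device)
        elif 'nvme' in device:
            return '{}p1'.format(device)
        else:
            return '{}1'.format(device)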
@@ -59,20 +16,24 @@ class ZFSPool(Item):
         'autotrim': None,
         'autoreplace': None,
         'autoexpand': None,
+        'ashift': None,
     }
     ITEM_TYPE_NAME = "zfs_pool"
 
     def __repr__(self):
-        return "<ZFSPool name:{} autoexpand:{} autoreplace:{} autotrim:{} config:{}>".format(
+        return "<ZFSPool name:{} autoexpand:{} autoreplace:{} autotrim:{} ashift:{} config:{}>".format(
             self.name,
             self.attributes['autoexpand'],
             self.attributes['autoreplace'],
             self.attributes['autotrim'],
+            self.attributes['ashift'],
             self.attributes['config'],
         )
 
     def cdict(self):
         ret = {}
+        # ashift can only be set at pool creation, that's why it's missing
+        # here.
         for i in {'autoexpand', 'autoreplace', 'autotrim'}:
             if self.attributes.get(i):
                 ret[i] = self.attributes[i]
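Because ashift is only honoured by `zpool create`, it has to be present in the pool definition from the start; changing it later has no effect on existing vdevs, which is why cdict() deliberately leaves it out. A hypothetical zfs_pools bundle entry using the attribute names from this diff (pool name, devices and values are invented):

    zfs_pools = {
        'tank': {
            'ashift': 12,        # 2^12 byte (4 KiB) sectors, passed as `-o ashift=12` at creation
            'autotrim': True,
            'config': [
                {
                    'type': 'mirror',
                    'devices': ['/dev/sdb', '/dev/sdc'],
                },
            ],
        },
    }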
@@ -94,13 +55,26 @@ class ZFSPool(Item):
                 cmdline.append(option['type'])
                 if option['type'] == 'log' and len(option['devices']) > 1:
                     cmdline.append('mirror')
 
             for device in sorted(option['devices']):
-                cmdline.append(quote(prepare_blockdevice(node, device)))
+                res = node.run("lsblk -rndo fstype {}".format(quote(device)))
+                detected = res.stdout.decode('UTF-8').strip()
+                if detected != "":
+                    raise BundleError(_("Node {}, ZFSPool {}: Device {} to be used for ZFS, but it is not empty! Has '{}'.").format(self.node.name, self.name, device, detected))
 
-        self.run('zpool create {} {}'.format(quote(self.name), ' '.join(cmdline)))
+                cmdline.append(quote(device))
 
-        for attr in {'autoexpand', 'autoreplace', 'autotrim'}:
-            if attr in status.keys_to_fix:
+        options = set()
+        if self.attributes['ashift']:
+            options.add('-o ashift={}'.format(self.attributes['ashift']))
+
+        self.run('zpool create {} {} {}'.format(
+            ' '.join(sorted(options)),
+            quote(self.name),
+            ' '.join(cmdline),
+        ))
+
+        for attr in status.keys_to_fix:
             state_str = 'on' if status.cdict[attr] else 'off'
             self.run('zpool set {}={} {}'.format(attr, state_str, quote(self.name)))
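For a bundle entry like the hypothetical one above, the creation path now assembles its command roughly as follows (a standalone sketch of the string handling only, nothing is run against a node):

    from shlex import quote

    options = {'-o ashift=12'}
    cmdline = ['mirror', quote('/dev/sdb'), quote('/dev/sdc')]

    print('zpool create {} {} {}'.format(
        ' '.join(sorted(options)),
        quote('tank'),
        ' '.join(cmdline),
    ))
    # prints: zpool create -o ashift=12 tank mirror /dev/sdb /dev/sdc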
@@ -109,13 +83,18 @@ class ZFSPool(Item):
         if status_result.return_code != 0:
             return {}
 
-        autoexpand_state = self.run('zpool get autoexpand -H -o value {}'.format(quote(self.name)), may_fail=True).stdout.decode().strip()
-        autoreplace_state = self.run('zpool get autoreplace -H -o value {}'.format(quote(self.name)), may_fail=True).stdout.decode().strip()
-        autotrim_state = self.run('zpool get autotrim -H -o value {}'.format(quote(self.name)), may_fail=True).stdout.decode().strip()
+        pool_status = {}
+        for line in self.run('zpool get all -H -o all {}'.format(quote(self.name)), may_fail=True).stdout.decode().splitlines():
+            try:
+                pname, prop, value, source = line.split()
+                pool_status[prop.strip()] = value.strip()
+            except (IndexError, ValueError):
+                continue
 
         return {
-            'autoexpand': (autoexpand_state == 'on'),
-            'autoreplace': (autoreplace_state == 'on'),
-            'autotrim': (autotrim_state == 'on'),
+            'autoexpand': (pool_status.get('autoexpand') == 'on'),
+            'autoreplace': (pool_status.get('autoreplace') == 'on'),
+            'autotrim': (pool_status.get('autotrim') == 'on'),
         }
 
     def test(self):
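The new sdict gathers every property in a single `zpool get all -H -o all` call instead of one `zpool get` per property. With -H the output is tab-separated script output, one property per line with the columns name, property, value and source, which is what the four-way unpacking relies on. A self-contained sketch of the same parsing against sample output (values invented):

    sample = (
        'tank\tsize\t928G\t-\n'
        'tank\tautoexpand\toff\tdefault\n'
        'tank\tautotrim\ton\tlocal\n'
    )

    pool_status = {}
    for line in sample.splitlines():
        try:
            pname, prop, value, source = line.split()
            pool_status[prop] = value
        except (IndexError, ValueError):
            # lines that do not split into exactly four fields are ignored
            continue

    print(pool_status)
    # {'size': '928G', 'autoexpand': 'off', 'autotrim': 'on'}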
@@ -153,14 +132,14 @@ class ZFSPool(Item):
     def validate_attributes(cls, bundle, item_id, attributes):
         if not isinstance(attributes['config'], list):
             raise BundleError(_(
-                "{item} on node {node} 'config'"
+                "{item} on node {node}: option 'config' must be a list"
             ).format(
                 item=item_id,
                 node=bundle.node.name,
             ))
 
         for config in attributes['config']:
-            if not config.get('type', '') in {'', 'mirror', 'raidz', 'raidz2', 'raidz3', 'cache', 'log'}:
+            if config.get('type', None) not in {None, 'mirror', 'raidz', 'raidz2', 'raidz3', 'cache', 'log'}:
                 raise BundleError(_(
                     "{item} on node {node} has invalid type '{type}', "
                     "must be one of (unset), 'mirror', 'raidz', 'raidz2', "
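A small behavioural difference in the tightened check: an entry that omits 'type' entirely is still accepted, because the new default None is in the allowed set, but an explicit empty string, which the old `config.get('type', '')` variant silently accepted, is now rejected. Illustrated with invented entries:

    allowed = {None, 'mirror', 'raidz', 'raidz2', 'raidz3', 'cache', 'log'}

    print({'devices': ['/dev/sdb']}.get('type', None) in allowed)    # True, 'type' unset
    print({'type': 'mirror'}.get('type', None) in allowed)           # True
    print({'type': ''}.get('type', None) in allowed)                 # False, rejected now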