items/zfs_pool: rewrite item to support all kinds of zfs pools
All checks were successful
kunsi/bundlewrap/pipeline/head This commit looks good
All checks were successful
kunsi/bundlewrap/pipeline/head This commit looks good
This commit is contained in:
parent
9111d49bf4
commit
cee2a41771
10 changed files with 134 additions and 117 deletions
|
@ -23,13 +23,13 @@ defaults = {
|
|||
def zfs_disks_to_metadata(metadata):
|
||||
disks = set()
|
||||
|
||||
for _, config in metadata.get('zfs/pools', {}).items():
|
||||
if 'device' in config:
|
||||
disks.add(config['device'])
|
||||
else:
|
||||
for t in {'mirror', 'raidz', 'raidz2', 'raidz3'}:
|
||||
for device in config.get(t, set()):
|
||||
disks.add(device)
|
||||
for config in metadata.get('zfs/pools', {}).values():
|
||||
for option in config:
|
||||
if option.get('type', '') in {'log', 'cache'}:
|
||||
continue
|
||||
|
||||
for disk in option['devices']:
|
||||
disks.add(disk)
|
||||
|
||||
return {
|
||||
'smartd': {
|
||||
|
|
|
@ -79,16 +79,12 @@ svc_systemd = {
|
|||
zfs_datasets = node.metadata.get('zfs/datasets', {})
|
||||
|
||||
for name, attrs in node.metadata.get('zfs/pools', {}).items():
|
||||
zfs_pools[name] = attrs
|
||||
zfs_pools[name] = {
|
||||
'config': attrs,
|
||||
}
|
||||
|
||||
if node.os_version[0] > 10:
|
||||
actions[f'pool_{name}_enable_trim'] = {
|
||||
'command': f'zpool set autotrim=on {name}',
|
||||
'unless': f'zpool get autotrim -H -o value {name} | grep -q on',
|
||||
'needs': [
|
||||
f'zfs_pool:{name}'
|
||||
]
|
||||
}
|
||||
zfs_pools[name]['autotrim'] = True
|
||||
|
||||
directories = {
|
||||
"/etc/zfs-snapshot-backup-pre.d": {
|
||||
|
|
|
@ -6,43 +6,6 @@ from bundlewrap.items import Item
|
|||
from bundlewrap.utils.text import mark_for_translation as _
|
||||
|
||||
|
||||
def create_mirrors(node, path, mirrors):
    """Create a zpool at *path* built from one or more mirror vdevs.

    Each element of *mirrors* is an iterable of block devices; every
    group is prepared via prepare_blockdevice() and turned into one
    "mirror" vdev. The new pool is unmounted immediately afterwards so
    datasets can be managed separately.
    """
    cmd = ""
    for member_devices in mirrors:
        prepared = [
            quote(prepare_blockdevice(node, dev))
            for dev in member_devices
        ]
        cmd += "mirror {} ".format(" ".join(prepared))

    node.run("zpool create {} {}".format(quote(path), cmd))
    node.run("zfs unmount {}".format(quote(path)))
|
||||
|
||||
|
||||
def create_raidz(node, path, devices, raid='raidz'):
    """Create a zpool at *path* from a single raidz-style vdev.

    *raid* selects the redundancy level ('raidz', 'raidz2' or 'raidz3');
    all *devices* are prepared and joined into that one vdev. The pool
    is unmounted right after creation.
    """
    prepared = [quote(prepare_blockdevice(node, dev)) for dev in devices]
    cmd = "{} {} ".format(raid, " ".join(prepared))

    node.run("zpool create {} {}".format(quote(path), cmd))
    node.run("zfs unmount {}".format(quote(path)))
|
||||
|
||||
|
||||
def create_single(node, path, device):
    """Create a zpool at *path* backed by a single block device.

    The device is prepared via prepare_blockdevice(), then the pool is
    created and unmounted immediately.
    """
    target = prepare_blockdevice(node, device)
    node.run("zpool create {} {}".format(quote(path), quote(target)))
    node.run("zfs unmount {}".format(quote(path)))
|
||||
|
||||
|
||||
def does_exist(node, path):
    """Return True if a zpool named *path* already exists on *node*.

    Runs `zpool list <path>` with may_fail=True and checks the exit
    code: zero means the pool is present.
    """
    result = node.run(
        "zpool list {}".format(quote(path)),
        may_fail=True,
    )
    return result.return_code == 0
|
||||
|
||||
|
||||
def prepare_blockdevice(node, device):
|
||||
# To increase our chances of success, we run partprobe beforehand to
|
||||
# make the kernel re-scan all devices.
|
||||
|
@ -92,53 +55,68 @@ class ZFSPool(Item):
|
|||
"""
|
||||
BUNDLE_ATTRIBUTE_NAME = "zfs_pools"
|
||||
ITEM_ATTRIBUTES = {
|
||||
'device': None,
|
||||
'mirrors': None,
|
||||
'raidz': None,
|
||||
'raidz2': None,
|
||||
'raidz3': None,
|
||||
'config': None,
|
||||
'autotrim': None,
|
||||
'autoreplace': None,
|
||||
'autoexpand': None,
|
||||
}
|
||||
ITEM_TYPE_NAME = "zfs_pool"
|
||||
|
||||
def __repr__(self):
|
||||
return "<ZFSPool name:{} device:{} mirrors:{} raidz:{}>".format(
|
||||
return "<ZFSPool name:{} autoexpand:{} autoreplace:{} autotrim:{} config:{}>".format(
|
||||
self.name,
|
||||
self.attributes['device'],
|
||||
self.attributes['mirrors'],
|
||||
self.attributes['raidz'],
|
||||
self.attributes['autoexpand'],
|
||||
self.attributes['autoreplace'],
|
||||
self.attributes['autotrim'],
|
||||
self.attributes['config'],
|
||||
)
|
||||
|
||||
def cdict(self):
|
||||
return {}
|
||||
ret = {}
|
||||
for i in {'autoexpand', 'autoreplace', 'autotrim'}:
|
||||
if self.attributes.get(i):
|
||||
ret[i] = self.attributes[i]
|
||||
return ret
|
||||
|
||||
@property
|
||||
def devices_used(self):
|
||||
devices = []
|
||||
if self.attributes['device'] is not None:
|
||||
devices.append(self.attributes['device'])
|
||||
if self.attributes['mirrors'] is not None:
|
||||
for mirror in self.attributes['mirrors']:
|
||||
devices.extend(mirror)
|
||||
if self.attributes['raidz'] is not None:
|
||||
devices.extend(self.attributes['raidz'])
|
||||
return devices
|
||||
devices = set()
|
||||
for option in self.attributes['config']:
|
||||
for device in option['devices']:
|
||||
devices.add(device)
|
||||
return sorted(devices)
|
||||
|
||||
def fix(self, status):
|
||||
if status.must_be_created:
|
||||
if self.attributes['device'] is not None:
|
||||
create_single(self.node, self.name, self.attributes['device'])
|
||||
elif self.attributes['mirrors'] is not None:
|
||||
create_mirrors(self.node, self.name, self.attributes['mirrors'])
|
||||
elif self.attributes['raidz'] is not None:
|
||||
create_raidz(self.node, self.name, self.attributes['raidz'])
|
||||
elif self.attributes['raidz2'] is not None:
|
||||
create_raidz(self.node, self.name, self.attributes['raidz'], 'raidz2')
|
||||
elif self.attributes['raidz2'] is not None:
|
||||
create_raidz(self.node, self.name, self.attributes['raidz'], 'raidz3')
|
||||
cmdline = []
|
||||
for option in self.attributes['config']:
|
||||
if option.get('type'):
|
||||
cmdline.append(option['type'])
|
||||
if option['type'] == 'log' and len(option['devices']) > 1:
|
||||
cmdline.append('mirror')
|
||||
for device in sorted(option['devices']):
|
||||
cmdline.append(quote(prepare_blockdevice(node, device)))
|
||||
|
||||
self.run('zpool create {} {}'.format(quote(self.name), ' '.join(cmdline)))
|
||||
|
||||
for attr in {'autoexpand', 'autoreplace', 'autotrim'}:
|
||||
if attr in status.keys_to_fix:
|
||||
state_str = 'on' if status.cdict[attr] else 'off'
|
||||
self.run('zpool set {}={} {}'.format(attr, state_str, quote(self.name)))
|
||||
|
||||
def sdict(self):
|
||||
# We don't care about the device if the pool already exists.
|
||||
return {} if does_exist(self.node, self.name) else None
|
||||
status_result = self.run('zpool list {}'.format(quote(self.name)), may_fail=True)
|
||||
if status_result.return_code != 0:
|
||||
return {}
|
||||
|
||||
autoexpand_state = self.run('zpool get autoexpand -H -o value {}'.format(quote(self.name)), may_fail=True).stdout.decode().strip()
|
||||
autoreplace_state = self.run('zpool get autoreplace -H -o value {}'.format(quote(self.name)), may_fail=True).stdout.decode().strip()
|
||||
autotrim_state = self.run('zpool get autotrim -H -o value {}'.format(quote(self.name)), may_fail=True).stdout.decode().strip()
|
||||
return {
|
||||
'autoexpand': (autoexpand_state == 'on'),
|
||||
'autoreplace': (autoreplace_state == 'on'),
|
||||
'autotrim': (autotrim_state == 'on'),
|
||||
}
|
||||
|
||||
def test(self):
|
||||
duplicate_devices = [
|
||||
|
@ -173,15 +151,40 @@ class ZFSPool(Item):
|
|||
|
||||
@classmethod
|
||||
def validate_attributes(cls, bundle, item_id, attributes):
|
||||
device_config = []
|
||||
for key in ('device', 'mirrors', 'raidz', 'raidz2', 'raidz3'):
|
||||
device_config.append(attributes.get(key))
|
||||
device_config = [key for key in device_config if key is not None]
|
||||
if len(device_config) != 1:
|
||||
if not isinstance(attributes['config'], list):
|
||||
raise BundleError(_(
|
||||
"{item} on node {node} must have exactly one of "
|
||||
"'device', 'mirrors', 'raidz', 'raidz2' or 'raidz3'"
|
||||
"{item} on node {node} 'config'"
|
||||
).format(
|
||||
item=item_id,
|
||||
node=bundle.node.name,
|
||||
))
|
||||
|
||||
for config in attributes['config']:
|
||||
if not config.get('type', '') in {'', 'mirror', 'raidz', 'raidz2', 'raidz3', 'cache', 'log'}:
|
||||
raise BundleError(_(
|
||||
"{item} on node {node} has invalid type '{type}', "
|
||||
"must be one of (unset), 'mirror', 'raidz', 'raidz2', "
|
||||
"'raidz3', 'cache', 'log'"
|
||||
).format(
|
||||
item=item_id,
|
||||
node=bundle.node.name,
|
||||
type=config['type'],
|
||||
))
|
||||
|
||||
if not config.get('devices', set()):
|
||||
raise BundleError(_(
|
||||
"{item} on node {node} uses no devices!"
|
||||
).format(
|
||||
item=item_id,
|
||||
node=bundle.node.name,
|
||||
))
|
||||
|
||||
if config.get('type') == 'log':
|
||||
if not 0 < len(config['devices']) < 3:
|
||||
raise BundleError(_(
|
||||
"{item} on node {node} type 'log' must use exactly "
|
||||
"one or two devices"
|
||||
).format(
|
||||
item=item_id,
|
||||
node=bundle.node.name,
|
||||
))
|
||||
|
|
|
@ -197,16 +197,33 @@ nodes['home.nas'] = {
|
|||
},
|
||||
'pools': {
|
||||
# Configured manually. Don't touch!
|
||||
'storage': {
|
||||
'raidz2': {
|
||||
'/dev/disk/by-id/ata-WDC_WD6003FFBX-68MU3N0_V8GE15GR',
|
||||
'/dev/disk/by-id/ata-WDC_WD6003FFBX-68MU3N0_V8HJ406R',
|
||||
'/dev/disk/by-id/ata-WDC_WD6003FFBX-68MU3N0_V8HJBTLR',
|
||||
'/dev/disk/by-id/ata-WDC_WD6003FFBX-68MU3N0_V8HJGN6R',
|
||||
'/dev/disk/by-id/ata-WDC_WD6003FFBX-68MU3N0_V8HJU4NR',
|
||||
'/dev/disk/by-id/ata-WDC_WD6003FFBX-68MU3N0_V8J8ZKRR',
|
||||
'storage': [
|
||||
{
|
||||
'type': 'raidz2',
|
||||
'devices': {
|
||||
'/dev/disk/by-id/ata-WDC_WD6003FFBX-68MU3N0_V8GE15GR',
|
||||
'/dev/disk/by-id/ata-WDC_WD6003FFBX-68MU3N0_V8HJ406R',
|
||||
'/dev/disk/by-id/ata-WDC_WD6003FFBX-68MU3N0_V8HJBTLR',
|
||||
'/dev/disk/by-id/ata-WDC_WD6003FFBX-68MU3N0_V8HJGN6R',
|
||||
'/dev/disk/by-id/ata-WDC_WD6003FFBX-68MU3N0_V8HJU4NR',
|
||||
'/dev/disk/by-id/ata-WDC_WD6003FFBX-68MU3N0_V8J8ZKRR',
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
'type': 'log',
|
||||
'devices': {
|
||||
'/dev/disk/by-id/ata-TS64GSSD370_B807810503-part1',
|
||||
'/dev/disk/by-id/ata-TS64GSSD370_B807810527-part1',
|
||||
},
|
||||
},
|
||||
{
|
||||
'type': 'cache',
|
||||
'devices': {
|
||||
'/dev/disk/by-id/ata-TS64GSSD370_B807810503-part2',
|
||||
'/dev/disk/by-id/ata-TS64GSSD370_B807810527-part2',
|
||||
},
|
||||
},
|
||||
]
|
||||
},
|
||||
'datasets': {
|
||||
'storage/backups': {},
|
||||
|
|
|
@ -57,9 +57,9 @@ nodes['htz-cloud.influxdb'] = {
|
|||
},
|
||||
'zfs': {
|
||||
'pools': {
|
||||
'tank': {
|
||||
'device': '/dev/sdb',
|
||||
},
|
||||
'tank': [{
|
||||
'devices': {'/dev/sdb'},
|
||||
}],
|
||||
},
|
||||
},
|
||||
},
|
||||
|
|
|
@ -80,9 +80,9 @@ nodes['htz-cloud.luther'] = {
|
|||
},
|
||||
'zfs': {
|
||||
'pools': {
|
||||
'tank': {
|
||||
'device': '/dev/sdb',
|
||||
},
|
||||
'tank': [{
|
||||
'devices': {'/dev/sdb'},
|
||||
}],
|
||||
},
|
||||
'datasets': {
|
||||
'tank/luther-website': {
|
||||
|
|
|
@ -66,9 +66,9 @@ nodes['htz-cloud.pleroma'] = {
|
|||
},
|
||||
'zfs': {
|
||||
'pools': {
|
||||
'tank': {
|
||||
'device': '/dev/sdb',
|
||||
},
|
||||
'tank': [{
|
||||
'devices': {'/dev/sdb'},
|
||||
}],
|
||||
},
|
||||
},
|
||||
},
|
||||
|
|
|
@ -69,9 +69,9 @@ nodes['htz-cloud.sewfile'] = {
|
|||
},
|
||||
'zfs': {
|
||||
'pools': {
|
||||
'tank': {
|
||||
'device': '/dev/sdb',
|
||||
},
|
||||
'tank': [{
|
||||
'devices': {'/dev/sdb'},
|
||||
}],
|
||||
},
|
||||
'datasets': {
|
||||
'tank/mysql': {
|
||||
|
|
|
@ -133,9 +133,9 @@ nodes['ovh.icinga2'] = {
|
|||
},
|
||||
'zfs': {
|
||||
'pools': {
|
||||
'tank': {
|
||||
'device': '/dev/sdb',
|
||||
},
|
||||
'tank': [{
|
||||
'devices': {'/dev/sdb'},
|
||||
}],
|
||||
},
|
||||
},
|
||||
'vm': {
|
||||
|
|
|
@ -310,14 +310,15 @@ nodes['rx300'] = {
|
|||
'zfs_arc_max_gb': 16,
|
||||
},
|
||||
'pools': {
|
||||
'tank': {
|
||||
'raidz': {
|
||||
'tank': [{
|
||||
'type': 'raidz',
|
||||
'devices': {
|
||||
'/dev/sda',
|
||||
'/dev/sdb',
|
||||
'/dev/sdc',
|
||||
'/dev/sdd',
|
||||
},
|
||||
},
|
||||
}],
|
||||
},
|
||||
'datasets': {
|
||||
'tank/libvirt': {
|
||||
|
|
Loading…
Reference in a new issue