items/zfs_pool: rewrite item to support all kinds of zfs pools
All checks were successful
kunsi/bundlewrap/pipeline/head This commit looks good
This commit is contained in:
parent 9111d49bf4
commit cee2a41771
10 changed files with 134 additions and 117 deletions
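The commit replaces the old single-purpose pool attributes ('device', 'mirrors', 'raidz', …) with a list of vdev options per pool, each carrying an optional 'type' and a set of 'devices'. As a rough sketch of the new metadata shape (the pool name and device paths here are illustrative, not taken from the diff):

    'zfs': {
        'pools': {
            'tank': [
                {
                    # no 'type' given: plain striped vdev
                    'devices': {'/dev/sdb', '/dev/sdc'},
                },
                {
                    'type': 'log',
                    'devices': {'/dev/sdd'},
                },
            ],
        },
    },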
@@ -23,13 +23,13 @@ defaults = {
 def zfs_disks_to_metadata(metadata):
     disks = set()

-    for _, config in metadata.get('zfs/pools', {}).items():
-        if 'device' in config:
-            disks.add(config['device'])
-        else:
-            for t in {'mirror', 'raidz', 'raidz2', 'raidz3'}:
-                for device in config.get(t, set()):
-                    disks.add(device)
+    for config in metadata.get('zfs/pools', {}).values():
+        for option in config:
+            if option.get('type', '') in {'log', 'cache'}:
+                continue
+
+            for disk in option['devices']:
+                disks.add(disk)

     return {
         'smartd': {
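With the new format the reactor no longer needs to know about 'device' vs. 'mirrors' vs. 'raidz*': it walks every option of every pool, skips 'log' and 'cache' vdevs, and hands the remaining disks to the smartd metadata. A standalone illustration of that logic (hypothetical config, same loop as in the hunk above):

    config = [
        {'type': 'raidz2', 'devices': {'/dev/sda', '/dev/sdb', '/dev/sdc'}},
        {'type': 'cache', 'devices': {'/dev/sde'}},
    ]
    disks = set()
    for option in config:
        if option.get('type', '') in {'log', 'cache'}:
            continue

        for disk in option['devices']:
            disks.add(disk)

    # disks == {'/dev/sda', '/dev/sdb', '/dev/sdc'} -- the cache SSD is not monitored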
@@ -79,16 +79,12 @@ svc_systemd = {
 zfs_datasets = node.metadata.get('zfs/datasets', {})

 for name, attrs in node.metadata.get('zfs/pools', {}).items():
-    zfs_pools[name] = attrs
+    zfs_pools[name] = {
+        'config': attrs,
+    }

     if node.os_version[0] > 10:
-        actions[f'pool_{name}_enable_trim'] = {
-            'command': f'zpool set autotrim=on {name}',
-            'unless': f'zpool get autotrim -H -o value {name} | grep -q on',
-            'needs': [
-                f'zfs_pool:{name}'
-            ]
-        }
+        zfs_pools[name]['autotrim'] = True

 directories = {
     "/etc/zfs-snapshot-backup-pre.d": {
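The per-pool autotrim action is gone; on newer OS releases (node.os_version[0] > 10) the bundle now just flags the item itself. The generated item ends up roughly like this (pool name 'tank' is illustrative):

    zfs_pools['tank'] = {
        'config': attrs,      # the vdev list straight from metadata
        'autotrim': True,     # only added when node.os_version[0] > 10
    }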
@@ -6,43 +6,6 @@ from bundlewrap.items import Item
 from bundlewrap.utils.text import mark_for_translation as _


-def create_mirrors(node, path, mirrors):
-    cmd = ""
-    for devices in mirrors:
-        actual_targets = []
-        for device in devices:
-            actual_targets.append(quote(prepare_blockdevice(node, device)))
-        cmd += "mirror {} ".format(" ".join(actual_targets))
-
-    node.run("zpool create {} {}".format(quote(path), cmd))
-    node.run("zfs unmount {}".format(quote(path)))
-
-
-def create_raidz(node, path, devices, raid='raidz'):
-    cmd = ""
-    actual_targets = []
-    for device in devices:
-        actual_targets.append(quote(prepare_blockdevice(node, device)))
-    cmd += "{} {} ".format(raid, " ".join(actual_targets))
-
-    node.run("zpool create {} {}".format(quote(path), cmd))
-    node.run("zfs unmount {}".format(quote(path)))
-
-
-def create_single(node, path, device):
-    actual_target = prepare_blockdevice(node, device)
-    node.run("zpool create {} {}".format(quote(path), quote(actual_target)))
-    node.run("zfs unmount {}".format(quote(path)))
-
-
-def does_exist(node, path):
-    status_result = node.run(
-        "zpool list {}".format(quote(path)),
-        may_fail=True,
-    )
-    return status_result.return_code == 0
-
-
 def prepare_blockdevice(node, device):
     # To increase our chances of success, we run partprobe beforehand to
     # make the kernel re-scan all devices.
@@ -92,53 +55,68 @@ class ZFSPool(Item):
     """
     BUNDLE_ATTRIBUTE_NAME = "zfs_pools"
     ITEM_ATTRIBUTES = {
-        'device': None,
-        'mirrors': None,
-        'raidz': None,
-        'raidz2': None,
-        'raidz3': None,
+        'config': None,
+        'autotrim': None,
+        'autoreplace': None,
+        'autoexpand': None,
     }
     ITEM_TYPE_NAME = "zfs_pool"

     def __repr__(self):
-        return "<ZFSPool name:{} device:{} mirrors:{} raidz:{}>".format(
+        return "<ZFSPool name:{} autoexpand:{} autoreplace:{} autotrim:{} config:{}>".format(
             self.name,
-            self.attributes['device'],
-            self.attributes['mirrors'],
-            self.attributes['raidz'],
+            self.attributes['autoexpand'],
+            self.attributes['autoreplace'],
+            self.attributes['autotrim'],
+            self.attributes['config'],
         )

     def cdict(self):
-        return {}
+        ret = {}
+        for i in {'autoexpand', 'autoreplace', 'autotrim'}:
+            if self.attributes.get(i):
+                ret[i] = self.attributes[i]
+        return ret

     @property
     def devices_used(self):
-        devices = []
-        if self.attributes['device'] is not None:
-            devices.append(self.attributes['device'])
-        if self.attributes['mirrors'] is not None:
-            for mirror in self.attributes['mirrors']:
-                devices.extend(mirror)
-        if self.attributes['raidz'] is not None:
-            devices.extend(self.attributes['raidz'])
-        return devices
+        devices = set()
+        for option in self.attributes['config']:
+            for device in option['devices']:
+                devices.add(device)
+        return sorted(devices)

     def fix(self, status):
         if status.must_be_created:
-            if self.attributes['device'] is not None:
-                create_single(self.node, self.name, self.attributes['device'])
-            elif self.attributes['mirrors'] is not None:
-                create_mirrors(self.node, self.name, self.attributes['mirrors'])
-            elif self.attributes['raidz'] is not None:
-                create_raidz(self.node, self.name, self.attributes['raidz'])
-            elif self.attributes['raidz2'] is not None:
-                create_raidz(self.node, self.name, self.attributes['raidz'], 'raidz2')
-            elif self.attributes['raidz2'] is not None:
-                create_raidz(self.node, self.name, self.attributes['raidz'], 'raidz3')
+            cmdline = []
+            for option in self.attributes['config']:
+                if option.get('type'):
+                    cmdline.append(option['type'])
+                    if option['type'] == 'log' and len(option['devices']) > 1:
+                        cmdline.append('mirror')
+                for device in sorted(option['devices']):
+                    cmdline.append(quote(prepare_blockdevice(node, device)))
+
+            self.run('zpool create {} {}'.format(quote(self.name), ' '.join(cmdline)))
+
+        for attr in {'autoexpand', 'autoreplace', 'autotrim'}:
+            if attr in status.keys_to_fix:
+                state_str = 'on' if status.cdict[attr] else 'off'
+                self.run('zpool set {}={} {}'.format(attr, state_str, quote(self.name)))

     def sdict(self):
-        # We don't care about the device if the pool already exists.
-        return {} if does_exist(self.node, self.name) else None
+        status_result = self.run('zpool list {}'.format(quote(self.name)), may_fail=True)
+        if status_result.return_code != 0:
+            return {}
+
+        autoexpand_state = self.run('zpool get autoexpand -H -o value {}'.format(quote(self.name)), may_fail=True).stdout.decode().strip()
+        autoreplace_state = self.run('zpool get autoreplace -H -o value {}'.format(quote(self.name)), may_fail=True).stdout.decode().strip()
+        autotrim_state = self.run('zpool get autotrim -H -o value {}'.format(quote(self.name)), may_fail=True).stdout.decode().strip()
+        return {
+            'autoexpand': (autoexpand_state == 'on'),
+            'autoreplace': (autoreplace_state == 'on'),
+            'autotrim': (autotrim_state == 'on'),
+        }

     def test(self):
         duplicate_devices = [
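fix() now assembles one flat 'zpool create' invocation from the config list: each option contributes its 'type' keyword (plus an implicit 'mirror' for a two-device log vdev) followed by its devices in sorted order, and the auto* properties are reconciled afterwards via 'zpool set' whenever sdict() and cdict() disagree. A sketch of what gets assembled for a hypothetical two-vdev pool named 'tank':

    config = [
        {'type': 'raidz', 'devices': {'/dev/sdb', '/dev/sdc', '/dev/sdd'}},
        {'type': 'log', 'devices': {'/dev/sde', '/dev/sdf'}},
    ]
    # cmdline becomes
    #   ['raidz', '/dev/sdb', '/dev/sdc', '/dev/sdd', 'log', 'mirror', '/dev/sde', '/dev/sdf']
    # and the item runs roughly
    #   zpool create tank raidz /dev/sdb /dev/sdc /dev/sdd log mirror /dev/sde /dev/sdf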
@@ -173,15 +151,40 @@ class ZFSPool(Item):

     @classmethod
     def validate_attributes(cls, bundle, item_id, attributes):
-        device_config = []
-        for key in ('device', 'mirrors', 'raidz', 'raidz2', 'raidz3'):
-            device_config.append(attributes.get(key))
-        device_config = [key for key in device_config if key is not None]
-        if len(device_config) != 1:
+        if not isinstance(attributes['config'], list):
             raise BundleError(_(
-                "{item} on node {node} must have exactly one of "
-                "'device', 'mirrors', 'raidz', 'raidz2' or 'raidz3'"
+                "{item} on node {node} 'config'"
             ).format(
                 item=item_id,
                 node=bundle.node.name,
             ))
+
+        for config in attributes['config']:
+            if not config.get('type', '') in {'', 'mirror', 'raidz', 'raidz2', 'raidz3', 'cache', 'log'}:
+                raise BundleError(_(
+                    "{item} on node {node} has invalid type '{type}', "
+                    "must be one of (unset), 'mirror', 'raidz', 'raidz2', "
+                    "'raidz3', 'cache', 'log'"
+                ).format(
+                    item=item_id,
+                    node=bundle.node.name,
+                    type=config['type'],
+                ))
+
+            if not config.get('devices', set()):
+                raise BundleError(_(
+                    "{item} on node {node} uses no devices!"
+                ).format(
+                    item=item_id,
+                    node=bundle.node.name,
+                ))
+
+            if config.get('type') == 'log':
+                if not 0 < len(config['devices']) < 3:
+                    raise BundleError(_(
+                        "{item} on node {node} type 'log' must use exactly "
+                        "one or two devices"
+                    ).format(
+                        item=item_id,
+                        node=bundle.node.name,
+                    ))
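The new validation only accepts a list of vdev dicts: every entry needs a known (or absent) 'type' and a non-empty 'devices' set, and a 'log' vdev may use exactly one or two devices. A config like the following (hypothetical) would therefore be rejected with a BundleError:

    'pools': {
        'tank': [{
            'type': 'log',
            'devices': {'/dev/sde', '/dev/sdf', '/dev/sdg'},   # three log devices -> BundleError
        }],
    },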
@@ -197,16 +197,33 @@ nodes['home.nas'] = {
             },
             'pools': {
                 # Configured manually. Don't touch!
-                'storage': {
-                    'raidz2': {
-                        '/dev/disk/by-id/ata-WDC_WD6003FFBX-68MU3N0_V8GE15GR',
-                        '/dev/disk/by-id/ata-WDC_WD6003FFBX-68MU3N0_V8HJ406R',
-                        '/dev/disk/by-id/ata-WDC_WD6003FFBX-68MU3N0_V8HJBTLR',
-                        '/dev/disk/by-id/ata-WDC_WD6003FFBX-68MU3N0_V8HJGN6R',
-                        '/dev/disk/by-id/ata-WDC_WD6003FFBX-68MU3N0_V8HJU4NR',
-                        '/dev/disk/by-id/ata-WDC_WD6003FFBX-68MU3N0_V8J8ZKRR',
-                    },
-                },
+                'storage': [
+                    {
+                        'type': 'raidz2',
+                        'devices': {
+                            '/dev/disk/by-id/ata-WDC_WD6003FFBX-68MU3N0_V8GE15GR',
+                            '/dev/disk/by-id/ata-WDC_WD6003FFBX-68MU3N0_V8HJ406R',
+                            '/dev/disk/by-id/ata-WDC_WD6003FFBX-68MU3N0_V8HJBTLR',
+                            '/dev/disk/by-id/ata-WDC_WD6003FFBX-68MU3N0_V8HJGN6R',
+                            '/dev/disk/by-id/ata-WDC_WD6003FFBX-68MU3N0_V8HJU4NR',
+                            '/dev/disk/by-id/ata-WDC_WD6003FFBX-68MU3N0_V8J8ZKRR',
+                        },
+                    },
+                    {
+                        'type': 'log',
+                        'devices': {
+                            '/dev/disk/by-id/ata-TS64GSSD370_B807810503-part1',
+                            '/dev/disk/by-id/ata-TS64GSSD370_B807810527-part1',
+                        },
+                    },
+                    {
+                        'type': 'cache',
+                        'devices': {
+                            '/dev/disk/by-id/ata-TS64GSSD370_B807810503-part2',
+                            '/dev/disk/by-id/ata-TS64GSSD370_B807810527-part2',
+                        },
+                    },
+                ],
             },
             'datasets': {
                 'storage/backups': {},
@@ -57,9 +57,9 @@ nodes['htz-cloud.influxdb'] = {
         },
         'zfs': {
             'pools': {
-                'tank': {
-                    'device': '/dev/sdb',
-                },
+                'tank': [{
+                    'devices': {'/dev/sdb'},
+                }],
             },
         },
     },
@@ -80,9 +80,9 @@ nodes['htz-cloud.luther'] = {
         },
         'zfs': {
            'pools': {
-                'tank': {
-                    'device': '/dev/sdb',
-                },
+                'tank': [{
+                    'devices': {'/dev/sdb'},
+                }],
             },
             'datasets': {
                 'tank/luther-website': {
@@ -66,9 +66,9 @@ nodes['htz-cloud.pleroma'] = {
         },
         'zfs': {
             'pools': {
-                'tank': {
-                    'device': '/dev/sdb',
-                },
+                'tank': [{
+                    'devices': {'/dev/sdb'},
+                }],
             },
         },
     },
@@ -69,9 +69,9 @@ nodes['htz-cloud.sewfile'] = {
         },
         'zfs': {
             'pools': {
-                'tank': {
-                    'device': '/dev/sdb',
-                },
+                'tank': [{
+                    'devices': {'/dev/sdb'},
+                }],
             },
             'datasets': {
                 'tank/mysql': {
@@ -133,9 +133,9 @@ nodes['ovh.icinga2'] = {
         },
         'zfs': {
             'pools': {
-                'tank': {
-                    'device': '/dev/sdb',
-                },
+                'tank': [{
+                    'devices': {'/dev/sdb'},
+                }],
             },
         },
         'vm': {
@@ -310,14 +310,15 @@ nodes['rx300'] = {
                 'zfs_arc_max_gb': 16,
             },
             'pools': {
-                'tank': {
-                    'raidz': {
+                'tank': [{
+                    'type': 'raidz',
+                    'devices': {
                         '/dev/sda',
                         '/dev/sdb',
                         '/dev/sdc',
                         '/dev/sdd',
                     },
-                },
+                }],
             },
             'datasets': {
                 'tank/libvirt': {