bundles/zfs: import bundle from work repository

Franzi 2020-08-29 21:10:59 +02:00
parent b690ae25b0
commit 4934eb46fb
Signed by: kunsi
GPG key ID: 12E3D2136B818350
11 changed files with 841 additions and 0 deletions

@@ -0,0 +1,33 @@
#!/bin/sh

monitoring=/var/tmp/zfs-auto-snapshot.status
crit_days=1

uptime=$(cut -d. -f1 /proc/uptime)
if [ "$uptime" -lt 3600 ]
then
    echo 'OK - The system has just booted'
    exit 0
fi

now=$(date +%s)
timestamp=$(cat "$monitoring")
if [ -z "$timestamp" ]
then
    echo 'UNKNOWN - No status info found'
    exit 3
fi

if [ "$timestamp" = 0 ]
then
    echo 'OK - Snapshots disabled'
    exit 0
elif [ $(( now - timestamp )) -gt $(( 60 * 60 * 24 * crit_days )) ]
then
    echo "CRITICAL - Status file indicates age greater than $crit_days day(s)"
    exit 2
else
    echo 'OK'
    exit 0
fi

@@ -0,0 +1,63 @@
#!/usr/bin/env python3

from datetime import datetime, timedelta
from re import match
from subprocess import check_output
from sys import exit

output = check_output(['zfs', 'get', 'creation', '-Hpr', '-t', 'snapshot'])

now = int(datetime.now().timestamp())
warn_age = now - (60 * 60 * 24 * 60)
crit_age = now - (60 * 60 * 24 * 90)

warn_snapshots = set()
crit_snapshots = set()
return_code = 0

for line in output.decode('utf-8').split("\n"):
    if line.strip() == '':
        continue

    items = line.split("\t")

    # If the snapshot name contains 'zfs-auto-snap', it's probably
    # really an automated snapshot and will be cleaned up eventually.
    # This check only cares about manually created snapshots, though.
    if 'zfs-auto-snap' in items[0]:
        continue

    # These are docker-internal snapshots and should not be touched by
    # us.
    if match(r'^tank/docker/[a-z0-9]+(-init)?@[0-9]+', items[0]):
        continue

    # line should be '<snapshot> creation <timestamp> -', separated by
    # tabstops.
    if len(items) < 3:
        print('UNKNOWN - error while parsing ' + line)
        exit(3)

    creation_date = int(items[2])

    if creation_date < crit_age:
        crit_snapshots.add(items[0])
    elif creation_date < warn_age:
        warn_snapshots.add(items[0])

# We have to do additional loops in here to have CRITICAL items on top.
for snap in sorted(crit_snapshots):
    print('CRITICAL - {} is older than 90 days'.format(snap))

for snap in sorted(warn_snapshots):
    print('WARN - {} is older than 60 days'.format(snap))

if len(crit_snapshots) > 0:
    return_code = 2
elif len(warn_snapshots) > 0:
    return_code = 1
else:
    print('OK - no snapshots are older than 60 days')

exit(return_code)
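
The check parses the tab-separated output of `zfs get creation -Hpr -t snapshot` and only uses the snapshot name and the creation timestamp. A minimal sketch of the expected line format (sample values are made up):

# Hypothetical sample line: '<snapshot> creation <timestamp> <source>'
sample = "tank/data@before-upgrade\tcreation\t1588291200\t-"
items = sample.split("\t")
name, creation = items[0], int(items[2])  # -> 'tank/data@before-upgrade', 1588291200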

@@ -0,0 +1,26 @@
#!/bin/bash
# Written by jpaul, 2017-03
# Extended by El Pinguino, 2017-07
<%
datasetlist = sorted(node.metadata.get('zfs', {}).get('datasets', {}).items())
volumes = []
for dataset, options in datasetlist:
    if options.get('mountpoint', 'none') != 'none':
        volumes.append(dataset)
%>\
exitcode=0
% for volume in volumes:
if [[ "$(zfs get -Hp -o value mounted '${volume}')" != "yes" ]]
then
    echo 'CRITICAL - ${volume} not mounted'
    exitcode=2
fi
% endfor

if (( exitcode == 0 ))
then
    echo OK
fi

exit $exitcode

@@ -0,0 +1,15 @@
#!/bin/bash

if [ $# -eq 0 ] ; then
    echo "Please provide pool name as first argument, e.g. 'tank'."
    exit 3
fi

if [ "$(zpool status "$1" | grep '^ state:')" = ' state: ONLINE' ]
then
    echo "OK - Pool '$1' is online"
    exit 0
else
    echo "CRITICAL - Pool '$1' is FAULTY or NOT ONLINE"
    exit 2
fi

@@ -0,0 +1,40 @@
#!/usr/bin/env python3

from subprocess import check_output
from sys import argv, exit

import re


def to_bytes(size):
    suffixes = ['', 'K', 'M', 'G', 'T', 'P']
    number, suffix = re.match(r'([0-9\.]+)([A-Z]?)', size).groups()
    assert suffix in suffixes, 'Unexpected suffix "{}" in size "{}"'.format(suffix, size)
    return float(number) * 1024**suffixes.index(suffix)


pool = argv[1]
critical_perc = float(argv[2])

try:
    output = check_output(['zpool', 'list', '-Ho', 'size,alloc', pool])
except Exception:
    print('CRITICAL - "zpool" failed')
    exit(2)

size, alloc = output.decode('UTF-8').strip().split()

try:
    size_b = to_bytes(size)
    alloc_b = to_bytes(alloc)
except Exception:
    print('CRITICAL - Could not process output of "zpool list": {}'.format(output))
    exit(2)

percentage = alloc_b / size_b * 100

if percentage > critical_perc:
    print('CRITICAL - Pool "{}" uses {:.2f}% of its space'.format(pool, percentage))
    exit(2)

print('OK - Pool "{}" uses {:.2f}% of its space'.format(pool, percentage))
exit(0)
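
Without `-p`, `zpool list -Ho size,alloc` prints human-readable sizes with single-letter suffixes, which `to_bytes` converts back to byte counts. A quick worked example (values illustrative):

# Hypothetical zpool output: size "1.50T", alloc "512" (no suffix means plain bytes)
print(to_bytes('1.50T'))  # 1.5 * 1024**4 -> 1649267441664.0
print(to_bytes('512'))    # -> 512.0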

@@ -0,0 +1,57 @@
#!/usr/bin/env python3

import re
from datetime import datetime
from json import loads
from subprocess import check_call, check_output
from sys import argv


def create_snap_and_rotate(ds, label, retain, now, all_snapshots):
    new_snap = '{}@zfs-auto-snap_{}-{}'.format(ds, label, now)
    check_call(['zfs', 'snapshot', new_snap])

    prefix = '{}@zfs-auto-snap_{}-'.format(ds, label)
    my_candidates = []
    for i in sorted(all_snapshots):
        if i.startswith(prefix):
            my_candidates.append(i)
    my_candidates.append(new_snap)

    for i in my_candidates[:-retain]:
        assert '@' in i, 'BUG! Dataset "{}" has no @!'.format(i)
        check_call(['zfs', 'destroy', i])


label = argv[1]

with open('/etc/zfs-snapshot-config.json', 'r') as fp:
    metadata = loads(fp.read())

if 'snapshot_only' in metadata:
    datasets = set(metadata['snapshot_only'])
else:
    output = check_output(['zfs', 'list', '-H', '-o', 'name']).decode('UTF-8')
    datasets = set(output.splitlines())

for pattern in metadata.get('snapshot_never', set()):
    datasets = set(filter(lambda x: not re.search(pattern, x), datasets))

default_retain = metadata['retain_defaults'][label]
now = datetime.now().strftime('%F-%H%M')
snapshots_created = False

if datasets:
    all_snapshots = check_output(['zfs', 'list', '-H', '-o', 'name', '-t', 'snap']).decode('UTF-8').splitlines()

    for ds in datasets:
        retain = int(metadata.get('retain_per_dataset', {}).get(ds, {}).get(label, default_retain))
        if retain > 0:
            create_snap_and_rotate(ds, label, retain, now, all_snapshots)
            snapshots_created = True

with open('/var/tmp/zfs-auto-snapshot.status', 'w') as fp:
    fp.write('{}\n'.format(datetime.now().strftime('%s') if snapshots_created else 0))
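
The script reads its configuration from /etc/zfs-snapshot-config.json, which items.py (further down in this commit) generates from the node's `zfs/snapshots` metadata. A hedged illustration of the structure it understands (dataset names and patterns are made up):

# Only 'retain_defaults' is required; the other keys are optional.
example_config = {
    "retain_defaults": {"hourly": 24, "daily": 7, "weekly": 2, "monthly": 1},
    "snapshot_never": ["^tank/docker/"],                   # regex patterns to exclude
    "snapshot_only": ["tank/data", "tank/mysql"],          # if present, limit snapshots to these datasets
    "retain_per_dataset": {"tank/data": {"monthly": 12}},  # per-dataset, per-label overrides
}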

@@ -0,0 +1,6 @@
<%
arc_max_mb = node.metadata.get('zfs', {}).get('module_options', {}).get('zfs_arc_max_mb', 1024)
%>\
% if arc_max_mb != 0:
options zfs zfs_arc_max=${arc_max_mb * 1024 * 1024}
% endif

bundles/zfs/items.py Normal file

@@ -0,0 +1,141 @@
from json import dumps
from os.path import join

from bundlewrap.metadata import MetadataJSONEncoder

actions = {}

pkg_apt = {
    'zfs-zed': {
        'needed_by': {
            'zfs_dataset:',
            'zfs_pool:',
        },
    },
    'zfsutils-linux': {
        'needed_by': {
            'zfs_dataset:',
            'zfs_pool:',
        },
    },
    'parted': {
        'needed_by': {
            'zfs_pool:',
        },
    },
}

files = {
    '/etc/cron.d/zfsutils-linux': {
        'delete': True,
        'needs': ['pkg_apt:zfsutils-linux'],
    },
    "/etc/modprobe.d/zfs.conf": {
        'source': 'zfs-modprobe.conf',
        'content_type': 'mako',
        'mode': '0755',
    },
    '/etc/zfs-snapshot-config.json': {
        'content': dumps(
            node.metadata.get('zfs', {}).get('snapshots', {}),
            cls=MetadataJSONEncoder,  # turns sets into sorted lists
            indent=4,
            sort_keys=True,
        ) + '\n',
    },
    '/etc/zfs/zed.d/zed.rc': {
        'content': 'ZED_EMAIL_ADDR="hostmaster@kunbox.net"\nZED_EMAIL_PROG="mail"\nZED_NOTIFY_INTERVAL_SECS=3600\n',
        'mode': '0600',
        'triggers': ['svc_systemd:zed:restart'],
    },
    '/usr/local/sbin/zfs-auto-snapshot': {
        'mode': '0755',
    },
    '/usr/lib/nagios/plugins/check_zfs_auto_snapshot': {
        'mode': '0755',
    },
    '/usr/lib/nagios/plugins/check_zfs_old_snapshots': {
        'mode': '0755',
    },
    "/usr/lib/nagios/plugins/check_zfs_volumes": {
        'mode': '0755',
        'content_type': 'mako',
    },
    "/usr/lib/nagios/plugins/check_zpool_online": {
        'mode': '0755',
    },
    "/usr/lib/nagios/plugins/check_zpool_space": {
        'mode': '0755',
    },
}

svc_systemd = {
    'zed': {
        'needs': ['pkg_apt:zfs-zed'],
    },
}

zfs_datasets = node.metadata.get('zfs', {}).get('datasets', {})

zfs_pools = {}
for name, attrs in node.metadata.get('zfs', {}).get('pools', {}).items():
    zfs_pools[name] = attrs

    # Not yet supported on debian buster
    #actions[f'pool_{name}_enable_trim'] = {
    #    'command': f'zpool set autotrim=on {name}',
    #    'unless': f'zpool get autotrim -H -o value {name} | grep -q on',
    #    'needs': [
    #        f'zfs_pool:{name}'
    #    ]
    #}

directories = {
    "/etc/zfs-snapshot-backup-pre.d": {
        'purge': True,
    },
    "/etc/zfs-snapshot-backup-post.d": {
        'purge': True,
    },
    "/etc/zfs-snapshot-backup-final.d": {
        'purge': True,
    },
}

# TODO implement when we start managing backups via bundlewrap
#if node.metadata.get('zfs', {}).get('snapshots', {}).get('backup', {}).get('enabled', True):
#    directories["/mnt/zfs-snapshot-backup"] = {}
#
#    files["/usr/local/sbin/zfs-backup-snapshot"] = {
#        'content_type': 'mako',
#        'context': {
#            # Set by our own metadata processor, guaranteed to exist.
#            'filesystems': node.metadata['zfs']['snapshots']['backup']['filesystems_with_snapshot'],
#        },
#        'mode': '0755',
#    }
#    files["/usr/local/sbin/zfs-backup-snapshot-unmount"] = {
#        'content_type': 'mako',
#        'context': {
#            # Set by our own metadata processor, guaranteed to exist.
#            'filesystems': node.metadata['zfs']['snapshots']['backup']['filesystems_with_snapshot'],
#        },
#        'mode': '0755',
#    }
#
#else:
#    files["/mnt/zfs-snapshot-backup"] = {'delete': True}

# TODO when we start using telegraf
#if node.has_bundle('telegraf'):
#    files['/etc/telegraf-zfs-dataset.conf'] = {
#        'content': dumps(
#            node.metadata.get('zfs', {}),
#            cls=MetadataJSONEncoder,
#            indent=4,
#            sort_keys=True,
#        ) + '\n',
#    }
#    files['/usr/local/bin/telegraf-zfs-dataset'] = {
#        'mode': '0775',
#    }
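
For reference, a minimal node metadata snippet that this bundle consumes (a hypothetical example; all pool, dataset, and device names are illustrative):

node_metadata_example = {
    'zfs': {
        'pools': {
            'tank': {'device': '/dev/sdb'},
        },
        'datasets': {
            'tank/data': {'mountpoint': '/data', 'compression': 'lz4'},
            'tank/mysql': {'mountpoint': '/var/lib/mysql', 'recordsize': '16384'},
        },
        'module_options': {'zfs_arc_max_mb': 2048},
        'snapshots': {
            'retain_per_dataset': {'tank/mysql': {'hourly': 48}},
        },
    },
}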

bundles/zfs/metadata.py Normal file

@@ -0,0 +1,139 @@
import re

defaults = {
    'cron': {
        'zfs-auto-snapshot-daily': '0 0 * * * root /usr/local/sbin/zfs-auto-snapshot daily',
        'zfs-auto-snapshot-hourly': '0 * * * * root /usr/local/sbin/zfs-auto-snapshot hourly',
        'zfs-auto-snapshot-monthly': '0 0 1 * * root /usr/local/sbin/zfs-auto-snapshot monthly',
        'zfs-auto-snapshot-weekly': '0 0 * * 7 root /usr/local/sbin/zfs-auto-snapshot weekly',
    },
    'zfs': {
        'datasets': {},
        'pools': {},
        'snapshots': {
            # 'backup': {
            #     'enabled': True,
            #     'filesystems_with_snapshot': {},
            # },
            'retain_defaults': {
                'hourly': 24,
                'daily': 7,
                'weekly': 2,
                'monthly': 1,
            },
        },
    },
}

#if node.has_bundle('telegraf'):
#    defaults.update({
#        'telegraf': {
#            'input_plugins': {
#                'exec': {
#                    'zfs_dataset': {
#                        'command': 'telegraf-zfs-dataset',
#                        'interval': '120s',
#                    },
#                },
#                'zfs': {},
#            },
#        },
#    })
#    defaults['sudo']['verbatim'].add('telegraf ALL=(ALL) NOPASSWD:/sbin/zfs list *')

if node.has_bundle('sshmon'):
    defaults.update({
        'icinga2_api': {
            'zfs': {
                'services': {
                    'ZFS AUTO SNAPSHOT': {
                        'command_on_monitored_host': '/usr/lib/nagios/plugins/check_zfs_auto_snapshot',
                    },
                    'ZFS MOUNTED VOLUMES': {
                        'command_on_monitored_host': '/usr/lib/nagios/plugins/check_zfs_volumes',
                    },
                },
            },
        },
    })


@metadata_reactor
def zfs_scrub_cronjob(metadata):
    when = metadata.get('zfs/scrub/cron', '{} 0 * * sun'.format((node.magic_number % 60)))
    return {
        'cron': {
            'zfs-scrub': '{} root /usr/lib/zfs-linux/scrub'.format(when),
        },
    }


# TODO
#@metadata_reactor
#def zfs_snapshot_backup(metadata):
#    if metadata.get('zfs/snapshots/backup/enabled'):
#        # Collect all filesystems/datasets (e.g., "tank/mysql") which
#        # are configured for (local) snapshots. For each of them, store
#        # the mountpoint. This information will be used primarily by
#        # "/usr/local/sbin/zfs-backup-snapshot", but may also be used by
#        # other bundles (think backup tools).
#        #
#        # In other words, this API allows other bundles to check whether
#        # a path belongs to a ZFS dataset with snapshots enabled.
#
#        filesystems = {}
#
#        if metadata.get('zfs/snapshots/snapshot_only', None) is not None:
#            for name in metadata.get('zfs/snapshots/snapshot_only'):
#                attrs = metadata.get('zfs/datasets')[name]
#                if attrs.get('mountpoint') not in (None, "none"):
#                    filesystems[name] = attrs['mountpoint']
#        else:
#            for name, attrs in metadata.get('zfs/datasets').items():
#                if attrs.get('mountpoint') not in (None, "none"):
#                    filesystems[name] = attrs['mountpoint']
#
#        for pattern in metadata.get('zfs/snapshots/snapshot_never', set()):
#            filesystems = {k: v for k, v in filesystems.items() if not re.search(pattern, k)}
#
#        return {
#            'zfs': {
#                'snapshots': {
#                    'backup': {
#                        'filesystems_with_snapshot': filesystems,
#                    },
#                },
#            },
#        }
#    else:
#        return {}


@metadata_reactor
def monitoring(metadata):
    if not node.has_bundle('sshmon'):
        raise DoNotRunAgain

    services = {}

    for poolname, pool_options in metadata.get('zfs/pools').items():
        services['ZFS ZPOOL ONLINE {}'.format(poolname)] = {
            'command_on_monitored_host': 'sudo /usr/lib/nagios/plugins/check_zpool_online {}'.format(poolname),
        }
        services['ZFS ZPOOL SPACE ' + poolname] = {
            'command_on_monitored_host': 'sudo /usr/lib/nagios/plugins/check_zpool_space {} 90'.format(poolname)
        }

    services['ZFS OLD SNAPSHOTS'] = {
        'command_on_monitored_host': 'sudo /usr/lib/nagios/plugins/check_zfs_old_snapshots',
    }

    return {
        'icinga2_api': {
            'zfs': {
                'services': services,
            },
        },
    }
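
On a node that has the sshmon bundle and a single pool named 'tank' (a hypothetical example), the monitoring reactor above would contribute roughly:

reactor_result = {
    'icinga2_api': {
        'zfs': {
            'services': {
                'ZFS ZPOOL ONLINE tank': {
                    'command_on_monitored_host': 'sudo /usr/lib/nagios/plugins/check_zpool_online tank',
                },
                'ZFS ZPOOL SPACE tank': {
                    'command_on_monitored_host': 'sudo /usr/lib/nagios/plugins/check_zpool_space tank 90',
                },
                'ZFS OLD SNAPSHOTS': {
                    'command_on_monitored_host': 'sudo /usr/lib/nagios/plugins/check_zfs_old_snapshots',
                },
            },
        },
    },
}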

items/zfs_dataset.py Normal file

@@ -0,0 +1,140 @@
from pipes import quote

from bundlewrap.items import Item
from bundlewrap.utils.text import mark_for_translation as _


def create(node, path, options):
    option_list = []
    for option, value in sorted(options.items()):
        # We must exclude the 'mounted' property here because it's a
        # read-only "informational" property.
        if option != 'mounted' and value is not None:
            option_list.append("-o {}={}".format(quote(option), quote(value)))
    option_args = " ".join(option_list)

    node.run(
        "zfs create {} {}".format(
            option_args,
            quote(path),
        ),
        may_fail=True,
    )

    if options['mounted'] == 'no':
        set_option(node, path, 'mounted', 'no')


def does_exist(node, path):
    status_result = node.run(
        "zfs list {}".format(quote(path)),
        may_fail=True,
    )
    return status_result.return_code == 0


def get_option(node, path, option):
    cmd = "zfs get -Hp -o value {} {}".format(quote(option), quote(path))
    # We always expect this to succeed since we don't call this function
    # if we have already established that the dataset does not exist.
    status_result = node.run(cmd)
    return status_result.stdout.decode('utf-8').strip()


def set_option(node, path, option, value):
    if option == 'mounted':
        # 'mounted' is a read-only property that can not be altered by
        # 'set'. We need to call 'zfs mount tank/foo'.
        node.run(
            "zfs {} {}".format(
                "mount" if value == 'yes' else "unmount",
                quote(path),
            ),
            may_fail=True,
        )
    else:
        node.run(
            "zfs set {}={} {}".format(
                quote(option),
                quote(value),
                quote(path),
            ),
            may_fail=True,
        )


class ZFSDataset(Item):
    """
    Creates ZFS datasets and manages their options.
    """
    BUNDLE_ATTRIBUTE_NAME = "zfs_datasets"
    ITEM_ATTRIBUTES = {
        'atime': None,
        'acltype': None,
        'compression': None,
        'mountpoint': None,
        'quota': None,
        'recordsize': None,
        'dedup': None,
    }
    ITEM_TYPE_NAME = "zfs_dataset"

    def __repr__(self):
        return f"<ZFSDataset name:{self.name} {' '.join(f'{k}:{v}' for k, v in self.attributes.items())}>"

    def cdict(self):
        cdict = {}
        for option, value in self.attributes.items():
            if option == 'mountpoint' and value is None:
                value = "none"
            if value is not None:
                cdict[option] = value
        cdict['mounted'] = 'no' if cdict.get('mountpoint') in (None, "none") else 'yes'
        return cdict

    def fix(self, status):
        if status.must_be_created:
            create(self.node, self.name, status.cdict)
        else:
            for option in status.keys_to_fix:
                set_option(self.node, self.name, option, status.cdict[option])

    def get_auto_deps(self, items):
        pool = self.name.split("/")[0]
        pool_item = "zfs_pool:{}".format(pool)
        pool_item_found = False

        for item in items:
            if item.ITEM_TYPE_NAME == "zfs_pool" and item.name == pool:
                # Add dependency to the pool this dataset resides on.
                pool_item_found = True
                yield pool_item
            elif (
                item.ITEM_TYPE_NAME == "zfs_dataset" and
                self.name != item.name and
                self.name.startswith(item.name + "/")
            ):
                # Find all other datasets that are parents of this
                # dataset.
                # XXX Could be optimized by finding the "largest"
                # parent only.
                yield item.id

        if not pool_item_found:
            raise Exception(_(
                "ZFS dataset {dataset} resides on pool {pool} but item "
                "{dep} does not exist"
            ).format(
                dataset=self.name,
                pool=pool,
                dep=pool_item,
            ))

    def sdict(self):
        if not does_exist(self.node, self.name):
            return None

        sdict = {}
        for option, value in self.attributes.items():
            sdict[option] = get_option(self.node, self.name, option)
        sdict['mounted'] = get_option(self.node, self.name, 'mounted')
        return sdict
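
The only non-obvious part of the status comparison is the synthetic 'mounted' key, which cdict() derives from the mountpoint attribute. A small sketch of that derivation (attribute values are made up):

attrs = {'mountpoint': None, 'compression': 'lz4', 'quota': None}

cdict = {}
for option, value in attrs.items():
    if option == 'mountpoint' and value is None:
        value = 'none'
    if value is not None:
        cdict[option] = value
cdict['mounted'] = 'no' if cdict.get('mountpoint') in (None, 'none') else 'yes'

print(cdict)  # {'mountpoint': 'none', 'compression': 'lz4', 'mounted': 'no'}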

items/zfs_pool.py Normal file

@@ -0,0 +1,181 @@
from collections import Counter
from pipes import quote

from bundlewrap.exceptions import BundleError
from bundlewrap.items import Item
from bundlewrap.utils.text import mark_for_translation as _


def create_mirrors(node, path, mirrors):
    cmd = ""
    for devices in mirrors:
        actual_targets = []
        for device in devices:
            actual_targets.append(quote(prepare_blockdevice(node, device)))
        cmd += "mirror {} ".format(" ".join(actual_targets))

    node.run("zpool create {} {}".format(quote(path), cmd))
    node.run("zfs unmount {}".format(quote(path)))


def create_raidz(node, path, devices):
    cmd = ""
    actual_targets = []
    for device in devices:
        actual_targets.append(quote(prepare_blockdevice(node, device)))
    cmd += "raidz {} ".format(" ".join(actual_targets))

    node.run("zpool create {} {}".format(quote(path), cmd))
    node.run("zfs unmount {}".format(quote(path)))


def create_single(node, path, device):
    actual_target = prepare_blockdevice(node, device)
    node.run("zpool create {} {}".format(quote(path), quote(actual_target)))
    node.run("zfs unmount {}".format(quote(path)))


def does_exist(node, path):
    status_result = node.run(
        "zpool list {}".format(quote(path)),
        may_fail=True,
    )
    return status_result.return_code == 0


def prepare_blockdevice(node, device):
    # To increase our chances of success, we run partprobe beforehand to
    # make the kernel re-scan all devices.
    node.run("partprobe", may_fail=True)

    # Try to find out if the device already contains some filesystem.
    # Please note that there is no 100% reliable way to do this.
    res = node.run("lsblk -rndo fstype {}".format(quote(device)))
    detected = res.stdout.decode('UTF-8').strip()
    if detected != "":
        raise Exception(_("Device {} is to be used for ZFS, but it is not empty! Has '{}'.").format(
            device, detected))

    res = node.run("lsblk -rndo type {}".format(device))
    if res.stdout.decode('UTF-8').strip() == "disk":
        # We create a new partition on a raw disk. That partition will
        # be used for ZFS. Yes, this is on purpose. No, creating a pool
        # on raw disks does not work properly on Linux.
        node.run("parted {} mklabel gpt".format(quote(device)))
        node.run("parted {} mkpart -a optimal primary 0% 100%".format(quote(device)))
        node.run("partprobe")

        # Simply append a "1" to get to the first partition.
        #
        # XXX I know that this fails if you're using /dev/disk/by-*.
        # Yes, this is a problem if your device names are not
        # predictable. Yes, we could use "lsblk" to try to find the
        # first partition ... but "lsblk" still reports it as
        # "/dev/vdb1" instead of "/dev/disk/by-foo/bar-part1".
        #
        # This is an unsolved problem. Please configure your VMs to use
        # predictable device names.
        if device.find('nvme') != -1:  # NVMe devices name their partitions with a "p" prefix, e.g. "p1"
            partition = "{}p1".format(device)
        else:
            partition = "{}1".format(device)
        return partition
    else:
        return device


class ZFSPool(Item):
    """
    Creates ZFS pools and the required partitions.
    """
    BUNDLE_ATTRIBUTE_NAME = "zfs_pools"
    ITEM_ATTRIBUTES = {
        'device': None,
        'mirrors': None,
        'raidz': None,
    }
    ITEM_TYPE_NAME = "zfs_pool"

    def __repr__(self):
        return "<ZFSPool name:{} device:{} mirrors:{} raidz:{}>".format(
            self.name,
            self.attributes['device'],
            self.attributes['mirrors'],
            self.attributes['raidz'],
        )

    def cdict(self):
        return {}

    @property
    def devices_used(self):
        devices = []
        if self.attributes['device'] is not None:
            devices.append(self.attributes['device'])
        if self.attributes['mirrors'] is not None:
            for mirror in self.attributes['mirrors']:
                devices.extend(mirror)
        if self.attributes['raidz'] is not None:
            devices.extend(self.attributes['raidz'])
        return devices

    def fix(self, status):
        if status.must_be_created:
            if self.attributes['device'] is not None:
                create_single(self.node, self.name, self.attributes['device'])
            elif self.attributes['mirrors'] is not None:
                create_mirrors(self.node, self.name, self.attributes['mirrors'])
            elif self.attributes['raidz'] is not None:
                create_raidz(self.node, self.name, self.attributes['raidz'])

    def sdict(self):
        # We don't care about the device if the pool already exists.
        return {} if does_exist(self.node, self.name) else None

    def test(self):
        duplicate_devices = [
            item for item, count in Counter(self.devices_used).items() if count > 1
        ]
        if duplicate_devices:
            raise BundleError(_(
                "{item} on node {node} uses {devices} more than once as an underlying device"
            ).format(
                item=self.id,
                node=self.node.name,
                devices=_(" and ").join(duplicate_devices),
            ))

        # Have a look at all other ZFS pools on this node and check if
        # multiple pools try to use the same device.
        for item in self.node.items:
            if (
                item.ITEM_TYPE_NAME == "zfs_pool" and
                item.name != self.name and
                set(item.devices_used).intersection(set(self.devices_used))
            ):
                raise BundleError(_(
                    "Both the ZFS pools {self} and {other} on node {node} "
                    "try to use {devices} as the underlying storage device"
                ).format(
                    self=self.name,
                    other=item.name,
                    node=self.node.name,
                    devices=_(" and ").join(set(item.devices_used).intersection(set(self.devices_used))),
                ))

    @classmethod
    def validate_attributes(cls, bundle, item_id, attributes):
        device_config = []
        for key in ('device', 'mirrors', 'raidz'):
            device_config.append(attributes.get(key))
        device_config = [key for key in device_config if key is not None]
        if len(device_config) != 1:
            raise BundleError(_(
                "{item} on node {node} must have exactly one of "
                "'device', 'mirrors', or 'raidz'"
            ).format(
                item=item_id,
                node=bundle.node.name,
            ))
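
A hedged example of pool definitions this item type accepts, showing the three mutually exclusive layouts enforced by validate_attributes (pool and device names are illustrative):

zfs_pools = {
    'tank': {'device': '/dev/sdb'},                              # single device
    'storage': {'mirrors': [['/dev/sdc', '/dev/sdd'],
                            ['/dev/sde', '/dev/sdf']]},          # striped mirror vdevs
    'backup': {'raidz': ['/dev/sdg', '/dev/sdh', '/dev/sdi']},   # one raidz vdev
}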