bundlewrap/bundles/backup-server/metadata.py

defaults = {
    'openssh': {
        'allowed_users': {
            # Usernames for backup clients always start with 'c-'
            'c-*',
        },
    },
}
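

# Collect every node in the repo that uses the 'backup-client' bundle (and is
# not opted out via 'backups/exclude_from_backups') and publish it, together
# with its backup user name, under 'backup-server/clients'.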
@metadata_reactor.provides(
    'backup-server/clients',
)
def get_my_clients(metadata):
    my_clients = {}
    for rnode in repo.nodes:
        if not rnode.has_bundle('backup-client') or rnode.metadata.get('backups/exclude_from_backups', False):
            continue

        my_clients[rnode.name] = {
            'user': rnode.metadata.get('backup-client/user-name'),
        }

    return {
        'backup-server': {
            'clients': my_clients,
        },
    }
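

# Turn the devices listed in 'backup-server/encrypted-devices' (a mapping of
# block device to passphrase, e.g. {'/dev/sdb': '<passphrase>'}) into dm-crypt
# entries and build a single zfs pool named 'backups' on top of the decrypted
# /dev/mapper devices.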
@metadata_reactor.provides(
    'backup-server/zfs-base',
    'dm-crypt/encrypted-devices',
    'zfs/pools',
)
def zfs_pool(metadata):
    if not metadata.get('backup-server/encrypted-devices', {}):
        return {}

    crypt_devices = {}
    pool_devices = set()
    unlock_actions = set()

    for number, (device, passphrase) in enumerate(sorted(metadata.get('backup-server/encrypted-devices', {}).items())):
        crypt_devices[device] = {
            'dm-name': f'backup{number}',
            'passphrase': passphrase,
        }
        pool_devices.add(f'/dev/mapper/backup{number}')
        unlock_actions.add(f'action:dm-crypt_open_backup{number}')

    pool_opts = {
        'devices': pool_devices,
    }

    if len(pool_devices) > 2:
        pool_opts['type'] = 'raidz'
    elif len(pool_devices) > 1:
        pool_opts['type'] = 'mirror'

    return {
        'backup-server': {
            'zfs-base': 'backups',
        },
        'dm-crypt': {
            'encrypted-devices': crypt_devices,
        },
        'zfs': {
            'pools': {
                'backups': {
                    'when_creating': {
                        'config': [
                            pool_opts,
                        ],
                    },
                    'needs': unlock_actions,
                    # This is a bit hacky: using 'zpool import' as the
                    # 'unless' condition auto-imports the pool once the
                    # devices have been decrypted. Otherwise the pool would
                    # not be imported yet and bundlewrap would try to
                    # re-create it. -N avoids auto-mounting anything.
                    'unless': 'zpool import -N backups',
                },
            },
        },
    }
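

# Create one dataset per backup client below the pool from
# 'backup-server/zfs-base' and attach a default snapshot retention
# policy to each of them.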
@metadata_reactor.provides(
    'zfs/datasets',
    'zfs/snapshots/retain_per_dataset',
)
def zfs_datasets_and_snapshots(metadata):
    zfs_datasets = {}
    zfs_retains = {}
    retain_defaults = {
        'weekly': 4,
        'monthly': 6,
    }

    for client in metadata.get('backup-server/clients', {}).keys():
        dataset = '{}/{}'.format(metadata.get('backup-server/zfs-base'), client)

        zfs_datasets[dataset] = {
            'mountpoint': '/srv/backups/{}'.format(client),
            'compression': 'on',
        }

        zfs_retains[dataset] = retain_defaults.copy()

    return {
        'zfs': {
            'datasets': zfs_datasets,
            'snapshots': {
                'retain_per_dataset': zfs_retains,
            },
        },
    }