from bundlewrap.exceptions import BundleError

defaults = {
    'backup-server': {
        'my_ssh_port': 22,
    },
    'openssh': {
        'allowed_users': {
            # Usernames for backup clients always start with 'c-'
            'c-*',
        },
    },
    'systemd-timers': {
        'timers': {
            'check_backup_for_node-cron': {
                'command': '/usr/local/share/icinga/plugins/check_backup_for_node-cron',
                'when': '*-*-* *:00/5:00',  # every five minutes
            },
        },
    },
    'zfs': {
        # The whole point of doing backups is to keep them for a long
        # time, which eliminates the need for this check.
        'enable_old_snapshots_check': False,
    },
}


@metadata_reactor.provides(
    'backup-server/clients',
    'backup-server/my_hostname',
)
def get_my_clients(metadata):
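    """
    Collects every node that runs the 'backup-client' bundle and targets
    this node, and publishes it (with monitoring, scheduling and retention
    settings) as 'backup-server/clients'. Also exposes this node's hostname
    as 'backup-server/my_hostname'.
    """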
    my_clients = {}
    retain_defaults = {
        'daily': 14,
        'weekly': 4,
        'monthly': 6,
    }

    for rnode in repo.nodes:
        if not rnode.has_bundle('backup-client') or rnode.metadata.get('backups/exclude_from_backups', False):
            continue

        if node.name != rnode.metadata.get('backup-client/target'):
            continue

        my_clients[rnode.name] = {
            'exclude_from_monitoring': rnode.metadata.get(
                'backup-client/exclude_from_monitoring',
                rnode.metadata.get(
                    'icinga_options/exclude_from_monitoring',
                    False,
                ),
            ),
            'one_backup_every_hours': rnode.metadata.get('backup-client/one_backup_every_hours', 24),
            'user': rnode.metadata.get('backup-client/user-name'),
            'retain': {
                'daily': rnode.metadata.get('backups/retain/daily', retain_defaults['daily']),
                'weekly': rnode.metadata.get('backups/retain/weekly', retain_defaults['weekly']),
                'monthly': rnode.metadata.get('backups/retain/monthly', retain_defaults['monthly']),
            },
        }

    return {
        'backup-server': {
            'clients': my_clients,
            'my_hostname': metadata.get('hostname'),
        },
    }


@metadata_reactor.provides(
    'backup-server/zfs-base',
    'dm-crypt/encrypted-devices',
    'zfs/pools',
)
def zfs_pool(metadata):
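    """
    Derives the dm-crypt and ZFS pool configuration for the 'backups' pool
    from 'backup-server/encrypted-devices'. Does nothing if no encrypted
    devices are configured.
    """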
    if not metadata.get('backup-server/encrypted-devices', {}):
        return {}

    crypt_devices = {}
    unlock_actions = set()

    devices = metadata.get('backup-server/encrypted-devices')

    # TODO remove this once we have migrated all systems
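    # A plain dict is the older format: all devices end up in a single pool.
    # A list of dicts is the newer format: each list entry becomes its own
    # raidz pool.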
    if isinstance(devices, dict):
        pool_devices = set()

        for number, (device, passphrase) in enumerate(sorted(devices.items())):
            crypt_devices[device] = {
                'dm-name': f'backup{number}',
                'passphrase': passphrase,
            }
            pool_devices.add(f'/dev/mapper/backup{number}')
            unlock_actions.add(f'action:dm-crypt_open_backup{number}')

        pool_config = [{
            'devices': pool_devices,
        }]
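        # With three or more devices, build a raidz; with exactly two, a
        # mirror; with a single device, a pool without any redundancy.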
        if len(pool_devices) > 2:
            pool_config[0]['type'] = 'raidz'
        elif len(pool_devices) > 1:
            pool_config[0]['type'] = 'mirror'

    elif isinstance(devices, list):
        pool_config = []

        for idx, intended_pool in enumerate(devices):
            pool_devices = set()

            for number, (device, passphrase) in enumerate(sorted(intended_pool.items())):
                crypt_devices[device] = {
                    'dm-name': f'backup{idx}-{number}',
                    'passphrase': passphrase,
                }
                pool_devices.add(f'/dev/mapper/backup{idx}-{number}')
                unlock_actions.add(f'action:dm-crypt_open_backup{idx}-{number}')

            pool_config.append({
                'devices': pool_devices,
                'type': 'raidz',
            })
    else:
        raise BundleError(f'{node.name}: unsupported configuration for backup-server/encrypted-devices')

    return {
        'backup-server': {
            'zfs-base': 'backups',
        },
        'dm-crypt': {
            'encrypted-devices': crypt_devices,
        },
        'zfs': {
            'pools': {
                'backups': {
                    'when_creating': {
                        'config': pool_config,
                        **metadata.get('backup-server/zpool_create_options', {}),
                    },
                    'needs': unlock_actions,
                    # That's a bit hacky. We do it this way to auto-import
                    # the pool after decrypting the devices. Otherwise
                    # the pool wouldn't exist, which leads to bundlewrap
                    # trying to re-create the pool.
                    # Also, -N to not auto-mount anything.
                    'unless': 'zpool import -N backups',
                },
            },
        },
    }


@metadata_reactor.provides(
    'zfs/datasets',
    'zfs/snapshots/snapshot_never',
)
def zfs_datasets_and_snapshots(metadata):
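    """
    Creates one ZFS dataset per backup client below the base dataset and
    excludes the base dataset itself from automatic snapshots.
    """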
    zfs_datasets = {}

    for client in metadata.get('backup-server/clients', {}).keys():
        dataset = '{}/{}'.format(metadata.get('backup-server/zfs-base'), client)

        zfs_datasets[dataset] = {
            'mountpoint': '/srv/backups/{}'.format(client),
            'compression': 'on',
        }

    return {
        'zfs': {
            'datasets': zfs_datasets,
            'snapshots': {
                'snapshot_never': {
                    metadata.get('backup-server/zfs-base'),
                },
            },
        },
    }


@metadata_reactor.provides(
    'icinga2_api/backup-server/services',
)
def monitoring(metadata):
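    """
    Generates one icinga2 service per backup client (unless the client is
    excluded from monitoring) which checks that a recent enough backup
    exists for that node.
    """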
    services = {}

    for client, config in metadata.get('backup-server/clients', {}).items():
        if config.get('exclude_from_monitoring', False):
            continue

        services[f'BACKUPS FOR NODE {client}'] = {
            'command_on_monitored_host': '/usr/local/share/icinga/plugins/check_backup_for_node {} {}'.format(
                client,
                config['one_backup_every_hours'],
            ),
            'vars.sshmon_timeout': 40,
        }

    return {
        'icinga2_api': {
            'backup-server': {
                'services': services,
            },
        },
    }