defaults = {
    'backup-server': {
        'my_ssh_port': 22,
    },
    'openssh': {
        'allowed_users': {
            # Usernames for backup clients always start with 'c-'
            'c-*',
        },
    },
    'zfs': {
        # The whole point of doing backups is to keep them for a long
        # time, which eliminates the need for this check.
        'enable_old_snapshots_check': False,
    },
}


@metadata_reactor.provides(
    'backup-server/clients',
    'backup-server/my_hostname',
)
def get_my_clients(metadata):
    my_clients = {}
    retain_defaults = {
        'daily': 14,
        'weekly': 4,
        'monthly': 6,
    }

    for rnode in repo.nodes:
        if not rnode.has_bundle('backup-client') or rnode.metadata.get('backups/exclude_from_backups', False):
            continue

        if node.name != rnode.metadata.get('backup-client/target'):
            continue

        my_clients[rnode.name] = {
            'exclude_from_monitoring': rnode.metadata.get(
                'backup-client/exclude_from_monitoring',
                rnode.metadata.get(
                    'icinga_options/exclude_from_monitoring',
                    False,
                ),
            ),
            'one_backup_every_hours': rnode.metadata.get('backup-client/one_backup_every_hours', 24),
            'user': rnode.metadata.get('backup-client/user-name'),
            'retain': {
                'daily': rnode.metadata.get('backups/retain/daily', retain_defaults['daily']),
                'weekly': rnode.metadata.get('backups/retain/weekly', retain_defaults['weekly']),
                'monthly': rnode.metadata.get('backups/retain/monthly', retain_defaults['monthly']),
            },
        }

    return {
        'backup-server': {
            'clients': my_clients,
            'my_hostname': metadata.get('hostname'),
        },
    }


@metadata_reactor.provides(
    'backup-server/zfs-base',
    'dm-crypt/encrypted-devices',
    'zfs/pools',
)
def zfs_pool(metadata):
    if not metadata.get('backup-server/encrypted-devices', {}):
        return {}

    crypt_devices = {}
    pool_devices = set()
    unlock_actions = set()

    for number, (device, passphrase) in enumerate(sorted(metadata.get('backup-server/encrypted-devices', {}).items())):
        crypt_devices[device] = {
            'dm-name': f'backup{number}',
            'passphrase': passphrase,
        }
        pool_devices.add(f'/dev/mapper/backup{number}')
        unlock_actions.add(f'action:dm-crypt_open_backup{number}')

    pool_opts = {
        'devices': pool_devices,
    }

    if len(pool_devices) > 2:
        pool_opts['type'] = 'raidz'
    elif len(pool_devices) > 1:
        pool_opts['type'] = 'mirror'

    return {
        'backup-server': {
            'zfs-base': 'backups',
        },
        'dm-crypt': {
            'encrypted-devices': crypt_devices,
        },
        'zfs': {
            'pools': {
                'backups': {
                    'when_creating': {
                        'config': [
                            pool_opts,
                        ],
                    },
                    'needs': unlock_actions,
                    # That's a bit hacky. We do it this way to auto-import
                    # the pool after decrypting the devices. Otherwise
                    # the pool wouldn't exist, which leads to bundlewrap
                    # trying to re-create the pool.
                    # Also, -N to not auto-mount anything.
                    'unless': 'zpool import -N backups',
                },
            },
        },
    }


@metadata_reactor.provides(
    'zfs/datasets',
    'zfs/snapshots/snapshot_never',
)
def zfs_datasets_and_snapshots(metadata):
    zfs_datasets = {}

    for client in metadata.get('backup-server/clients', {}).keys():
        dataset = '{}/{}'.format(metadata.get('backup-server/zfs-base'), client)
        zfs_datasets[dataset] = {
            'mountpoint': '/srv/backups/{}'.format(client),
            'compression': 'on',
        }

    return {
        'zfs': {
            'datasets': zfs_datasets,
            'snapshots': {
                'snapshot_never': {
                    metadata.get('backup-server/zfs-base'),
                },
            },
        },
    }


@metadata_reactor.provides(
    'icinga2_api/backup-server/services',
)
def monitoring(metadata):
    services = {}

    for client, config in metadata.get('backup-server/clients', {}).items():
        if config.get('exclude_from_monitoring', False):
            continue

        services[f'BACKUPS FOR NODE {client}'] = {
            'command_on_monitored_host': 'sudo /usr/local/share/icinga/plugins/check_backup_for_node {} {}'.format(
                client,
                config['one_backup_every_hours'],
            ),
            'vars.sshmon_timeout': 20,
        }

    return {
        'icinga2_api': {
            'backup-server': {
                'services': services,
            },
        },
    }
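

# Illustrative only, not part of the bundle: a minimal sketch of the node
# metadata a backup client might define so that get_my_clients() above picks
# it up. The hostname, user name and values are hypothetical; only the key
# paths mirror the rnode.metadata.get() calls in that reactor.
EXAMPLE_CLIENT_METADATA = {
    'backup-client': {
        # Must equal node.name of this backup server.
        'target': 'backupserver.example.com',
        # Matches the 'c-*' pattern allowed in openssh/allowed_users.
        'user-name': 'c-myclient',
        'one_backup_every_hours': 24,
        'exclude_from_monitoring': False,
    },
    'backups': {
        # Set to True to skip this node entirely.
        'exclude_from_backups': False,
        'retain': {
            'daily': 14,
            'weekly': 4,
            'monthly': 6,
        },
    },
}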