from bundlewrap.exceptions import BundleError

defaults = {
    'backup-server': {
        'my_ssh_port': 22,
    },
    'openssh': {
        'allowed_users': {
            # Usernames for backup clients always start with 'c-'
            'c-*',
        },
    },
    'systemd-timers': {
        'timers': {
            'check_backup_for_node-cron': {
                'command': '/usr/local/share/icinga/plugins/check_backup_for_node-cron',
                'when': '*-*-* *:00/5:00', # every five minutes
            }
        },
    },
    'zfs': {
        # Backups are deliberately kept around for a long time, so old
        # snapshots are expected here and this check would only produce noise.
        'enable_old_snapshots_check': False,
    },
}

@metadata_reactor.provides(
    'backup-server/clients',
    'backup-server/my_hostname',
)
def get_my_clients(metadata):
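    """
    Collect every node that has the backup-client bundle and targets this
    node for its backups, together with its monitoring, scheduling and
    retention settings.
    """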
    my_clients = {}
    retain_defaults = {
        'daily': 14,
        'weekly': 4,
        'monthly': 6,
    }

    for rnode in repo.nodes:
        if not rnode.has_bundle('backup-client') or rnode.metadata.get('backups/exclude_from_backups', False):
            continue

        if node.name != rnode.metadata.get('backup-client/target'):
            continue

        my_clients[rnode.name] = {
            'exclude_from_monitoring': rnode.metadata.get(
                'backup-client/exclude_from_monitoring',
                rnode.metadata.get(
                    'icinga_options/exclude_from_monitoring',
                    False,
                ),
            ),
            'one_backup_every_hours': rnode.metadata.get('backup-client/one_backup_every_hours', 24),
            'user': rnode.metadata.get('backup-client/user-name'),
            'retain': {
                'daily': rnode.metadata.get('backups/retain/daily', retain_defaults['daily']),
                'weekly': rnode.metadata.get('backups/retain/weekly', retain_defaults['weekly']),
                'monthly': rnode.metadata.get('backups/retain/monthly', retain_defaults['monthly']),
            },
        }

    return {
        'backup-server': {
            'clients': my_clients,
            'my_hostname': metadata.get('hostname'),
        },
    }


@metadata_reactor.provides(
    'backup-server/zfs-base',
    'dm-crypt/encrypted-devices',
    'zfs/pools',
)
def zfs_pool(metadata):
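    """
    Derive dm-crypt and zfs pool configuration from
    backup-server/encrypted-devices. Both the legacy flat mapping
    (a single pool) and the newer list-of-pools format are supported.
    """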
    devices = metadata.get('backup-server/encrypted-devices', {})

    if not devices:
        return {}

    crypt_devices = {}
    unlock_actions = set()

    # TODO remove this once we have migrated all systems
    if isinstance(devices, dict):
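        # Legacy format: a flat mapping of device -> passphrase that ends
        # up in a single pool. The vdev type depends on how many devices
        # there are: raidz for three or more, mirror for two, a plain
        # single-device vdev otherwise.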
        pool_devices = set()

        for number, (device, passphrase) in enumerate(sorted(devices.items())):
            crypt_devices[device] = {
                'dm-name': f'backup{number}',
                'passphrase': passphrase,
            }
            pool_devices.add(f'/dev/mapper/backup{number}')
            unlock_actions.add(f'action:dm-crypt_open_backup{number}')

        pool_config = [{
            'devices': pool_devices,
        }]

        if len(pool_devices) > 2:
            pool_config[0]['type'] = 'raidz'
        elif len(pool_devices) > 1:
            pool_config[0]['type'] = 'mirror'

    elif isinstance(devices, list):
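        # Current format: a list of pools, each a mapping of
        # device -> passphrase. Every pool becomes a raidz vdev.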
        pool_config = []

        for idx, intended_pool in enumerate(devices):
            pool_devices = set()

            for number, (device, passphrase) in enumerate(sorted(intended_pool.items())):
                crypt_devices[device] = {
                    'dm-name': f'backup{idx}-{number}',
                    'passphrase': passphrase,
                }
                pool_devices.add(f'/dev/mapper/backup{idx}-{number}')
                unlock_actions.add(f'action:dm-crypt_open_backup{idx}-{number}')

            pool_config.append({
                'devices': pool_devices,
                'type': 'raidz',
            })
    else:
        raise BundleError(f'{node.name}: unsupported configuration for backup-server/encrypted-devices')

    return {
        'backup-server': {
            'zfs-base': 'backups',
        },
        'dm-crypt': {
            'encrypted-devices': crypt_devices,
        },
        'zfs': {
            'pools': {
                'backups': {
                    'when_creating': {
                        'config': pool_config,
                        **metadata.get('backup-server/zpool_create_options', {}),
                    },
                    'needs': unlock_actions,
                    # This is a bit hacky: running `zpool import` as the
                    # 'unless' condition also imports the pool right after
                    # the devices have been decrypted. Without it, the pool
                    # would appear to be missing and bundlewrap would try
                    # to re-create it. -N prevents datasets from being
                    # mounted during the import.
                    'unless': 'zpool import -N backups',
                },
            },
        }
    }


@metadata_reactor.provides(
    'zfs/datasets',
    'zfs/snapshots/snapshot_never',
)
def zfs_datasets_and_snapshots(metadata):
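    """
    Create one zfs dataset per backup client below the configured
    zfs-base and exclude the base dataset from automatic snapshots.
    """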
    zfs_datasets = {}

    for client in metadata.get('backup-server/clients', {}).keys():
        dataset = '{}/{}'.format(metadata.get('backup-server/zfs-base'), client)

        zfs_datasets[dataset] = {
            'mountpoint': '/srv/backups/{}'.format(client),
            'compression': 'on',
        }

    return {
        'zfs': {
            'datasets': zfs_datasets,
            'snapshots': {
                'snapshot_never': {
                    metadata.get('backup-server/zfs-base'),
                },
            },
        },
    }


@metadata_reactor.provides(
    'icinga2_api/backup-server/services',
)
def monitoring(metadata):
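    """
    Add an icinga2 service check for every backup client that is not
    excluded from monitoring.
    """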
    services = {}

    for client, config in metadata.get('backup-server/clients', {}).items():
        if config.get('exclude_from_monitoring', False):
            continue

        services[f'BACKUPS FOR NODE {client}'] = {
            'command_on_monitored_host': '/usr/local/share/icinga/plugins/check_backup_for_node {} {}'.format(
                client,
                config['one_backup_every_hours'],
            ),
            'vars.sshmon_timeout': 40,
        }

    return {
        'icinga2_api': {
            'backup-server': {
                'services': services,
            },
        },
    }