diff --git a/bundles/backup-client/files/generate-backup b/bundles/backup-client/files/generate-backup
index ef648f4..fa5ad60 100644
--- a/bundles/backup-client/files/generate-backup
+++ b/bundles/backup-client/files/generate-backup
@@ -3,10 +3,8 @@
 statusfile=/var/tmp/backup.monitoring
 ssh_login="${username}@${server}"
 ssh_cmnd="ssh -o IdentityFile=/etc/backup.priv -o StrictHostKeyChecking=accept-new -p ${port}"
-nodename="${node.name}"
 <%text>
-
 [[ -n "$DEBUG" ]] && set -x
 
 NL=$'\n'
 if ! [[ -f /etc/backup.priv ]]
@@ -61,8 +59,6 @@ do_backup() {
 }
 
 rsync_errors=""
-
-$ssh_cmnd $ssh_login "sudo /usr/local/bin/rotate-single-backup-client $nodename"
 
 
 % for path in sorted(paths):
diff --git a/bundles/backup-server/files/rotate-single-backup-client b/bundles/backup-server/files/rotate-single-backup-client
deleted file mode 100644
index b031866..0000000
--- a/bundles/backup-server/files/rotate-single-backup-client
+++ /dev/null
@@ -1,111 +0,0 @@
-#!/usr/bin/env python3
-
-from json import load
-from subprocess import check_call, check_output
-from sys import argv
-from time import time
-
-NODE = argv[1]
-
-NOW = int(time())
-DAY_SECONDS = 60 * 60 * 24
-INTERVALS = {
-    'daily': DAY_SECONDS,
-    'weekly': 7 * DAY_SECONDS,
-    'monthly': 30 * DAY_SECONDS,
-}
-
-buckets = {}
-
-def syslog(msg):
-    check_output(['logger', '-t', f'backup-{NODE}', msg])
-
-
-with open(f'/etc/backup-server/config.json', 'r') as f:
-    server_settings = load(f)
-
-with open(f'/etc/backup-server/clients/{NODE}', 'r') as f:
-    client_settings = load(f)
-
-# get all existing snapshots for NODE
-for line in check_output('LC_ALL=C zfs list -H -t snapshot -o name', shell=True).splitlines():
-    line = line.decode('UTF-8')
-
-    if line.startswith('{}/{}@'.format(server_settings['zfs-base'], NODE)):
-        _, snapname = line.split('@', 1)
-
-        if 'zfs-auto-snap' in snapname:
-            # migration from auto-snapshots, ignore
-            continue
-
-        ts, bucket = snapname.split('-', 1)
-        buckets.setdefault(bucket, set()).add(int(ts))
-        syslog(f'classified {line} as {bucket} from {ts}')
-
-# determine if we need to create a new snapshot
-for bucket in INTERVALS.keys():
-    snapshots = sorted(buckets.get(bucket, set()))
-
-    if snapshots:
-        last_snap = snapshots[-1]
-        delta = NOW - last_snap
-        fresh_age = INTERVALS[bucket] - DAY_SECONDS
-
-        if delta > fresh_age:
-            # last snapshot is older than what we want. create a new one.
-            check_call(
-                'zfs snapshot {}/{}@{}-{}'.format(
-                    server_settings['zfs-base'],
-                    NODE,
-                    NOW,
-                    bucket,
-                ),
-                shell=True,
-            )
-            buckets.setdefault(bucket, set()).add(NOW)
-            syslog(f'created new snapshot {NOW}-{bucket}')
-        else:
-            syslog(f'existing snapshot {last_snap}-{bucket} is fresh enough')
-    else:
-        check_call(
-            'zfs snapshot {}/{}@{}-{}'.format(
-                server_settings['zfs-base'],
-                NODE,
-                NOW,
-                bucket,
-            ),
-            shell=True,
-        )
-        buckets.setdefault(bucket, set()).add(NOW)
-        syslog(f'created initial snapshot {NOW}-{bucket}')
-
-# finally, see if we can delete any snapshots, because they are old enough
-for bucket in INTERVALS.keys():
-    snapshots = sorted(buckets.get(bucket, set()))
-
-    if not snapshots:
-        syslog(f'something is wrong, there are no snapshots for {bucket}')
-        continue
-
-    keep_age = INTERVALS[bucket] * client_settings[bucket]
-
-    # oldest snapshots come first
-    for ts in snapshots[:-int(client_settings[bucket])]:
-        delta = NOW - ts
-
-        if delta >= keep_age:
-            check_call(
-                'zfs destroy {}/{}@{}-{}'.format(
-                    server_settings['zfs-base'],
-                    NODE,
-                    ts,
-                    bucket,
-                ),
-                shell=True,
-            )
-            syslog(f'removing snapshot {ts}-{bucket}, age {delta}, keep_age {keep_age}')
-        else:
-            syslog(f'keeping snapshot {ts}-{bucket}, age not reached')
-
-    for ts in snapshots[int(client_settings[bucket]):]:
-        syslog(f'keeping snapshot {ts}-{bucket}, count')
diff --git a/bundles/backup-server/files/sudoers b/bundles/backup-server/files/sudoers
deleted file mode 100644
index a29e702..0000000
--- a/bundles/backup-server/files/sudoers
+++ /dev/null
@@ -1,3 +0,0 @@
-% for username, nodename in sorted(clients.items()):
-${username} ALL=NOPASSWD:/usr/local/bin/rotate-single-backup-client ${nodename}
-% endfor
diff --git a/bundles/backup-server/items.py b/bundles/backup-server/items.py
index 0b43f62..bae34a7 100644
--- a/bundles/backup-server/items.py
+++ b/bundles/backup-server/items.py
@@ -1,41 +1,17 @@
 repo.libs.tools.require_bundle(node, 'zfs')
 
 from os.path import join
-from bundlewrap.metadata import metadata_to_json
 
 dataset = node.metadata.get('backup-server/zfs-base')
 
-files = {
-    '/etc/backup-server/config.json': {
-        'content': metadata_to_json({
-            'zfs-base': dataset,
-        }),
-    },
-    '/usr/local/bin/rotate-single-backup-client': {
-        'mode': '0755',
-    },
-}
-
-directories['/etc/backup-server/clients'] = {
-    'purge': True,
-}
-
-sudoers = {}
-
 for nodename, config in node.metadata.get('backup-server/clients', {}).items():
     with open(join(repo.path, 'data', 'backup', 'keys', f'{nodename}.pub'), 'r') as f:
         pubkey = f.read().strip()
 
-    sudoers[config['user']] = nodename
-
     users[config['user']] = {
         'home': f'/srv/backups/{nodename}',
     }
 
-    files[f'/etc/backup-server/clients/{nodename}'] = {
-        'content': metadata_to_json(config['retain']),
-    }
-
     files[f'/srv/backups/{nodename}/.ssh/authorized_keys'] = {
         'content': pubkey,
         'owner': config['user'],
@@ -52,11 +28,3 @@ for nodename, config in node.metadata.get('backup-server/clients', {}).items():
             f'zfs_dataset:{dataset}/{nodename}',
         },
     }
-
-files['/etc/sudoers.d/backup-server'] = {
-    'source': 'sudoers',
-    'content_type': 'mako',
-    'context': {
-        'clients': sudoers,
-    },
-}
diff --git a/bundles/backup-server/metadata.py b/bundles/backup-server/metadata.py
index 3a8b89e..990c39f 100644
--- a/bundles/backup-server/metadata.py
+++ b/bundles/backup-server/metadata.py
@@ -16,11 +16,6 @@ defaults = {
 )
 def get_my_clients(metadata):
     my_clients = {}
-    retain_defaults = {
-        'daily': 14,
-        'weekly': 4,
-        'monthly': 6,
-    }
 
     for rnode in repo.nodes:
         if not rnode.has_bundle('backup-client') or rnode.metadata.get('backups/exclude_from_backups', False):
@@ -31,11 +26,6 @@ def get_my_clients(metadata):
 
         my_clients[rnode.name] = {
             'user': rnode.metadata.get('backup-client/user-name'),
-            'retain': {
-                'daily': rnode.metadata.get('backups/retain/daily', retain_defaults['daily']),
-                'weekly': rnode.metadata.get('backups/retain/weekly', retain_defaults['weekly']),
-                'monthly': rnode.metadata.get('backups/retain/monthly', retain_defaults['monthly']),
-            },
         }
 
     return {
@@ -107,10 +97,15 @@ def zfs_pool(metadata):
 
 @metadata_reactor.provides(
     'zfs/datasets',
-    'zfs/snapshots/snapshot_never',
+    'zfs/snapshots/retain_per_dataset',
 )
 def zfs_datasets_and_snapshots(metadata):
     zfs_datasets = {}
+    zfs_retains = {}
+    retain_defaults = {
+        'weekly': 4,
+        'monthly': 6,
+    }
 
     for client in metadata.get('backup-server/clients', {}).keys():
         dataset = '{}/{}'.format(metadata.get('backup-server/zfs-base'), client)
@@ -120,14 +115,13 @@ def zfs_datasets_and_snapshots(metadata):
             'compression': 'on',
         }
 
+        zfs_retains[dataset] = retain_defaults.copy()
 
     return {
         'zfs': {
            'datasets': zfs_datasets,
             'snapshots': {
-                'snapshot_never': {
-                    metadata.get('backup-server/zfs-base'),
-                },
+                'retain_per_dataset': zfs_retains,
             },
         },
     }
diff --git a/bundles/zfs/files/zfs-auto-snapshot b/bundles/zfs/files/zfs-auto-snapshot
index 1861449..a085ea3 100644
--- a/bundles/zfs/files/zfs-auto-snapshot
+++ b/bundles/zfs/files/zfs-auto-snapshot
@@ -31,15 +31,8 @@ label = argv[1]
 with open('/etc/zfs-snapshot-config.json', 'r') as fp:
     metadata = loads(fp.read())
 
-datasets = set()
-for line in check_output(['zfs', 'list', '-H', '-o', 'name']).splitlines():
-    line = line.decode('UTF-8')
-
-    for prefix in metadata.get('snapshot_never', set()):
-        if line.startswith(prefix):
-            break
-    else:
-        datasets.add(line)
+output = check_output(['zfs', 'list', '-H', '-o', 'name']).decode('UTF-8')
+datasets = set(output.splitlines())
 
 default_retain = metadata['retain_defaults'][label]
 now = datetime.now().strftime('%F-%H%M')
diff --git a/nodes/home/nas.py b/nodes/home/nas.py
index fa87ce1..2e11900 100644
--- a/nodes/home/nas.py
+++ b/nodes/home/nas.py
@@ -50,6 +50,11 @@ nodes['home.nas'] = {
             'exclude_from_backups': True,
         },
         'backup-server': {
+            'clients': {
+                'kunsi-t470': {
+                    'user': 'kunsi-t470',
+                },
+            },
             'my_hostname': 'franzi-home.kunbox.net',
             'my_ssh_port': 2022,
             'zfs-base': 'storage/backups',
diff --git a/nodes/htz-hel/backup-kunsi.py b/nodes/htz-hel/backup-kunsi.py
index 5c8dad8..1fe796d 100644
--- a/nodes/htz-hel/backup-kunsi.py
+++ b/nodes/htz-hel/backup-kunsi.py
@@ -35,11 +35,6 @@ nodes['htz-hel.backup-kunsi'] = {
         'clients': {
             'kunsi-t470': {
                 'user': 'kunsi-t470',
-                'retain': {
-                    'daily': 30,
-                    'weekly': 6,
-                    'monthly': 12,
-                },
             },
         },
     },
diff --git a/nodes/htz-hel/proxmox-backupstorage.toml b/nodes/htz-hel/proxmox-backupstorage.toml
index 0284a41..a5b7cda 100644
--- a/nodes/htz-hel/proxmox-backupstorage.toml
+++ b/nodes/htz-hel/proxmox-backupstorage.toml
@@ -1,19 +1,2 @@
 hostname = "2a01:4f9:6b:2d99::2"
 dummy = true
-
-# How to install:
-# - Get server at Hetzner (no IPv4)
-# - Install latest proxmox compatible debian
-#   - RAID5
-#   - 50G for system
-#   - leave rest unpartitioned
-# - install zfs
-#   - create additional partitions for remaining disk space
-#   - create raidz on those partitions
-# - enable ipv6 forwarding
-# - install proxmox via apt
-
-# VM config:
-# - IPv6 only
-# - IP from the /64 hetzner gives us
-# - Gateway is the host itself, to work around the MAC filter hetzner uses