Compare commits
No commits in common. "b5f93ceb4882e5d7df6b80d47c9af7c9b969f61e" and "a3300cde986a19e206d5d439b4d306e03eb250d7" have entirely different histories.
b5f93ceb48...a3300cde98
9 changed files with 15 additions and 195 deletions
@@ -3,10 +3,8 @@
 statusfile=/var/tmp/backup.monitoring
 ssh_login="${username}@${server}"
 ssh_cmnd="ssh -o IdentityFile=/etc/backup.priv -o StrictHostKeyChecking=accept-new -p ${port}"
-nodename="${node.name}"

 <%text>
-[[ -n "$DEBUG" ]] && set -x
 NL=$'\n'

 if ! [[ -f /etc/backup.priv ]]
@@ -61,8 +59,6 @@ do_backup() {
 }

 rsync_errors=""
-
-$ssh_cmnd $ssh_login "sudo /usr/local/bin/rotate-single-backup-client $nodename"
 </%text>

 % for path in sorted(paths):

@@ -1,111 +0,0 @@
-#!/usr/bin/env python3
-
-from json import load
-from subprocess import check_call, check_output
-from sys import argv
-from time import time
-
-NODE = argv[1]
-
-NOW = int(time())
-DAY_SECONDS = 60 * 60 * 24
-INTERVALS = {
-    'daily': DAY_SECONDS,
-    'weekly': 7 * DAY_SECONDS,
-    'monthly': 30 * DAY_SECONDS,
-}
-
-buckets = {}
-
-def syslog(msg):
-    check_output(['logger', '-t', f'backup-{NODE}', msg])
-
-
-with open(f'/etc/backup-server/config.json', 'r') as f:
-    server_settings = load(f)
-
-with open(f'/etc/backup-server/clients/{NODE}', 'r') as f:
-    client_settings = load(f)
-
-# get all existing snapshots for NODE
-for line in check_output('LC_ALL=C zfs list -H -t snapshot -o name', shell=True).splitlines():
-    line = line.decode('UTF-8')
-
-    if line.startswith('{}/{}@'.format(server_settings['zfs-base'], NODE)):
-        _, snapname = line.split('@', 1)
-
-        if 'zfs-auto-snap' in snapname:
-            # migration from auto-snapshots, ignore
-            continue
-
-        ts, bucket = snapname.split('-', 1)
-        buckets.setdefault(bucket, set()).add(int(ts))
-        syslog(f'classified {line} as {bucket} from {ts}')
-
-# determine if we need to create a new snapshot
-for bucket in INTERVALS.keys():
-    snapshots = sorted(buckets.get(bucket, set()))
-
-    if snapshots:
-        last_snap = snapshots[-1]
-        delta = NOW - last_snap
-        fresh_age = INTERVALS[bucket] - DAY_SECONDS
-
-        if delta > fresh_age:
-            # last snapshot is older than what we want. create a new one.
-            check_call(
-                'zfs snapshot {}/{}@{}-{}'.format(
-                    server_settings['zfs-base'],
-                    NODE,
-                    NOW,
-                    bucket,
-                ),
-                shell=True,
-            )
-            buckets.setdefault(bucket, set()).add(NOW)
-            syslog(f'created new snapshot {NOW}-{bucket}')
-        else:
-            syslog(f'existing snapshot {last_snap}-{bucket} is fresh enough')
-    else:
-        check_call(
-            'zfs snapshot {}/{}@{}-{}'.format(
-                server_settings['zfs-base'],
-                NODE,
-                NOW,
-                bucket,
-            ),
-            shell=True,
-        )
-        buckets.setdefault(bucket, set()).add(NOW)
-        syslog(f'created initial snapshot {NOW}-{bucket}')
-
-# finally, see if we can delete any snapshots, because they are old enough
-for bucket in INTERVALS.keys():
-    snapshots = sorted(buckets.get(bucket, set()))
-
-    if not snapshots:
-        syslog(f'something is wrong, there are no snapshots for {bucket}')
-        continue
-
-    keep_age = INTERVALS[bucket] * client_settings[bucket]
-
-    # oldest snapshots come first
-    for ts in snapshots[:-int(client_settings[bucket])]:
-        delta = NOW - ts

-        if delta >= keep_age:
-            check_call(
-                'zfs destroy {}/{}@{}-{}'.format(
-                    server_settings['zfs-base'],
-                    NODE,
-                    ts,
-                    bucket,
-                ),
-                shell=True,
-            )
-            syslog(f'removing snapshot {ts}-{bucket}, age {delta}, keep_age {keep_age}')
-        else:
-            syslog(f'keeping snapshot {ts}-{bucket}, age not reached')
-
-    for ts in snapshots[int(client_settings[bucket]):]:
-        syslog(f'keeping snapshot {ts}-{bucket}, count')

@@ -1,3 +0,0 @@
-% for username, nodename in sorted(clients.items()):
-${username} ALL=NOPASSWD:/usr/local/bin/rotate-single-backup-client ${nodename}
-% endfor

@@ -1,41 +1,17 @@
 repo.libs.tools.require_bundle(node, 'zfs')

 from os.path import join
-from bundlewrap.metadata import metadata_to_json

 dataset = node.metadata.get('backup-server/zfs-base')

-files = {
-    '/etc/backup-server/config.json': {
-        'content': metadata_to_json({
-            'zfs-base': dataset,
-        }),
-    },
-    '/usr/local/bin/rotate-single-backup-client': {
-        'mode': '0755',
-    },
-}
-
-directories['/etc/backup-server/clients'] = {
-    'purge': True,
-}
-
-sudoers = {}
-
 for nodename, config in node.metadata.get('backup-server/clients', {}).items():
     with open(join(repo.path, 'data', 'backup', 'keys', f'{nodename}.pub'), 'r') as f:
         pubkey = f.read().strip()

-    sudoers[config['user']] = nodename
-
     users[config['user']] = {
         'home': f'/srv/backups/{nodename}',
     }

-    files[f'/etc/backup-server/clients/{nodename}'] = {
-        'content': metadata_to_json(config['retain']),
-    }
-
     files[f'/srv/backups/{nodename}/.ssh/authorized_keys'] = {
         'content': pubkey,
         'owner': config['user'],
@@ -52,11 +28,3 @@ for nodename, config in node.metadata.get('backup-server/clients', {}).items():
             f'zfs_dataset:{dataset}/{nodename}',
         },
     }
-
-files['/etc/sudoers.d/backup-server'] = {
-    'source': 'sudoers',
-    'content_type': 'mako',
-    'context': {
-        'clients': sudoers,
-    },
-}

@@ -16,11 +16,6 @@ defaults = {
 )
 def get_my_clients(metadata):
     my_clients = {}
-    retain_defaults = {
-        'daily': 14,
-        'weekly': 4,
-        'monthly': 6,
-    }

     for rnode in repo.nodes:
         if not rnode.has_bundle('backup-client') or rnode.metadata.get('backups/exclude_from_backups', False):
@@ -31,11 +26,6 @@ def get_my_clients(metadata):

         my_clients[rnode.name] = {
             'user': rnode.metadata.get('backup-client/user-name'),
-            'retain': {
-                'daily': rnode.metadata.get('backups/retain/daily', retain_defaults['daily']),
-                'weekly': rnode.metadata.get('backups/retain/weekly', retain_defaults['weekly']),
-                'monthly': rnode.metadata.get('backups/retain/monthly', retain_defaults['monthly']),
-            },
         }

     return {
@@ -107,10 +97,15 @@ def zfs_pool(metadata):

 @metadata_reactor.provides(
     'zfs/datasets',
-    'zfs/snapshots/snapshot_never',
+    'zfs/snapshots/retain_per_dataset',
 )
 def zfs_datasets_and_snapshots(metadata):
     zfs_datasets = {}
+    zfs_retains = {}
+    retain_defaults = {
+        'weekly': 4,
+        'monthly': 6,
+    }

     for client in metadata.get('backup-server/clients', {}).keys():
         dataset = '{}/{}'.format(metadata.get('backup-server/zfs-base'), client)
@@ -120,14 +115,13 @@ def zfs_datasets_and_snapshots(metadata):
             'compression': 'on',
         }

+        zfs_retains[dataset] = retain_defaults.copy()

     return {
         'zfs': {
             'datasets': zfs_datasets,
             'snapshots': {
-                'snapshot_never': {
-                    metadata.get('backup-server/zfs-base'),
-                },
+                'retain_per_dataset': zfs_retains,
             },
         },
     }

@@ -31,15 +31,8 @@ label = argv[1]
 with open('/etc/zfs-snapshot-config.json', 'r') as fp:
     metadata = loads(fp.read())

-datasets = set()
-for line in check_output(['zfs', 'list', '-H', '-o', 'name']).splitlines():
-    line = line.decode('UTF-8')
-
-    for prefix in metadata.get('snapshot_never', set()):
-        if line.startswith(prefix):
-            break
-    else:
-        datasets.add(line)
+output = check_output(['zfs', 'list', '-H', '-o', 'name']).decode('UTF-8')
+datasets = set(output.splitlines())

 default_retain = metadata['retain_defaults'][label]
 now = datetime.now().strftime('%F-%H%M')

@@ -50,6 +50,11 @@ nodes['home.nas'] = {
             'exclude_from_backups': True,
         },
         'backup-server': {
+            'clients': {
+                'kunsi-t470': {
+                    'user': 'kunsi-t470',
+                },
+            },
             'my_hostname': 'franzi-home.kunbox.net',
             'my_ssh_port': 2022,
             'zfs-base': 'storage/backups',

@@ -35,11 +35,6 @@ nodes['htz-hel.backup-kunsi'] = {
         'clients': {
             'kunsi-t470': {
                 'user': 'kunsi-t470',
-                'retain': {
-                    'daily': 30,
-                    'weekly': 6,
-                    'monthly': 12,
-                },
             },
         },
     },

@@ -1,19 +1,2 @@
 hostname = "2a01:4f9:6b:2d99::2"
 dummy = true
-
-# How to install:
-# - Get server at Hetzner (no IPv4)
-# - Install latest proxmox compatible debian
-# - RAID5
-# - 50G for system
-# - leave rest unpartitioned
-# - install zfs
-# - create additional partitions for remaining disk space
-# - create raidz on those partitions
-# - enable ipv6 forwarding
-# - install proxmox via apt
-
-# VM config:
-# - IPv6 only
-# - IP from the /64 hetzner gives us
-# - Gateway is the host itself, to work around the MAC filter hetzner uses