Compare commits
3 commits
main...hetzner-dy
6a57a26d3d
380eb02a6d
7ded2c6b3b
45 changed files with 281 additions and 350 deletions
@@ -15,15 +15,16 @@ for line in check_output('LC_ALL=C zfs list -H -t snapshot -o name', shell=True)
line = line.decode('UTF-8')

if line.startswith('{}/'.format(server_settings['zfs-base'])):
try:
dataset, snapname = line.split('@', 1)
dataset, snapname = line.split('@', 1)

dataset = dataset.split('/')[-1]
ts, bucket = snapname.split('-', 1)
dataset = dataset.split('/')[-1]
ts, bucket = snapname.split('-', 1)

snapshots[dataset].add(int(ts))
except Exception as e:
print(f"Exception while parsing snapshot name {line!r}: {e!r}")
if not ts.isdigit():
# garbage, ignore
continue

snapshots[dataset].add(int(ts))

backups = {}
for dataset, snaps in snapshots.items():
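The hunk above swaps the broad try/except around the snapshot-name parsing for an explicit check that the timestamp part is numeric before converting it. A minimal sketch of that validation-first parsing, with `zfs_base` standing in for `server_settings['zfs-base']` (an assumption, not the bundle's actual settings object):

```python
# A sketch, not the bundle's code: collect snapshot timestamps per dataset and skip
# names that do not match the expected "<unix-ts>-<bucket>" pattern.
from collections import defaultdict
from subprocess import check_output

zfs_base = 'tank/backups'  # assumption: stands in for server_settings['zfs-base']
snapshots = defaultdict(set)

for line in check_output('LC_ALL=C zfs list -H -t snapshot -o name', shell=True).splitlines():
    line = line.decode('UTF-8')
    if not line.startswith(f'{zfs_base}/'):
        continue
    dataset, _, snapname = line.partition('@')
    dataset = dataset.split('/')[-1]
    ts, _, bucket = snapname.partition('-')
    if not ts.isdigit():
        # garbage, ignore
        continue
    snapshots[dataset].add(int(ts))

print(dict(snapshots))
```

`str.partition` is used here so a snapshot name without a dash does not raise an unpack error; the bundle's own code keeps `split('-', 1)` and relies on the dash always being present.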
@@ -83,24 +83,47 @@ def zfs_pool(metadata):

devices = metadata.get('backup-server/encrypted-devices')

pool_devices = set()
# TODO remove this once we have migrated all systems
if isinstance(devices, dict):
pool_devices = set()

for device, dconfig in devices.items():
crypt_devices[dconfig['device']] = {
'dm-name': f'backup-{device}',
'passphrase': dconfig['passphrase'],
}
pool_devices.add(f'/dev/mapper/backup-{device}')
unlock_actions.add(f'action:dm-crypt_open_backup-{device}')
for number, (device, passphrase) in enumerate(sorted(devices.items())):
crypt_devices[device] = {
'dm-name': f'backup{number}',
'passphrase': passphrase,
}
pool_devices.add(f'/dev/mapper/backup{number}')
unlock_actions.add(f'action:dm-crypt_open_backup{number}')

pool_config = [{
'devices': pool_devices,
}]
pool_config = [{
'devices': pool_devices,
}]

if len(pool_devices) > 2:
pool_config[0]['type'] = 'raidz'
elif len(pool_devices) > 1:
pool_config[0]['type'] = 'mirror'
if len(pool_devices) > 2:
pool_config[0]['type'] = 'raidz'
elif len(pool_devices) > 1:
pool_config[0]['type'] = 'mirror'

elif isinstance(devices, list):
pool_config = []

for idx, intended_pool in enumerate(devices):
pool_devices = set()

for number, (device, passphrase) in enumerate(sorted(intended_pool.items())):
crypt_devices[device] = {
'dm-name': f'backup{idx}-{number}',
'passphrase': passphrase,
}
pool_devices.add(f'/dev/mapper/backup{idx}-{number}')
unlock_actions.add(f'action:dm-crypt_open_backup{idx}-{number}')

pool_config.append({
'devices': pool_devices,
'type': 'raidz',
})
else:
raise BundleError(f'{node.name}: unsupported configuration for backup-server/encrypted-devices')

return {
'backup-server': {
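For readability, here is a condensed sketch of the two shapes of `backup-server/encrypted-devices` the hunk above accepts: a dict builds a single pool whose type depends on the device count, a list builds one raidz pool per entry. In the repo this logic lives inside the `zfs_pool` metadata reactor; pulling it into a standalone helper, using example device paths, and raising `ValueError` instead of bundlewrap's `BundleError` are simplifications for illustration.

```python
# Hedged sketch of the branching shown in the diff: legacy dict -> one pool,
# list -> one raidz pool per entry. crypt_devices/unlock_actions mirror the
# names used in the reactor.
def build_pool_config(devices, crypt_devices, unlock_actions, node_name):
    if isinstance(devices, dict):
        pool_devices = set()
        for number, (device, passphrase) in enumerate(sorted(devices.items())):
            crypt_devices[device] = {'dm-name': f'backup{number}', 'passphrase': passphrase}
            pool_devices.add(f'/dev/mapper/backup{number}')
            unlock_actions.add(f'action:dm-crypt_open_backup{number}')
        pool_config = [{'devices': pool_devices}]
        if len(pool_devices) > 2:
            pool_config[0]['type'] = 'raidz'
        elif len(pool_devices) > 1:
            pool_config[0]['type'] = 'mirror'
        return pool_config
    elif isinstance(devices, list):
        pool_config = []
        for idx, intended_pool in enumerate(devices):
            pool_devices = set()
            for number, (device, passphrase) in enumerate(sorted(intended_pool.items())):
                crypt_devices[device] = {'dm-name': f'backup{idx}-{number}', 'passphrase': passphrase}
                pool_devices.add(f'/dev/mapper/backup{idx}-{number}')
                unlock_actions.add(f'action:dm-crypt_open_backup{idx}-{number}')
            pool_config.append({'devices': pool_devices, 'type': 'raidz'})
        return pool_config
    raise ValueError(f'{node_name}: unsupported configuration for backup-server/encrypted-devices')


crypt_devices, unlock_actions = {}, set()
print(build_pool_config(
    [{'/dev/disk/by-id/example-part1': 'secret1', '/dev/disk/by-id/example-part2': 'secret2'}],
    crypt_devices, unlock_actions, 'backup-kunsi',
))
```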
@@ -1,6 +1,5 @@
#!/usr/bin/env python3

import logging
from json import loads
from os import environ
from subprocess import check_output

@@ -13,8 +12,6 @@ PSQL_USER = environ['DB_USERNAME']
PSQL_PASS = environ['DB_PASSWORD']
PSQL_DB = environ['DB_DATABASE_NAME']

logging.basicConfig(level=logging.INFO)

docker_networks = loads(check_output(['docker', 'network', 'inspect', 'aaarghhh']))

container_ip = None

@@ -29,11 +26,11 @@ for network in docker_networks:
container_ip = container['IPv4Address'].split('/')[0]

if not container_ip:
logging.error(f'could not find ip address for container {PSQL_HOST=} in json')
logging.debug(f'{docker_networks=}')
exit(0)
print(f'could not find ip address for container {PSQL_HOST=} in json')
print(docker_networks)
exit(1)

logging.debug(f'{PSQL_HOST=} {container_ip=}')
print(f'{PSQL_HOST=} {container_ip=}')

conn = psycopg2.connect(
dbname=PSQL_DB,

@@ -52,7 +49,6 @@ with conn:
}
for i in cur.fetchall()
}
logging.debug(f'{albums=}')

with conn.cursor() as cur:
cur.execute('SELECT "id","name" FROM users;')

@@ -60,28 +56,25 @@ with conn:
i[0]: i[1]
for i in cur.fetchall()
}
logging.debug(f'{users=}')

for album_id, album in albums.items():
log = logging.getLogger(album["name"])
print(f'----- working on album: {album["name"]}')
with conn:
with conn.cursor() as cur:
cur.execute('SELECT "usersId" FROM albums_shared_users_users WHERE "albumsId" = %s;', (album_id,))
album_shares = [i[0] for i in cur.fetchall()]
log.info(f'album is shared with {len(album_shares)} users')
log.debug(f'{album_shares=}')
print(f' album is shared with {len(album_shares)} users: {album_shares}')
for user_id, user_name in users.items():
if user_id == album['owner'] or user_id in album_shares:
continue

log.info(f'sharing album with user {user_name}')
try:
with conn.cursor() as cur:
cur.execute(
'INSERT INTO albums_shared_users_users ("albumsId","usersId","role") VALUES (%s, %s, %s);',
(album_id, user_id, 'viewer'),
)
except Exception:
log.exception('failure while creating share')
print(f' sharing album with user {user_name} ... ', end='')
with conn.cursor() as cur:
cur.execute(
'INSERT INTO albums_shared_users_users ("albumsId","usersId","role") VALUES (%s, %s, %s);',
(album_id, user_id, 'viewer'),
)
print('done')
print()

conn.close()
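The script above now prints its progress and lets a failed INSERT abort the run instead of logging and continuing. A hedged sketch of the sharing step with psycopg2, using placeholder connection details and data; `with conn:` commits the transaction on success and rolls back if the INSERT raises:

```python
# Sketch only: share an album with every user it is not yet shared with.
# Connection parameters, IDs and user names below are placeholders.
import psycopg2

conn = psycopg2.connect(dbname='immich', user='immich', password='secret', host='127.0.0.1')

album_id = '00000000-0000-0000-0000-000000000000'             # placeholder album id
users = {'11111111-1111-1111-1111-111111111111': 'alice'}      # placeholder: user_id -> name
already_shared = set()                                         # placeholder: ids from albums_shared_users_users

with conn:
    for user_id, user_name in users.items():
        if user_id in already_shared:
            continue
        print(f'  sharing album with user {user_name} ... ', end='')
        with conn.cursor() as cur:
            cur.execute(
                'INSERT INTO albums_shared_users_users ("albumsId","usersId","role") VALUES (%s, %s, %s);',
                (album_id, user_id, 'viewer'),
            )
        print('done')

conn.close()
```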
bundles/hetzner-dyndns/items.py (new file, +6)

@@ -0,0 +1,6 @@
directories['/opt/hetzner-dyndns/src'] = {}

git_deploy['/opt/hetzner-dyndns/src'] = {
'repo': 'https://git.franzi.business/sophie/hetzner-dyndns.git',
'rev': 'main',
}
bundles/hetzner-dyndns/metadata.py (new file, +26)

@@ -0,0 +1,26 @@
defaults = {
'systemd-timers': {
'timers': {
'hetzner-dyndns-update': {
'when': 'hourly',
},
},
},
}


@metadata_reactor.provides(
'systemd-timers/timers/hetzner-dyndns-update',
)
def command_template(metadata):
empty_command = f'/usr/bin/python3 /opt/hetzner-dyndns/src/hetzner-api-dyndns.py --api_key {{}} --zone {node.metadata.get('hetzner-dyndns/zone')} --record {node.metadata.get('hetzner-dyndns/record')}'

return {
'systemd-timers': {
'timers': {
'hetzner-dyndns-update': {
'command': node.metadata.get('hetzner-dyndns/api_key').format_into(empty_command),
},
},
},
}
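The reactor above builds the timer command with a literal `{}` placeholder and only fills in the API key at the end via the vault object's `format_into()`. A plain-Python sketch of the same idea, with made-up zone, record and key values standing in for the node metadata:

```python
# Sketch of deferred secret substitution: keep a "{}" slot in the command template
# and format the key in last. Zone, record and api_key are assumptions, not values
# from the repo.
zone = 'example.org'      # assumption: stands in for node.metadata.get('hetzner-dyndns/zone')
record = 'router.home'    # assumption
api_key = 'hunter2'       # assumption: the decrypted vault value

empty_command = (
    '/usr/bin/python3 /opt/hetzner-dyndns/src/hetzner-api-dyndns.py '
    f'--api_key {{}} --zone {zone} --record {record}'
)
command = empty_command.format(api_key)
print(command)
```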
@@ -38,8 +38,8 @@ actions['netbox_install'] = {
'triggered': True,
'command': ' && '.join([
'cd /opt/netbox/src',
'/opt/netbox/venv/bin/pip install --upgrade --upgrade-strategy=eager pip wheel setuptools django-auth-ldap gunicorn',
'/opt/netbox/venv/bin/pip install --upgrade --upgrade-strategy=eager -r requirements.txt',
'/opt/netbox/venv/bin/pip install --upgrade pip wheel setuptools django-auth-ldap gunicorn',
'/opt/netbox/venv/bin/pip install --upgrade -r requirements.txt',
]),
'needs': {
'pkg_apt:build-essential',
@@ -1,10 +0,0 @@
<?xml version="1.0" standalone='no'?>
<!DOCTYPE service-group SYSTEM "avahi-service.dtd">
<service-group>
<name replace-wildcards="yes">NFS ${path} on %h</name>
<service>
<type>_nfs._tcp</type>
<port>2049</port>
<txt-record>path=${path}</txt-record>
</service>
</service-group>

@@ -1,4 +1,4 @@
% for path, shares in sorted(node.metadata.get('nfs-server/shares', {}).items()):
% for path, shares in sorted(node.metadata['nfs-server']['shares'].items()):
% for share_target, share_options in sorted(shares.items()):
% for ip_list in repo.libs.tools.resolve_identifier(repo, share_target).values():
% for ip in sorted(ip_list):

@@ -1,40 +1,25 @@
from re import sub

files['/etc/exports'] = {
'content_type': 'mako',
'triggers': {
'action:nfs_reload_shares',
files = {
'/etc/exports': {
'content_type': 'mako',
'triggers': {
'action:nfs_reload_shares',
},
},
'/etc/default/nfs-kernel-server': {
'source': 'etc-default',
'triggers': {
'svc_systemd:nfs-server:restart',
},
},
}

files['/etc/default/nfs-kernel-server'] = {
'source': 'etc-default',
'triggers': {
'svc_systemd:nfs-server:restart',
actions = {
'nfs_reload_shares': {
'command': 'exportfs -a',
'triggered': True,
},
}

actions['nfs_reload_shares'] = {
'command': 'exportfs -a',
'triggered': True,
svc_systemd = {
'nfs-server': {},
}

svc_systemd['nfs-server'] = {}

if node.has_bundle('avahi-daemon'):
for path, shares in node.metadata.get('nfs-server/shares', {}).items():
create_avahi_file = False
for share_target, share_options in shares.items():
if ',insecure,' in f',{share_options},':
create_avahi_file = True

if create_avahi_file:
share_name_normalized = sub('[^a-z0-9-_]+', '_', path)

files[f'/etc/avahi/services/nfs{share_name_normalized}.service'] = {
'source': 'avahi.service',
'content_type': 'mako',
'context': {
'path': path,
},
}
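In the refactored nfs-server bundle above, an avahi service file is only generated for shares that have at least one export carrying the `insecure` option, and the share path is normalised into a service file name. A small sketch of that gating logic; the share dict below is an assumption in the shape of `nfs-server/shares`:

```python
# Sketch: announce a share over avahi only if some export entry contains "insecure".
from re import sub

shares = {
    '/storage/nas': {'172.19.138.0/24': 'ro,all_squash,no_subtree_check,insecure'},
    '/srv/paperless': {'home.paperless': 'rw,all_squash,no_subtree_check'},
}  # assumption: example data, not the node's real metadata

for path, share_targets in shares.items():
    create_avahi_file = any(
        ',insecure,' in f',{options},' for options in share_targets.values()
    )
    if create_avahi_file:
        share_name_normalized = sub('[^a-z0-9-_]+', '_', path)
        print(f'/etc/avahi/services/nfs{share_name_normalized}.service')
```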
@@ -8,11 +8,8 @@ Requires=redis.service
User=paperless
Group=paperless
Environment=PAPERLESS_CONFIGURATION_PATH=/opt/paperless/paperless.conf
Environment=GRANIAN_PORT=22070
Environment=GRANIAN_WORKERS=4
Environment=GRANIAN_HOST=::1
WorkingDirectory=/opt/paperless/src/paperless-ngx/src
ExecStart=/opt/paperless/venv/bin/granian --interface asginl --ws "paperless.asgi:application"
ExecStart=/opt/paperless/venv/bin/gunicorn -c /opt/paperless/src/paperless-ngx/gunicorn.conf.py -b 127.0.0.1:22070 paperless.asgi:application
Restart=always
RestartSec=10
SyslogIdentifier=paperless-webserver

@@ -99,7 +99,7 @@ def nginx(metadata):
'domain': metadata.get('paperless/domain'),
'locations': {
'/': {
'target': 'http://[::1]:22070',
'target': 'http://127.0.0.1:22070',
'websockets': True,
'proxy_set_header': {
'X-Forwarded-Host': '$server_name',
@@ -2,14 +2,13 @@ from datetime import datetime
from os import listdir
from os.path import isfile, join
from subprocess import check_output
from textwrap import dedent

from bundlewrap.utils.ui import io

zone_path = join(repo.path, 'data', 'powerdns', 'files', 'bind-zones')

nameservers = set()
for rnode in repo.nodes_in_group('dns'):
for rnode in sorted(repo.nodes_in_group('dns')):
nameservers.add(rnode.metadata.get('powerdns/my_hostname', rnode.metadata.get('hostname')))

my_primary_servers = set()

@@ -76,45 +75,25 @@ actions = {
}

if node.metadata.get('powerdns/features/bind', False):
try:
output = check_output(['git', 'log', '-1', '--pretty=%ci']).decode('utf-8').strip()
serial = datetime.strptime(output, '%Y-%m-%d %H:%M:%S %z').strftime('%y%m%d%H%M')
except Exception as e:
io.stderr(f"{node.name} Error while parsing commit time for powerdns zone serial: {e!r}")
serial = datetime.now().strftime('%y%m%d0000')

HEADER = dedent(f"""
$TTL 60
@ IN SOA ns-mephisto.kunbox.net. hostmaster.kunbox.net. (
{serial}
3600
600
86400
300
)
""").strip()

for ns in sorted(nameservers):
HEADER += f"\n@ IN NS {ns}."

primary_zones = set()
for zone in listdir(zone_path):
if (
not (
isfile(join(zone_path, zone))
or islink(join(zone_path, zone))
)
or zone.startswith(".")
or zone.startswith("_")
):
if not isfile(join(zone_path, zone)) or zone.startswith(".") or zone.startswith("_"):
continue

try:
output = check_output(['git', 'log', '-1', '--pretty=%ci']).decode('utf-8').strip()
serial = datetime.strptime(output, '%Y-%m-%d %H:%M:%S %z').strftime('%y%m%d%H%M')
except Exception as e:
io.stderr(f"Error while parsing commit time for {zone} serial: {e!r}")
serial = datetime.now().strftime('%y%m%d0000')

primary_zones.add(zone)

files[f'/var/lib/powerdns/zones/{zone}'] = {
'content_type': 'mako',
'context': {
'HEADER': HEADER + f"\n$ORIGIN {zone}.",
'NAMESERVERS': '\n'.join(sorted({f'@ IN NS {ns}.' for ns in nameservers})),
'SERIAL': serial,
'metadata_records': node.metadata.get(f'powerdns/bind-zones/{zone}/records', []),
},
'source': f'bind-zones/{zone}',
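The powerdns hunk above derives the zone serial from the date of the last git commit, in `YYMMDDHHMM` form, and falls back to a midnight serial for today if the commit date cannot be parsed. A minimal standalone sketch of that serial generation (run it inside any git checkout):

```python
# Sketch of the serial logic: last commit time -> YYMMDDHHMM, with a safe fallback.
from datetime import datetime
from subprocess import check_output

try:
    output = check_output(['git', 'log', '-1', '--pretty=%ci']).decode('utf-8').strip()
    serial = datetime.strptime(output, '%Y-%m-%d %H:%M:%S %z').strftime('%y%m%d%H%M')
except Exception as exc:
    print(f'Error while parsing commit time for zone serial: {exc!r}')
    serial = datetime.now().strftime('%y%m%d0000')

print(serial)
```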
@@ -33,12 +33,6 @@
from => '${mail_from}',
},

% if not enable_registration:
registration => {
disabled => 1,
},
% endif

ref => {
issues => 'https://github.com/derf/travelynx/issues',
source => 'https://github.com/derf/travelynx',

@@ -10,12 +10,11 @@ defaults = {
'password': repo.vault.password_for('{} postgresql travelynx'.format(node.name)),
'database': 'travelynx',
},
'additional_cookie_secrets': set(),
'cookie_secret': repo.vault.password_for('{} travelynx cookie_secret'.format(node.name)),
'enable_registration': False,
'mail_from': 'travelynx@{}'.format(node.hostname),
'spare_workers': 2,
'workers': 4,
'spare_workers': 2,
'mail_from': 'travelynx@{}'.format(node.hostname),
'cookie_secret': repo.vault.password_for('{} travelynx cookie_secret'.format(node.name)),
'additional_cookie_secrets': set(),
},
'postgresql': {
'roles': {

@@ -67,7 +67,6 @@ svc_systemd = {
'file:/etc/systemd/system/zfs-import-scan.service.d/bundlewrap.conf',
},
'after': {
'bundle:dm-crypt', # might unlock disks
'pkg_apt:',
},
'before': {

@@ -84,7 +83,6 @@ svc_systemd = {
},
'zfs-mount.service': {
'after': {
'bundle:dm-crypt', # might unlock disks
'pkg_apt:',
},
},
@@ -1,2 +0,0 @@
@ IN TXT "v=spf1 -all"
_dmarc IN TXT "v=DMARC1; p=reject"

@@ -1,11 +0,0 @@
@ IN TXT "v=spf1 mx -all"
@ IN MX 10 mail.franzi.business.
_dmarc IN TXT "v=DMARC1; p=quarantine; rua=mailto:dmarc@kunbox.net; ruf=mailto:dmarc@kunbox.net; fo=0:d:s; adkim=s; aspf=s"
_mta-sts IN TXT "v=STSv1;id=20201111;"
_smtp._tls IN TXT "v=TLSRPTv1;rua=mailto:tlsrpt@kunbox.net"

mta-sts IN CNAME carlene.kunbox.net.

2019._domainkey IN TXT "v=DKIM1; k=rsa; p=MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAwkg6UAcu3V98hal1UVf6yB0WT1CKDS0AK83CUlSP8bUwraPxkxK1nkQOUsmjbQs6a3FhdsKprMi32GeUaTVvZg81JIybPk3jNugfNWfSjs2TXPomYu+XD2pmmbR3cZlzC5NGR2nmBFt/P/S2ihPHj35KziiBIwK1TdvOi1M2+upCjK33Icco0ByCm0gJpD2O0cbqcBcUKqd6X440vYhNXH1ygp0e91P0iRnvS9sg6yD0xjD8kD6j/8GfxBY+9bpU3EvDoBgyJSbjw5b6PUVJbKMXzw1NIRNj0SXKs5BakjS8+7u62vR11IPCYRwy+yr0rDT0tNegM7gStIIgoTpOoQIDAQAB"

uo4anejdvvdw8bkne3kjiqavcqmj0416._domainkey IN TXT "v=DKIM1; k=rsa; p=MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAnh5Ym9PO7r+wdOIKfopvHzn3KU3qT6IlCG/gvvbmIqoeFQfRbAe3gQmcG6RcLue55cJQGhI6y2r0lm59ZeoHR40aM+VabAOlplekM7xWmoXb/9vG2OZLIqAyF4I+7GQmTN6B9keBHp9SWtDUkI0B0G9neZ5MkXJP705M0duxritqQlb4YvCZwteHiyckKcg9aE9j+GF2EEawBoVDpoveoB3+wgde3lWEUjjwKFtXNXxuN354o6jgXgPNWtIEdPMLfK/o0CaCjZNlzaLTsTegY/+67hdHFqDmm8zXO9s+Xiyfq7CVq21t7wDhQ2W1agj+up6lH82FMh5rZNxJ6XB0yQIDAQAB"

@@ -1,3 +0,0 @@
${HEADER}

<%include file="bind-zones/_mail_NULL" />

@@ -1,6 +0,0 @@
${HEADER}

@ IN AAAA 2a0a:51c0:0:225::2
@ IN A 193.135.9.29

<%include file="bind-zones/_mail_NULL" />

@@ -1 +0,0 @@
_parked

@@ -1 +0,0 @@
_parked

@@ -1,3 +0,0 @@
${HEADER}

<%include file="bind-zones/_mail_NULL" />

@@ -1,3 +0,0 @@
${HEADER}

<%include file="bind-zones/_mail_carlene" />

@@ -1,8 +0,0 @@
${HEADER}

@ IN AAAA 2a03:4000:4d:5e::1
@ IN A 194.36.145.49

<%include file="bind-zones/_mail_carlene" />

_acme-challenge IN CNAME 63bc37c61bda3c1f4fa1f270f8890c7f89c24353.acme.ctu.cx.

@@ -1,29 +0,0 @@
${HEADER}

@ IN AAAA 2a0a:51c0:0:225::2
@ IN A 193.135.9.29

<%include file="bind-zones/_mail_carlene" />

_atproto IN TXT "did=did:plc:d762mg6wvvmpeu66zojntlof"
_token._dnswl IN TXT "gg3mbwjx9bbuo5osvh7oz6bc881wcmc"
_matrix._tcp IN SRV 10 10 443 matrix.franzi.business.

; carlene
git IN CNAME carlene.kunbox.net.
irc IN CNAME carlene.kunbox.net.
mail IN CNAME carlene.kunbox.net.
matrix IN CNAME carlene.kunbox.net.
matrix-stickers IN CNAME carlene.kunbox.net.
netbox IN CNAME carlene.kunbox.net.
ntfy IN CNAME carlene.kunbox.net.
postfixadmin IN CNAME carlene.kunbox.net.
rss IN CNAME carlene.kunbox.net.
travelynx IN CNAME carlene.kunbox.net.

; icinga2
icinga IN CNAME icinga2.kunbox.net.
status IN CNAME icinga2.kunbox.net.

; pretix
tickets IN CNAME franzi-business.cname.pretix.eu.

@@ -1,4 +1,16 @@
${HEADER}
$TTL 60
@ IN SOA ns-mephisto.kunbox.net. hostmaster.kunbox.net. (
${SERIAL}
3600
600
86400
300
)


${NAMESERVERS}

$ORIGIN kunbox.net.

; ends up on carlene.kunbox.net
@ IN A 193.135.9.29

@@ -1 +0,0 @@
_parked

@@ -1,6 +0,0 @@
${HEADER}

@ IN AAAA 2a0a:51c0:0:225::2
@ IN A 193.135.9.29

<%include file="bind-zones/_mail_carlene" />

@@ -1,14 +0,0 @@
${HEADER}

@ IN AAAA 2a0a:51c0:0:225::2
@ IN A 193.135.9.29

<%include file="bind-zones/_mail_carlene" />

@ IN TXT "google-site-verification=Xl-OBZpTL1maD2Qr8QmQ2aKRXZLnCmvddpFdrTT8L34"

_token._dnswl IN TXT "5mx0rv9ru8s1zz4tf4xlt48osh09czmg"

git IN CNAME git.franzi.business.
grafana IN CNAME influxdb.htz-cloud.kunbox.net.
influxdb IN CNAME influxdb.htz-cloud.kunbox.net.

@@ -1 +0,0 @@
_parked

@@ -1 +0,0 @@
_parked

@@ -1 +0,0 @@
_parked

@@ -1,6 +0,0 @@
${HEADER}

@ IN AAAA 2a0a:51c0:0:225::2
@ IN A 193.135.9.29

<%include file="bind-zones/_mail_carlene" />

@@ -1 +0,0 @@
_parked

@@ -6,7 +6,6 @@ AS_NUMBERS = {
'htz-cloud': 4290000137,
'ionos': 4290000002,
'revision': 4290000078,
'rottenraptor': 4290000030,
}

WG_AUTOGEN_NODES = [
@@ -22,17 +22,15 @@ exclude_from_backups = true
[metadata.backup-server.zpool_create_options]
ashift = 12

[metadata.backup-server.encrypted-devices.WVT0RNKF]
device = "/dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_drive-scsi4"
passphrase = "!bwpass:bw/backup-kunsi/ata-ST20000NM007D-3DJ103_WVT0RNKF"
[[metadata.backup-server.encrypted-devices]]
"/dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_drive-scsi1-part1" = "!bwpass:bw/backup-kunsi/ata-ST18000NM0092-3CX103_ZVV06SLR-part1"
"/dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_drive-scsi2-part1" = "!bwpass:bw/backup-kunsi/ata-ST18000NM0092-3CX103_ZVV0686W-part1"
"/dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_drive-scsi3-part1" = "!bwpass:bw/backup-kunsi/ata-ST18000NM0092-3CX103_ZVV06JV7-part1"

[metadata.backup-server.encrypted-devices.WVT0V0NQ]
device = "/dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_drive-scsi5"
passphrase = "!bwpass:bw/backup-kunsi/ata-ST20000NM007D-3DJ103_WVT0V0NQ"

[metadata.backup-server.encrypted-devices.WVT0W64H]
device = "/dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_drive-scsi6"
passphrase = "!bwpass:bw/backup-kunsi/ata-ST20000NM007D-3DJ103_WVT0W64H"
[[metadata.backup-server.encrypted-devices]]
"/dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_drive-scsi1-part2" = "!bwpass:bw/backup-kunsi/ata-ST18000NM0092-3CX103_ZVV06SLR-part2"
"/dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_drive-scsi2-part2" = "!bwpass:bw/backup-kunsi/ata-ST18000NM0092-3CX103_ZVV0686W-part2"
"/dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_drive-scsi3-part2" = "!bwpass:bw/backup-kunsi/ata-ST18000NM0092-3CX103_ZVV06JV7-part2"

[metadata.zfs]
scrub_when = "Wed 08:00 Europe/Berlin"
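The TOML change above moves `backup-server.encrypted-devices` from one table per disk to an array of tables, one table per intended pool, mapping partition path to passphrase. A short sketch of how the two shapes parse, which is what the `isinstance()` branch in the zfs_pool reactor distinguishes; requires Python 3.11+ for `tomllib`, and the device paths are placeholders:

```python
# Sketch: the old per-device tables load as a dict, the new array-of-tables loads
# as a list of dicts (one dict per intended pool).
import tomllib

old_style = tomllib.loads("""
[metadata.backup-server.encrypted-devices.WVT0RNKF]
device = "/dev/disk/by-id/example-disk"
passphrase = "!bwpass:example"
""")

new_style = tomllib.loads("""
[[metadata.backup-server.encrypted-devices]]
"/dev/disk/by-id/example-part1" = "!bwpass:example-part1"

[[metadata.backup-server.encrypted-devices]]
"/dev/disk/by-id/example-part2" = "!bwpass:example-part2"
""")

print(type(old_style['metadata']['backup-server']['encrypted-devices']))  # dict
print(type(new_style['metadata']['backup-server']['encrypted-devices']))  # list
```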
@@ -37,8 +37,8 @@ imap_host = "secureimap.t-online.de"
imap_pass = "!bwpass_attr:t-online.de/franzi.kunsmann@t-online.de:imap"

[metadata.forgejo]
version = "11.0.0"
sha1 = "3a12529ab21ca04f2b3e6cf7a6c91af18f00ee5d"
version = "10.0.3"
sha1 = "d1199c43de9e69f6bb8058c15290e79862913413"
domain = "git.franzi.business"
enable_git_hooks = true
install_ssh_key = true

@@ -98,8 +98,8 @@ provisioning.shared_secret = "!decrypt:encrypt$gAAAAABfVKflEMAi07C_QGP8cy97hF-4g
"'@kunsi:franzi.business'" = "admin"

[metadata.mautrix-whatsapp]
version = "v0.12.0"
sha1 = "02094da0a164099d4d35e5edb4b87875ad694833"
version = "v0.11.4"
sha1 = "71a064b82072d2cec3d655c8848af418c1f54c77"
permissions."'@kunsi:franzi.business'" = "admin"
[metadata.mautrix-whatsapp.homeserver]
domain = "franzi.business"

@@ -110,7 +110,7 @@ domain = "rss.franzi.business"

[metadata.netbox]
domain = "netbox.franzi.business"
version = "v4.2.8"
version = "v4.2.6"
admins.kunsi = "hostmaster@kunbox.net"

[metadata.nextcloud]

@@ -244,13 +244,8 @@ disks = [
"/dev/disk/by-id/nvme-SAMSUNG_MZVL22T0HBLB-00B00_S677NX0W114380",
]

[metadata.systemd-timers.timers.42c3-topic]
command = "/home/kunsi/42c3-topic.sh"
user = "kunsi"
when = "04:00:00 Europe/Berlin"

[metadata.travelynx]
version = "2.11.24"
version = "2.11.13"
mail_from = "travelynx@franzi.business"
domain = "travelynx.franzi.business"
nodes/home.mitel-rfp35.toml (new file, +4)

@@ -0,0 +1,4 @@
dummy = true

[metadata.interfaces.default]
ips = ["172.19.138.41"]

@@ -42,7 +42,7 @@ nodes['home.downloadhelper'] = {
'mounts': {
'storage': {
'mountpoint': '/mnt/nas',
'serverpath': '172.19.138.20:/mnt/download',
'serverpath': '172.19.138.20:/storage/download',
'mount_options': {
'retry=0',
'rw',
@@ -5,6 +5,7 @@ nodes['home.nas'] = {
'bundles': {
'avahi-daemon',
'backup-client',
'dm-crypt',
'jellyfin',
'lm-sensors',
'mixcloud-downloader',

@@ -60,7 +61,6 @@ nodes['home.nas'] = {
},
'backups': {
'paths': {
'/home/kunsi/',
'/storage/nas/',
},
},

@@ -69,6 +69,22 @@ nodes['home.nas'] = {
'avahi-aruba-fixup': '17,47 * * * * root /usr/bin/systemctl restart avahi-daemon.service',
},
},
'dm-crypt': {
'encrypted-devices': {
'/dev/disk/by-id/ata-Samsung_SSD_870_QVO_8TB_S5SSNJ0X409404K': {
'dm-name': 'sam-S5SSNJ0X409404K',
'passphrase': bwpass.password('bw/home.nas/dmcrypt/S5SSNJ0X409404K'),
},
'/dev/disk/by-id/ata-Samsung_SSD_870_QVO_8TB_S5SSNJ0X409845F': {
'dm-name': 'sam-S5SSNJ0X409845F',
'passphrase': bwpass.password('bw/home.nas/dmcrypt/S5SSNJ0X409845F'),
},
'/dev/disk/by-id/ata-Samsung_SSD_870_QVO_8TB_S5SSNJ0X409870J': {
'dm-name': 'sam-S5SSNJ0X409870J',
'passphrase': bwpass.password('bw/home.nas/dmcrypt/S5SSNJ0X409870J'),
},
},
},
'groups': {
'nas': {},
},

@@ -80,9 +96,11 @@ nodes['home.nas'] = {
},
'5060/tcp': { # yate SIP
'home.snom-wohnzimmer',
'home.mitel-rfp35',
},
'5061/tcp': { # yate SIPS
'home.snom-wohnzimmer',
'home.mitel-rfp35',
},
# yate RTP uses some random UDP port. We cannot firewall
# it, because for incoming calls the other side decides

@@ -92,6 +110,7 @@ nodes['home.nas'] = {
# to deal with randomly changing IPs here.
'*/udp': {
'home.snom-wohnzimmer',
'home.mitel-rfp35',
},
},
},

@@ -135,11 +154,11 @@ nodes['home.nas'] = {
},
'nfs-server': {
'shares': {
'/mnt/download': {
'/storage/download': {
'home.downloadhelper': 'rw,all_squash,anonuid=65534,anongid=1012,no_subtree_check',
},
'/storage/nas': {
'172.19.138.0/24': 'ro,all_squash,anonuid=65534,anongid=65534,no_subtree_check,insecure',
'172.19.138.0/24': 'ro,all_squash,anonuid=65534,anongid=65534,no_subtree_check',
},
'/srv/paperless': {
'home.paperless': 'rw,all_squash,anonuid=65534,anongid=65534,no_subtree_check',

@@ -173,7 +192,7 @@ nodes['home.nas'] = {
'disks': {
'/dev/nvme0',

# nas/timemachine disks
# old nas disks
'/dev/disk/by-id/ata-WDC_WD6003FFBX-68MU3N0_V8GE15GR',
'/dev/disk/by-id/ata-WDC_WD6003FFBX-68MU3N0_V8HJ406R',
'/dev/disk/by-id/ata-WDC_WD6003FFBX-68MU3N0_V8HJBTLR',

@@ -181,9 +200,10 @@ nodes['home.nas'] = {
'/dev/disk/by-id/ata-WDC_WD6003FFBX-68MU3N0_V8J8ZKRR',
'/dev/disk/by-id/ata-WDC_WD6003FFBX-68MU3N0_V9JS5UYL',

# ssdpool disks
'/dev/disk/by-id/ata-INTEL_SSDSC2KB960G8_PHYF244001QU960CGN',
'/dev/disk/by-id/ata-INTEL_SSDSC2KB960G8_PHYF244002AS960CGN',
# encrypted disks
'/dev/disk/by-id/ata-Samsung_SSD_870_QVO_8TB_S5SSNJ0X409404K',
'/dev/disk/by-id/ata-Samsung_SSD_870_QVO_8TB_S5SSNJ0X409845F',
'/dev/disk/by-id/ata-Samsung_SSD_870_QVO_8TB_S5SSNJ0X409870J',
},
},
'systemd-networkd': {

@@ -238,20 +258,6 @@ nodes['home.nas'] = {
'zfs_arc_max_gb': 8,
},
'pools': {
'ssdpool': {
'when_creating': {
'config': [
{
'type': 'mirror',
'devices': {
'/dev/disk/by-id/ata-INTEL_SSDSC2KB960G8_PHYF244001QU960CGN',
'/dev/disk/by-id/ata-INTEL_SSDSC2KB960G8_PHYF244002AS960CGN',
},
},
],
'ashift': 12,
},
},
'tank': {
'when_creating': {
'config': [

@@ -270,46 +276,67 @@ nodes['home.nas'] = {
'ashift': 12,
},
},
'encrypted': {
'when_creating': {
'config': [
{
'type': 'raidz',
'devices': {
'/dev/mapper/sam-S5SSNJ0X409404K',
'/dev/mapper/sam-S5SSNJ0X409845F',
'/dev/mapper/sam-S5SSNJ0X409870J',
},
},
],
'ashift': 12,
},
'needs': {
'action:dm-crypt_open_sam-S5SSNJ0X409404K',
'action:dm-crypt_open_sam-S5SSNJ0X409845F',
'action:dm-crypt_open_sam-S5SSNJ0X409870J',
},
# see comment in bundle:backup-server
'unless': 'zpool import encrypted',
},
},
'datasets': {
'ssdpool': {
'encrypted': {
'primarycache': 'metadata',
},
'ssdpool/yate': {
'mountpoint': '/opt/yate',
},
'ssdpool/download': {
'mountpoint': '/mnt/download',
'quota': '858993459200', # 800 GB
},
'ssdpool/paperless': {
'mountpoint': '/srv/paperless',
},
'tank': {
'primarycache': 'metadata',
},
'tank/nas': {
'encrypted/nas': {
'acltype': 'off',
'atime': 'off',
'compression': 'off',
'mountpoint': '/storage/nas',
},
'tank': {
'primarycache': 'metadata',
},
'tank/opt-yate': {
'mountpoint': '/opt/yate',
},
'tank/download': {
'mountpoint': '/storage/download',
},
'tank/paperless': {
'mountpoint': '/srv/paperless',
},
},
'snapshots': {
'retain_per_dataset': {
'tank/nas': {
'encrypted/nas': {
# juuuuuuuust to be sure.
'daily': 14,
'weekly': 6,
'monthly': 12,
},
'ssdpool/download': {
'tank/download': {
'hourly': 48,
'daily': 0,
'weekly': 0,
'monthly': 0,
},
'ssdpool/paperless': {
'tank/paperless': {
'daily': 14,
'weekly': 6,
'monthly': 24,
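One small check on the quota retained in the dataset move above: the value `858993459200` corresponds to 800 GiB, so the inline `# 800 GB` comment is using binary gigabytes.

```python
# Worked check: 800 GiB expressed in bytes matches the quota string in the metadata.
quota = 800 * 1024**3
print(quota)                    # 858993459200
print(quota == 858993459200)    # True
```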
@@ -49,7 +49,7 @@ nodes['home.paperless'] = {
},
'paperless': {
'domain': 'paperless.home.kunbox.net',
'version': 'v2.15.3',
'version': 'v2.14.7',
'timezone': 'Europe/Berlin',
},
'postgresql': {

@@ -37,7 +37,6 @@ nodes['htz-cloud.wireguard'] = {
'172.19.137.0/24',
'172.19.136.62/31',
'172.19.136.64/31',
'172.19.136.66/31',
'192.168.100.0/24',
},
},

@@ -53,7 +52,6 @@ nodes['htz-cloud.wireguard'] = {
'udp dport 1194 accept',
'udp dport 51800 accept',
'udp dport 51804 accept',
'udp dport 51805 accept',

# wg.c3voc.de
'udp dport 51801 ip saddr 185.106.84.42 accept',

@@ -127,13 +125,6 @@ nodes['htz-cloud.wireguard'] = {
'my_ip': '172.19.136.66',
'their_ip': '172.19.136.67',
},
'rottenraptor-vpn': {
'endpoint': None,
'exclude_from_monitoring': True,
'my_port': 51805,
'my_ip': '172.19.136.68',
'their_ip': '172.19.136.69',
},
},
},
},
@@ -14,18 +14,6 @@ check_command = "sshmon"
check_command = "sshmon"
"vars.sshmon_command" = "CT480BX500SSD1_2314E6C5C6C8"

[metadata.icinga2_api.smartd.services."SMART STATUS ST20000NM007D-3DJ103_WVT0RNKF"]
check_command = "sshmon"
"vars.sshmon_command" = "ST20000NM007D-3DJ103_WVT0RNKF"

[metadata.icinga2_api.smartd.services."SMART STATUS ST20000NM007D-3DJ103_WVT0V0NQ"]
check_command = "sshmon"
"vars.sshmon_command" = "ST20000NM007D-3DJ103_WVT0V0NQ"

[metadata.icinga2_api.smartd.services."SMART STATUS ST20000NM007D-3DJ103_WVT0W64H"]
check_command = "sshmon"
"vars.sshmon_command" = "ST20000NM007D-3DJ103_WVT0W64H"

[metadata.icinga2_api.smartd.services."SMART STATUS ST18000NM0092-3CX103_ZVV0686W"]
check_command = "sshmon"
"vars.sshmon_command" = "ST18000NM0092-3CX103_ZVV0686W"

@@ -1,27 +0,0 @@
hostname = "172.30.17.53"
bundles = ["bird", "wireguard"]
groups = ["debian-bookworm"]

[metadata]
location = "rottenraptor"
backups.exclude_from_backups = true
icinga_options.exclude_from_monitoring = true

[metadata.bird]
static_routes = [
"172.30.17.0/24",
]

[metadata.interfaces.ens18]
ips = ["172.30.17.53/24"]
gateway4 = "172.30.17.1"

[metadata.nftables.postrouting]
"50-router" = [
"oifname ens18 masquerade",
]

[metadata.wireguard.peers."htz-cloud.wireguard"]
my_port = 51804
my_ip = "172.19.136.69"
their_ip = "172.19.136.68"
@ -2,11 +2,13 @@ nodes['sophie.vmhost'] = {
|
|||
'hostname': '172.19.164.2',
|
||||
'bundles': {
|
||||
'backup-client',
|
||||
'hetzner-dyndns',
|
||||
'lm-sensors',
|
||||
'nfs-server',
|
||||
'mosquitto',
|
||||
'nfs-server',
|
||||
'smartd',
|
||||
'vmhost',
|
||||
'wireguard',
|
||||
'zfs',
|
||||
},
|
||||
'groups': {
|
||||
|
@ -21,6 +23,11 @@ nodes['sophie.vmhost'] = {
|
|||
'groups': {
|
||||
'nas': {},
|
||||
},
|
||||
'hetzner-dyndns': {
|
||||
'zone': 'sophies-kitchen.eu',
|
||||
'record': 'router.home',
|
||||
'api_key': vault.decrypt('encrypt$gAAAAABoABHrRTTyOAAFIsHK_g-bubDoNJidbAQ6_0VXyqfal8-wpVMuPPlrw-OtbI1AjNU6Rd1_gKTvwYtNYO9X6RuvuW3TCCH_eitpsoylVEQ0X6SDFNQAFfjkRlOgEiFl85oyTazl'),
|
||||
},
|
||||
'interfaces': {
|
||||
'br1': {
|
||||
'ips': {
|
||||
|
@ -66,6 +73,21 @@ nodes['sophie.vmhost'] = {
|
|||
},
|
||||
},
|
||||
},
|
||||
'nftables': {
|
||||
'forward': {
|
||||
'50-router': [
|
||||
'ct state { related, established } accept',
|
||||
'oifname br1 accept',
|
||||
],
|
||||
},
|
||||
'input': {
|
||||
'50-wireguard': [
|
||||
'udp dport 1194 accept',
|
||||
'udp dport 10348 accept',
|
||||
'udp dport 10349 accept',
|
||||
],
|
||||
},
|
||||
},
|
||||
'smartd': {
|
||||
'disks': {
|
||||
'/dev/nvme0',
|
||||
|
@ -75,6 +97,12 @@ nodes['sophie.vmhost'] = {
|
|||
'/dev/disk/by-id/ata-ST20000NM007D-3DJ103_ZVT7D6JP',
|
||||
},
|
||||
},
|
||||
'sysctl': {
|
||||
'options': {
|
||||
'net.ipv4.conf.all.forwarding': '1',
|
||||
'net.ipv6.conf.all.forwarding': '1',
|
||||
},
|
||||
},
|
||||
'systemd-networkd': {
|
||||
'bridges': {
|
||||
'br0': {
|
||||
|
@ -109,6 +137,29 @@ nodes['sophie.vmhost'] = {
|
|||
},
|
||||
},
|
||||
},
|
||||
'wireguard': {
|
||||
'snat_ip': '172.19.137.2',
|
||||
'peers': {
|
||||
'thinkpad': {
|
||||
'endpoint': None,
|
||||
'exclude_from_monitoring': True,
|
||||
'my_ip': '172.19.165.64',
|
||||
'my_port': 10348,
|
||||
'their_ip': '172.19.165.65',
|
||||
'psk': vault.decrypt('encrypt$gAAAAABoAUy3lAHfn7d9Jn4ppiPRr6LOReFGyGS4HzWC5ACHNipDFnGttnOHNji2DGIYVITzj3PosZs7PRn8BvXmwumEXNNP-G0nDucuiNNzUKuOCP4YWaF9-I1tnpmT_td3nqsCDajH'),
|
||||
'pubkey': vault.decrypt('encrypt$gAAAAABoAUxlf048ovJebqo0MlLiLHcuuTCSmnCzhxSZPrFMjRaFLW0CvC3GnVed_4n7CjjZ6ygrORSl8xyBM5hvbN0-JM_56ZZFpn1UVkizctjHjb1u2XtpGAe2nMAnq2Cdg5swgH9S'),
|
||||
},
|
||||
'smartphone': {
|
||||
'endpoint': None,
|
||||
'exclude_from_monitoring': True,
|
||||
'my_ip': '172.19.165.66',
|
||||
'my_port': 10349,
|
||||
'their_ip': '172.19.165.67',
|
||||
'psk': vault.decrypt('encrypt$gAAAAABoAUy3lAHfn7d9Jn4ppiPRr6LOReFGyGS4HzWC5ACHNipDFnGttnOHNji2DGIYVITzj3PosZs7PRn8BvXmwumEXNNP-G0nDucuiNNzUKuOCP4YWaF9-I1tnpmT_td3nqsCDajH'),
|
||||
'pubkey': vault.decrypt('encrypt$gAAAAABoAWD96YcEFsLzfOCzjS_4Hg7xX516OZ5RD_qFPSEZliaYSRMhY3uyNDtQ--e0dzEwdFHK_xGT3F7jQzYAvftH4iFtk9y3n3FNFVPxqsWckX4cJIX7ZZszbQCq8sfZZXGUR0C9'),
|
||||
},
|
||||
},
|
||||
},
|
||||
'zfs': {
|
||||
'pools': {
|
||||
'storage': {
|
||||
|
|