Compare commits


No commits in common. "main" and "miniserverupdates" have entirely different histories.

133 changed files with 2321 additions and 1181 deletions

.envrc
View file

@ -1,3 +0,0 @@
layout python3
source_env_if_exists .envrc.local

.gitignore
View file

@ -1,5 +1,3 @@
.secrets.cfg*
__pycache__
*.swp
.direnv
.envrc.local

View file

@ -0,0 +1,5 @@
context.exec = [
{ path = "pactl" args = "load-module module-native-protocol-tcp" }
{ path = "pactl" args = "load-module module-zeroconf-discover" }
{ path = "pactl" args = "load-module module-zeroconf-publish" }
]

View file

@ -0,0 +1,3 @@
[Autologin]
User=${user}
Session=i3.desktop

View file

@ -0,0 +1,110 @@
from os import listdir
from os.path import join
actions = {
'fc-cache_flush': {
'command': 'fc-cache -f',
'triggered': True,
'needs': {
'pkg_pacman:fontconfig',
},
},
'i3pystatus_create_virtualenv': {
'command': '/usr/bin/python3 -m virtualenv -p python3 /opt/i3pystatus/venv/',
'unless': 'test -d /opt/i3pystatus/venv/',
'needs': {
'directory:/opt/i3pystatus/src',
'pkg_pacman:python-virtualenv',
},
},
'i3pystatus_install': {
'command': ' && '.join([
'cd /opt/i3pystatus/src',
'/opt/i3pystatus/venv/bin/pip install --upgrade pip colour netifaces basiciw pytz',
'/opt/i3pystatus/venv/bin/pip install --upgrade -e .',
]),
'needs': {
'action:i3pystatus_create_virtualenv',
},
'triggered': True,
},
}
directories = {
'/etc/sddm.conf.d': {
'purge': True,
},
'/opt/i3pystatus/src': {},
'/usr/share/fonts/bundlewrap': {
'purge': True,
'triggers': {
'action:fc-cache_flush',
},
},
}
svc_systemd = {
'avahi-daemon': {
'needs': {
'pkg_pacman:avahi',
},
},
'sddm': {
'needs': {
'pkg_pacman:sddm',
},
},
}
git_deploy = {
'/opt/i3pystatus/src': {
'repo': 'https://github.com/enkore/i3pystatus.git',
'rev': 'current',
'triggers': {
'action:i3pystatus_install',
},
},
}
files['/etc/pipewire/pipewire-pulse.conf.d/50-network.conf'] = {}
for filename in listdir(join(repo.path, 'data', 'arch-with-gui', 'files', 'fonts')):
if filename.startswith('.'):
continue
if filename.endswith('.vault'):
# XXX remove this once we have a new bundlewrap release
# https://github.com/bundlewrap/bundlewrap/commit/2429b153dd1ca6781cf3812e2dec9c2b646a546b
from os import environ
if environ.get('BW_VAULT_DUMMY_MODE', '0') == '1':
continue
font_name = filename[:-6]
attrs = {
'content': repo.vault.decrypt_file_as_base64(join('arch-with-gui', 'files', 'fonts', filename)),
'content_type': 'base64',
}
else:
font_name = filename
attrs = {
'source': join('fonts', filename),
'content_type': 'binary',
}
files[f'/usr/share/fonts/bundlewrap/{font_name}'] = {
'triggers': {
'action:fc-cache_flush',
},
**attrs,
}
if node.metadata.get('arch-with-gui/autologin_as', None):
files['/etc/sddm.conf.d/autologin.conf'] = {
'context': {
'user': node.metadata.get('arch-with-gui/autologin_as'),
},
'content_type': 'mako',
'before': {
'svc_systemd:sddm',
},
}
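
A note on the slicing in the font loop above: filename[:-6] relies on the fixed six-character '.vault' suffix. A standalone sketch with an invented font name:

# Invented filename; only the suffix handling matters here.
filename = 'SomeFont.ttf.vault'
assert filename.endswith('.vault')
assert filename[:-6] == 'SomeFont.ttf'   # len('.vault') == 6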

View file

@ -0,0 +1,124 @@
assert node.os == 'arch'
defaults = {
'backups': {
'paths': {
'/etc/netctl',
},
},
'icinga_options': {
'exclude_from_monitoring': True,
},
'nftables': {
'input': {
'50-avahi': {
'udp dport 5353 accept',
'udp sport 5353 accept',
},
},
},
'pacman': {
'packages': {
# fonts
'fontconfig': {},
'ttf-dejavu': {
'needed_by': {
'pkg_pacman:sddm',
},
},
# login management
'sddm': {},
# networking
'avahi': {},
'netctl': {},
'util-linux': {}, # provides rfkill
'wpa_supplicant': {},
'wpa_actiond': {},
# shell and other gui stuff
'dunst': {},
'fish': {},
'kitty': {},
'libnotify': {}, # provides notify-send
'light': {},
'redshift': {},
'rofi': {},
# sound
'calf': {},
'easyeffects': {},
'lsp-plugins': {},
'pavucontrol': {},
'pipewire': {},
'pipewire-jack': {},
'pipewire-pulse': {},
'pipewire-zeroconf': {},
'qpwgraph': {},
# window management
'i3-wm': {},
'i3lock': {},
'xss-lock': {},
# i3pystatus dependencies
'iw': {},
'wireless_tools': {},
# Xorg
'xf86-input-libinput': {},
'xf86-input-wacom': {},
'xorg-server': {},
'xorg-setxkbmap': {},
'xorg-xev': {},
'xorg-xinput': {},
'xorg-xset': {},
# all them apps
'browserpass': {},
'browserpass-firefox': {},
'ffmpeg': {},
'firefox': {},
'gimp': {},
'imagemagick': {},
'inkscape': {},
'kdenlive': {},
'maim': {},
'mosh': {},
'mosquitto': {},
'mpv': {},
'pass': {},
'pass-otp': {},
'pdftk': {},
'pwgen': {},
'qpdfview': {},
'samba': {},
'shotcut': {},
'sipcalc': {},
'the_silver_searcher': {},
'tlp': {},
'virt-manager': {},
'xclip': {},
'xdotool': {}, # needed for maim window selection
},
},
}
@metadata_reactor.provides(
'backups/paths',
)
def backup_every_user_home(metadata):
paths = set()
for user, config in metadata.get('users', {}).items():
if config.get('delete', False):
continue
paths.add(config.get('home', f'/home/{user}'))
return {
'backups': {
'paths': paths,
},
}
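
To make the reactor concrete, a minimal sketch of its selection logic with invented users metadata:

# Invented users metadata; deleted users are skipped, custom homes respected.
users = {
    'alice': {},                      # -> /home/alice
    'bob': {'home': '/opt/bob'},      # -> /opt/bob
    'eve': {'delete': True},          # skipped
}
paths = {
    config.get('home', f'/home/{user}')
    for user, config in users.items()
    if not config.get('delete', False)
}
assert paths == {'/home/alice', '/opt/bob'}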

View file

@ -1,22 +0,0 @@
[server]
host-name=${node.name.split('.')[-1]}
use-ipv4=yes
use-ipv6=${'yes' if node.metadata.get('avahi-daemon/use-ipv6') else 'no'}
allow-interfaces=${','.join(sorted(node.metadata.get('interfaces', {}).keys()))}
ratelimit-interval-usec=1000000
ratelimit-burst=1000
[wide-area]
enable-wide-area=yes
[publish]
disable-publishing=no
disable-user-service-publishing=no
publish-hinfo=yes
publish-workstation=no
publish-aaaa-on-ipv4=no
publish-a-on-ipv6=no
[reflector]
[rlimits]

View file

@ -1,18 +0,0 @@
directories['/etc/avahi/services'] = {
'purge': True,
}
files['/etc/avahi/avahi-daemon.conf'] = {
'content_type': 'mako',
'triggers': {
'svc_systemd:avahi-daemon:restart',
},
}
svc_systemd['avahi-daemon'] = {
'needs': {
'file:/etc/avahi/avahi-daemon.conf',
'pkg_apt:avahi-daemon',
'pkg_apt:libnss-mdns',
},
}

View file

@ -1,11 +0,0 @@
defaults = {
'apt': {
'packages': {
'avahi-daemon': {},
'libnss-mdns': {},
},
},
'avahi-daemon': {
'use-ipv6': True,
}
}

View file

@ -2,6 +2,7 @@
from datetime import datetime
from json import load
from subprocess import check_output
from sys import argv, exit
from time import time
@ -17,17 +18,29 @@ try:
with open(f'/etc/backup-server/config.json', 'r') as f:
server_settings = load(f)
with open(f'/etc/backup-server/backups.json', 'r') as f:
backups = load(f)
# get all existing snapshots for NODE
for line in check_output('LC_ALL=C zfs list -H -t snapshot -o name', shell=True).splitlines():
line = line.decode('UTF-8')
if NODE not in backups:
if line.startswith('{}/{}@'.format(server_settings['zfs-base'], NODE)):
_, snapname = line.split('@', 1)
if 'zfs-auto-snap' in snapname:
# migration from auto-snapshots, ignore
continue
ts, bucket = snapname.split('-', 1)
snaps.add(int(ts))
if not snaps:
print('No backups found!')
exit(2)
delta = NOW - backups[NODE]
last_snap = sorted(snaps)[-1]
delta = NOW - last_snap
print('Last backup was on {} UTC'.format(
datetime.fromtimestamp(backups[NODE]).strftime('%Y-%m-%d %H:%M:%S'),
datetime.fromtimestamp(last_snap).strftime('%Y-%m-%d %H:%M:%S'),
))
# One day without backups is still okay. There may be fluctuations
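
The snapshot names parsed here follow a <zfs-base>/<node>@<unix-ts>-<bucket> pattern; a standalone sketch with an invented name:

# Invented snapshot name, split the same way as in the check above.
line = 'tank/backups/mynode@1700000000-daily'
_, snapname = line.split('@', 1)
ts, bucket = snapname.split('-', 1)
assert int(ts) == 1700000000 and bucket == 'daily'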

View file

@ -1,39 +0,0 @@
#!/usr/bin/env python3
from json import load, dump
from subprocess import check_output
from shutil import move
from os import remove
from collections import defaultdict
with open('/etc/backup-server/config.json', 'r') as f:
server_settings = load(f)
snapshots = defaultdict(set)
for line in check_output('LC_ALL=C zfs list -H -t snapshot -o name', shell=True).splitlines():
line = line.decode('UTF-8')
if line.startswith('{}/'.format(server_settings['zfs-base'])):
dataset, snapname = line.split('@', 1)
dataset = dataset.split('/')[-1]
ts, bucket = snapname.split('-', 1)
if not ts.isdigit():
# garbage, ignore
continue
snapshots[dataset].add(int(ts))
backups = {}
for dataset, snaps in snapshots.items():
backups[dataset] = sorted(snaps)[-1]
with open('/etc/backup-server/backups.tmp.json', 'w') as f:
dump(backups, f)
move(
'/etc/backup-server/backups.tmp.json',
'/etc/backup-server/backups.json',
)

View file

@ -33,11 +33,12 @@ for line in check_output('LC_ALL=C zfs list -H -t snapshot -o name', shell=True)
if line.startswith('{}/{}@'.format(server_settings['zfs-base'], NODE)):
_, snapname = line.split('@', 1)
ts, bucket = snapname.split('-', 1)
if not ts.isdigit():
if 'zfs-auto-snap' in snapname:
# migration from auto-snapshots, ignore
continue
ts, bucket = snapname.split('-', 1)
buckets.setdefault(bucket, set()).add(int(ts))
syslog(f'classified {line} as {bucket} from {ts}')

View file

@ -18,9 +18,6 @@ files = {
'/usr/local/share/icinga/plugins/check_backup_for_node': {
'mode': '0755',
},
'/usr/local/share/icinga/plugins/check_backup_for_node-cron': {
'mode': '0755',
},
}
directories['/etc/backup-server/clients'] = {

View file

@ -1,5 +1,3 @@
from bundlewrap.exceptions import BundleError
defaults = {
'backup-server': {
'my_ssh_port': 22,
@ -10,14 +8,6 @@ defaults = {
'c-*',
},
},
'systemd-timers': {
'timers': {
'check_backup_for_node-cron': {
'command': '/usr/local/share/icinga/plugins/check_backup_for_node-cron',
'when': '*-*-* *:00/5:00', # every five minutes
}
},
},
'zfs': {
# The whole point of doing backups is to keep them for a long
# time, which eliminates the need for this check.
@ -79,51 +69,25 @@ def zfs_pool(metadata):
return {}
crypt_devices = {}
pool_devices = set()
unlock_actions = set()
devices = metadata.get('backup-server/encrypted-devices')
for number, (device, passphrase) in enumerate(sorted(metadata.get('backup-server/encrypted-devices', {}).items())):
crypt_devices[device] = {
'dm-name': f'backup{number}',
'passphrase': passphrase,
}
pool_devices.add(f'/dev/mapper/backup{number}')
unlock_actions.add(f'action:dm-crypt_open_backup{number}')
# TODO remove this once we have migrated all systems
if isinstance(devices, dict):
pool_devices = set()
pool_opts = {
'devices': pool_devices,
}
for number, (device, passphrase) in enumerate(sorted(devices.items())):
crypt_devices[device] = {
'dm-name': f'backup{number}',
'passphrase': passphrase,
}
pool_devices.add(f'/dev/mapper/backup{number}')
unlock_actions.add(f'action:dm-crypt_open_backup{number}')
pool_config = [{
'devices': pool_devices,
}]
if len(pool_devices) > 2:
pool_config[0]['type'] = 'raidz'
elif len(pool_devices) > 1:
pool_config[0]['type'] = 'mirror'
elif isinstance(devices, list):
pool_config = []
for idx, intended_pool in enumerate(devices):
pool_devices = set()
for number, (device, passphrase) in enumerate(sorted(intended_pool.items())):
crypt_devices[device] = {
'dm-name': f'backup{idx}-{number}',
'passphrase': passphrase,
}
pool_devices.add(f'/dev/mapper/backup{idx}-{number}')
unlock_actions.add(f'action:dm-crypt_open_backup{idx}-{number}')
pool_config.append({
'devices': pool_devices,
'type': 'raidz',
})
else:
raise BundleError(f'{node.name}: unsupported configuration for backup-server/encrypted-devices')
if len(pool_devices) > 2:
pool_opts['type'] = 'raidz'
elif len(pool_devices) > 1:
pool_opts['type'] = 'mirror'
return {
'backup-server': {
@ -136,8 +100,9 @@ def zfs_pool(metadata):
'pools': {
'backups': {
'when_creating': {
'config': pool_config,
**metadata.get('backup-server/zpool_create_options', {}),
'config': [
pool_opts,
],
},
'needs': unlock_actions,
# That's a bit hacky. We do it this way to auto-import
@ -191,7 +156,7 @@ def monitoring(metadata):
continue
services[f'BACKUPS FOR NODE {client}'] = {
'command_on_monitored_host': '/usr/local/share/icinga/plugins/check_backup_for_node {} {}'.format(
'command_on_monitored_host': 'sudo /usr/local/share/icinga/plugins/check_backup_for_node {} {}'.format(
client,
config['one_backup_every_hours'],
),
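
Both variants of the reactor share the same vdev-type heuristic; restated in isolation:

def vdev_type(n_devices):
    # more than two devices -> raidz, exactly two -> mirror, one -> plain vdev
    if n_devices > 2:
        return 'raidz'
    if n_devices > 1:
        return 'mirror'
    return None

assert vdev_type(1) is None
assert vdev_type(2) == 'mirror'
assert vdev_type(4) == 'raidz'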

View file

@ -24,6 +24,7 @@ files = {
'before': {
'action:',
'pkg_apt:',
'pkg_pacman:',
},
},
}

View file

@ -1,5 +1,10 @@
if node.os == 'arch':
filename = '/etc/bird.conf'
else:
filename = '/etc/bird/bird.conf'
files = {
'/etc/bird/bird.conf': {
filename: {
'content_type': 'mako',
'triggers': {
'svc_systemd:bird:reload',
@ -10,7 +15,7 @@ files = {
svc_systemd = {
'bird': {
'needs': {
f'file:/etc/bird/bird.conf',
f'file:{filename}',
},
},
}

View file

@ -13,6 +13,15 @@ defaults = {
},
},
},
'pacman': {
'packages': {
'bird': {
'needed_by': {
'svc_systemd:bird',
},
},
},
},
'sysctl': {
'options': {
'net.ipv4.conf.all.forwarding': '1',

View file

@ -1,3 +1,10 @@
if node.os == 'arch':
service_name = 'cronie'
package_name = 'pkg_pacman:cronie'
else:
service_name = 'cron'
package_name = 'pkg_apt:cron'
files = {
'/etc/crontab': {
'content_type': 'mako',
@ -17,9 +24,9 @@ directories = {
}
svc_systemd = {
'cron': {
service_name: {
'needs': {
'pkg_apt:cron',
package_name,
},
},
}

View file

@ -4,4 +4,9 @@ defaults = {
'cron': {},
},
},
'pacman': {
'packages': {
'cronie': {},
},
},
}

View file

@ -18,13 +18,7 @@ try:
f'name={container_name}'
])
docker_json = loads(f"[{','.join([l for l in docker_ps.decode().splitlines() if l])}]")
containers = [
container
for container in docker_json
if container['Names'] == container_name
]
containers = loads(f"[{','.join([l for l in docker_ps.decode().splitlines() if l])}]")
if not containers:
print(f'CRITICAL: container {container_name} not found!')
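
The ps output being parsed here is newline-delimited JSON (one object per line); the join wraps it into a single parseable array. A sketch with invented output:

from json import loads

# Two invented lines in the same newline-delimited JSON shape.
docker_ps = b'{"Names":"app1"}\n{"Names":"app2"}\n'
containers = loads(f"[{','.join([l for l in docker_ps.decode().splitlines() if l])}]")
assert [c['Names'] for c in containers] == ['app1', 'app2']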

View file

@ -12,13 +12,11 @@ then
exit 1
fi
PUID="$(id -u "${user}")"
PGID="$(id -g "${user}")"
PUID="$(id -u "docker-${name}")"
PGID="$(id -g "docker-${name}")"
if [ "$ACTION" == "start" ]
then
docker rm "${name}" || true
docker run -d \
--name "${name}" \
--env "PUID=$PUID" \
@ -27,28 +25,20 @@ then
% for k, v in sorted(environment.items()):
--env "${k}=${v}" \
% endfor
--network aaarghhh \
--network host \
% for host_port, container_port in sorted(ports.items()):
--publish "127.0.0.1:${host_port}:${container_port}" \
--expose "127.0.0.1:${host_port}:${container_port}" \
% endfor
% for host_path, container_path in sorted(volumes.items()):
% if host_path.startswith('/'):
--volume "${host_path}:${container_path}" \
% else:
--volume "/var/opt/docker-engine/${name}/${host_path}:${container_path}" \
% endif
% endfor
--restart unless-stopped \
% if command:
"${image}" \
"${command}"
% else:
"${image}"
% endif
elif [ "$ACTION" == "stop" ]
then
docker stop "${name}"
docker rm "${name}"
else
echo "Unknown action $ACTION"

View file

@ -28,36 +28,18 @@ files['/usr/local/share/icinga/plugins/check_docker_container'] = {
'mode': '0755',
}
actions['docker_create_nondefault_network'] = {
# <https://docs.docker.com/engine/network/#dns-services>
# By default, containers inherit the DNS settings as defined in the
# /etc/resolv.conf configuration file. Containers that attach to the
# default bridge network receive a copy of this file. Containers that
# attach to a custom network use Docker's embedded DNS server. The embedded
# DNS server forwards external DNS lookups to the DNS servers configured on
# the host.
'command': 'docker network create aaarghhh',
'unless': 'docker network ls | grep -q -F aaarghhh',
'needs': {
'svc_systemd:docker',
},
}
for app, config in node.metadata.get('docker-engine/containers', {}).items():
volumes = config.get('volumes', {})
user = config.get('user', f'docker-{app}')
files[f'/opt/docker-engine/{app}'] = {
'source': 'docker-wrapper',
'content_type': 'mako',
'context': {
'command': config.get('command'),
'environment': config.get('environment', {}),
'image': config['image'],
'name': app,
'ports': config.get('ports', {}),
'timezone': node.metadata.get('timezone'),
'user': user,
'volumes': volumes,
},
'mode': '0755',
@ -66,17 +48,16 @@ for app, config in node.metadata.get('docker-engine/containers', {}).items():
},
}
users[user] = {
users[f'docker-{app}'] = {
'home': f'/var/opt/docker-engine/{app}',
'groups': {
'docker',
},
'after': {
'action:docker_create_nondefault_network',
'svc_systemd:docker',
# provides docker group
'pkg_apt:docker-ce',
},
}
if user == f'docker-{app}':
users[user]['home'] = f'/var/opt/docker-engine/{app}'
files[f'/usr/local/lib/systemd/system/docker-{app}.service'] = {
'source': 'docker-wrapper.service',
@ -99,23 +80,20 @@ for app, config in node.metadata.get('docker-engine/containers', {}).items():
*deps,
f'file:/opt/docker-engine/{app}',
f'file:/usr/local/lib/systemd/system/docker-{app}.service',
f'user:{user}',
f'user:docker-{app}',
'svc_systemd:docker',
*set(config.get('needs', set())),
},
}
for volume in volumes:
if not volume.startswith('/'):
volume = f'/var/opt/docker-engine/{app}/{volume}'
directories[volume] = {
'owner': user,
'group': user,
directories[f'/var/opt/docker-engine/{app}/{volume}'] = {
'owner': f'docker-{app}',
'group': f'docker-{app}',
'needed_by': {
f'svc_systemd:docker-{app}',
},
# don't do anything if the directory exists, docker images
# mangle owners
'unless': f'test -d {volume}',
'unless': f'test -d /var/opt/docker-engine/{app}/{volume}',
}

View file

@ -18,17 +18,11 @@ defaults = {
'/var/opt/docker-engine',
},
},
'nftables': {
'forward': {
'docker-engine': [
'ct state { related, established } accept',
'ip saddr 172.16.0.0/12 accept',
],
},
'postrouting': {
'docker-engine': [
'ip saddr 172.16.0.0/12 masquerade',
],
'hosts': {
'entries': {
'172.17.0.1': {
'host.docker.internal',
},
},
},
'docker-engine': {

View file

@ -1,89 +0,0 @@
assert node.has_bundle('docker-engine')
defaults = {
'docker-engine': {
'containers': {
'goauthentik-server': {
'image': 'ghcr.io/goauthentik/server:latest',
'command': 'server',
'environment': {
'AUTHENTIK_POSTGRESQL__HOST': 'goauthentik-postgresql',
'AUTHENTIK_POSTGRESQL__NAME': 'goauthentik',
'AUTHENTIK_POSTGRESQL__PASSWORD': repo.vault.password_for(f'{node.name} postgresql goauthentik'),
'AUTHENTIK_POSTGRESQL__USER': 'goauthentik',
'AUTHENTIK_REDIS__HOST': 'goauthentik-redis',
'AUTHENTIK_SECRET_KEY': repo.vault.password_for(f'{node.name} goauthentik secret key'),
},
'volumes': {
'media': '/media',
'templates': '/templates',
},
'ports': {
'9000': '9000',
'9443': '9443',
},
'needs': {
'svc_systemd:docker-goauthentik-postgresql',
'svc_systemd:docker-goauthentik-redis',
},
'requires': {
'docker-goauthentik-postgresql.service',
'docker-goauthentik-redis.service',
},
},
'goauthentik-worker': {
'image': 'ghcr.io/goauthentik/server:latest',
'command': 'worker',
'user': 'docker-goauthentik-server',
'environment': {
'AUTHENTIK_POSTGRESQL__HOST': 'goauthentik-postgresql',
'AUTHENTIK_POSTGRESQL__NAME': 'goauthentik',
'AUTHENTIK_POSTGRESQL__PASSWORD': repo.vault.password_for(f'{node.name} postgresql goauthentik'),
'AUTHENTIK_POSTGRESQL__USER': 'goauthentik',
'AUTHENTIK_REDIS__HOST': 'goauthentik-redis',
'AUTHENTIK_SECRET_KEY': repo.vault.password_for(f'{node.name} goauthentik secret key'),
},
'volumes': {
'/var/opt/docker-engine/goauthentik-server/media': '/media',
'/var/opt/docker-engine/goauthentik-server/certs': '/certs',
'/var/opt/docker-engine/goauthentik-server/templates': '/templates',
},
'needs': {
'svc_systemd:docker-goauthentik-postgresql',
'svc_systemd:docker-goauthentik-redis',
},
'requires': {
'docker-goauthentik-postgresql.service',
'docker-goauthentik-redis.service',
},
},
'goauthentik-postgresql': {
'image': 'docker.io/library/postgres:16-alpine',
'environment': {
'POSTGRES_PASSWORD': repo.vault.password_for(f'{node.name} postgresql goauthentik'),
'POSTGRES_USER': 'goauthentik',
'POSTGRES_DB': 'goauthentik',
},
'volumes': {
'database': '/var/lib/postgresql/data',
},
},
'goauthentik-redis': {
'image': 'docker.io/library/redis:alpine',
},
},
},
'nginx': {
'vhosts': {
'goauthentik': {
'locations': {
'/': {
'target': 'http://127.0.0.1:9000/',
'websockets': True,
'max_body_size': '5000m',
},
},
},
},
},
}

View file

@ -1,4 +1,6 @@
assert node.has_bundle('docker-engine')
assert node.has_bundle('redis')
assert not node.has_bundle('postgresql') # docker container uses that port
defaults = {
'docker-engine': {
@ -7,29 +9,24 @@ defaults = {
'image': 'ghcr.io/imagegenius/immich:latest',
'environment': {
'DB_DATABASE_NAME': 'immich',
'DB_HOSTNAME': 'immich-postgresql',
'DB_HOSTNAME': 'host.docker.internal',
'DB_PASSWORD': repo.vault.password_for(f'{node.name} postgresql immich'),
'DB_USERNAME': 'immich',
'REDIS_HOSTNAME': 'immich-redis',
'REDIS_HOSTNAME': 'host.docker.internal',
},
'volumes': {
'config': '/config',
'libraries': '/libraries',
'photos': '/photos',
},
'ports': {
'8080': '8080',
},
'needs': {
'svc_systemd:docker-immich-postgresql',
'svc_systemd:docker-immich-redis',
'svc_systemd:docker-postgresql14',
},
'requires': {
'docker-immich-postgresql.service',
'docker-immich-redis.service',
'docker-postgresql14.service',
},
},
'immich-postgresql': {
'postgresql14': {
'image': 'tensorchord/pgvecto-rs:pg14-v0.2.0',
'environment': {
'POSTGRES_PASSWORD': repo.vault.password_for(f'{node.name} postgresql immich'),
@ -40,9 +37,6 @@ defaults = {
'database': '/var/lib/postgresql/data',
},
},
'immich-redis': {
'image': 'docker.io/redis:6.2-alpine',
},
},
},
'nginx': {
@ -52,10 +46,19 @@ defaults = {
'/': {
'target': 'http://127.0.0.1:8080/',
'websockets': True,
'max_body_size': '5000m',
'max_body_size': '500m',
},
#'/api/socket.io/': {
# 'target': 'http://127.0.0.1:8081/',
# 'websockets': True,
#},
},
},
},
},
'redis': {
'bind': '0.0.0.0',
},
}

View file

@ -20,7 +20,7 @@ def nodejs(metadata):
if version >= (1, 11, 71):
return {
'nodejs': {
'version': 22,
'version': 20,
},
}
else:

View file

@ -129,14 +129,11 @@ def notify_per_ntfy():
data=message_text,
headers=headers,
auth=(CONFIG['ntfy']['user'], CONFIG['ntfy']['password']),
timeout=10,
)
r.raise_for_status()
except Exception as e:
log_to_syslog('Sending a Notification failed: {}'.format(repr(e)))
return False
return True
def notify_per_mail():
@ -202,8 +199,7 @@ if __name__ == '__main__':
notify_per_mail()
if args.sms:
ntfy_worked = False
if CONFIG['ntfy']['user']:
ntfy_worked = notify_per_ntfy()
if not args.service_name or not ntfy_worked:
if not args.service_name:
notify_per_sms()
if CONFIG['ntfy']['user']:
notify_per_ntfy()

View file

@ -401,6 +401,22 @@ for rnode in sorted(repo.nodes):
DAYS_TO_STRING[day%7]: f'{hour}:{minute}-{hour}:{minute+15}',
},
})
elif (
rnode.has_bundle('pacman')
and rnode.metadata.get('pacman/unattended-upgrades/is_enabled', False)
):
day = rnode.metadata.get('pacman/unattended-upgrades/day')
hour = rnode.metadata.get('pacman/unattended-upgrades/hour')
minute = rnode.magic_number%30
downtimes.append({
'name': 'unattended-upgrades',
'host': rnode.name,
'comment': f'Downtime for upgrade-and-reboot of node {rnode.name}',
'times': {
DAYS_TO_STRING[day%7]: f'{hour}:{minute}-{hour}:{minute+15}',
},
})
files['/etc/icinga2/conf.d/groups.conf'] = {
'source': 'icinga2/groups.conf',
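
One property of the downtime window built above: the minute is taken modulo 30, so minute + 15 never crosses the hour boundary. A quick check:

for magic_number in range(100):          # any per-node value
    minute = magic_number % 30           # as in rnode.magic_number % 30
    assert minute + 15 <= 59             # '{hour}:{minute}-{hour}:{minute+15}' stays valid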

View file

@ -17,6 +17,7 @@ defaults = {
'icinga2': {},
'icinga2-ido-pgsql': {},
'icingaweb2': {},
'icingaweb2-module-monitoring': {},
'python3-easysnmp': {},
'python3-flask': {},
'snmp': {},

View file

@ -1,13 +1,10 @@
from datetime import datetime, timedelta, timezone
assert node.has_bundle('redis')
from datetime import datetime, timedelta
defaults = {
'infobeamer-cms': {
'config': {
'MAX_UPLOADS': 5,
'PREFERRED_URL_SCHEME': 'https',
'REDIS_HOST': '127.0.0.1',
'SESSION_COOKIE_NAME': '__Host-sess',
'STATIC_PATH': '/opt/infobeamer-cms/static',
'URL_KEY': repo.vault.password_for(f'{node.name} infobeamer-cms url key'),
@ -52,7 +49,7 @@ def nginx(metadata):
'infobeamer-cms/config/TIME_MIN',
)
def event_times(metadata):
event_start = datetime.strptime(metadata.get('infobeamer-cms/event_start_date'), '%Y-%m-%d').replace(tzinfo=timezone.utc)
event_start = datetime.strptime(metadata.get('infobeamer-cms/event_start_date'), '%Y-%m-%d')
event_duration = metadata.get('infobeamer-cms/event_duration_days', 4)
event_end = event_start + timedelta(days=event_duration)

View file

@ -1,10 +1,9 @@
#!/usr/bin/env python3
import logging
from datetime import datetime
from datetime import datetime, timezone
from json import dumps
from time import sleep
from zoneinfo import ZoneInfo
import paho.mqtt.client as mqtt
from requests import RequestException, get
@ -25,8 +24,7 @@ logging.basicConfig(
)
LOG = logging.getLogger("main")
TZ = ZoneInfo("Europe/Berlin")
DUMP_TIME = "0900"
MLOG = logging.getLogger("mqtt")
state = None
@ -40,10 +38,7 @@ def mqtt_out(message, level="INFO", device=None):
key = "infobeamer"
if device:
key += f"/{device['id']}"
if device["description"]:
message = f"[{device['description']}] {message}"
else:
message = f"[{device['serial']}] {message}"
message = f"[{device['description']}] {message}"
client.publish(
CONFIG["mqtt"]["topic"],
@ -66,14 +61,14 @@ def mqtt_dump_state(device):
out.append("Location: {}".format(device["location"]))
out.append("Setup: {} ({})".format(device["setup"]["name"], device["setup"]["id"]))
out.append("Resolution: {}".format(device["run"].get("resolution", "unknown")))
if not device["is_synced"]:
out.append("syncing ...")
mqtt_out(
" - ".join(out),
device=device,
)
def is_dump_time():
return datetime.now(TZ).strftime("%H%M") == DUMP_TIME
mqtt_out("Monitor starting up")
while True:
@ -86,14 +81,15 @@ while True:
r.raise_for_status()
ib_state = r.json()["devices"]
except RequestException as e:
LOG.exception("Could not get device data from info-beamer")
LOG.exception("Could not get data from info-beamer")
mqtt_out(
f"Could not get device data from info-beamer: {e!r}",
f"Could not get data from info-beamer: {e!r}",
level="WARN",
)
else:
new_state = {}
for device in sorted(ib_state, key=lambda x: x["id"]):
online_devices = set()
for device in ib_state:
did = str(device["id"])
if did in new_state:
@ -101,8 +97,7 @@ while True:
continue
new_state[did] = device
# force information output for every online device at 09:00 CE(S)T
must_dump_state = is_dump_time()
must_dump_state = False
if state is not None:
if did not in state:
@ -145,15 +140,16 @@ while True:
if device["is_online"]:
if device["maintenance"]:
mqtt_out(
"maintenance required: {}".format(
" ".join(sorted(device["maintenance"]))
),
"maintenance required: {}".format(' '.join(
sorted(device["maintenance"])
)),
level="WARN",
device=device,
)
if (
device["location"] != state[did]["location"]
device["is_synced"] != state[did]["is_synced"]
or device["location"] != state[did]["location"]
or device["setup"]["id"] != state[did]["setup"]["id"]
or device["run"].get("resolution")
!= state[did]["run"].get("resolution")
@ -165,52 +161,23 @@ while True:
else:
LOG.info("adding device {} to empty state".format(device["id"]))
if device["is_online"]:
online_devices.add(
"{} ({})".format(
device["id"],
device["description"],
)
)
state = new_state
try:
r = get(
"https://info-beamer.com/api/v1/account",
auth=("", CONFIG["api_key"]),
)
r.raise_for_status()
ib_account = r.json()
except RequestException as e:
LOG.exception("Could not get account data from info-beamer")
mqtt_out(
f"Could not get account data from info-beamer: {e!r}",
level="WARN",
)
else:
available_credits = ib_account["balance"]
if is_dump_time():
mqtt_out(f"Available Credits: {available_credits}")
if available_credits < 50:
mqtt_out(
f"balance has dropped below 50 credits! (available: {available_credits})",
level="ERROR",
)
elif available_credits < 100:
mqtt_out(
f"balance has dropped below 100 credits! (available: {available_credits})",
level="WARN",
)
for quota_name, quota_config in sorted(ib_account["quotas"].items()):
value = quota_config["count"]["value"]
limit = quota_config["count"]["limit"]
if value > limit * 0.9:
mqtt_out(
f"quota {quota_name} is over 90% (limit {limit}, value {value})",
level="ERROR",
)
elif value > limit * 0.8:
mqtt_out(
f"quota {quota_name} is over 80% (limit {limit}, value {value})",
level="WARN",
)
sleep(60)
if (
datetime.now(timezone.utc).strftime("%H%M") == "1312"
and online_devices
and int(datetime.now(timezone.utc).strftime("%S")) < 30
):
mqtt_out("Online Devices: {}".format(", ".join(sorted(online_devices))))
sleep(30)
except KeyboardInterrupt:
break

View file

@ -19,4 +19,9 @@ defaults = {
'/usr/bin/ipmitool *',
},
},
'pacman': {
'packages': {
'ipmitool': {},
},
},
}

View file

@ -12,10 +12,6 @@ actions = {
'needs': {
'svc_systemd:nginx',
},
'after': {
'svc_systemd:nginx:reload',
'svc_systemd:nginx:restart',
},
},
}

View file

@ -13,6 +13,15 @@ defaults = {
},
},
},
'pacman': {
'packages': {
'dehydrated': {
'needed_by': {
'action:letsencrypt_update_certificates',
},
},
},
},
}

View file

@ -10,4 +10,15 @@ defaults = {
},
},
},
'pacman': {
'packages': {
'lldpd': {
'needed_by': {
'directory:/etc/lldpd.d',
'file:/etc/lldpd.conf',
'svc_systemd:lldpd',
},
},
},
},
}

View file

@ -4,6 +4,11 @@ defaults = {
'lm-sensors': {},
},
},
'pacman': {
'packages': {
'lm_sensors': {},
},
},
'telegraf': {
'input_plugins': {
'builtin': {

View file

@ -0,0 +1,40 @@
server_location: 'http://[::1]:20080'
server_name: '${server_name}'
registration_shared_secret: '${reg_secret}'
admin_api_shared_secret: '${admin_secret}'
base_url: '${base_url}'
client_redirect: '${client_redirect}'
client_logo: 'static/images/element-logo.png' # use '{cwd}' for current working directory
#db: 'sqlite:///opt/matrix-registration/data/db.sqlite3'
db: 'postgresql://${database['user']}:${database['password']}@localhost/${database['database']}'
host: 'localhost'
port: 20100
rate_limit: ["100 per day", "10 per minute"]
allow_cors: false
ip_logging: false
logging:
disable_existing_loggers: false
version: 1
root:
level: DEBUG
handlers: [console]
formatters:
brief:
format: '%(name)s - %(levelname)s - %(message)s'
handlers:
console:
class: logging.StreamHandler
level: INFO
formatter: brief
stream: ext://sys.stdout
# password requirements
password:
min_length: 8
# username requirements
username:
validation_regex: [] #list of regexes that the selected username must match. Example: '[a-zA-Z]\.[a-zA-Z]'
invalidation_regex: #list of regexes that the selected username must NOT match. Example: '(admin|support)'
- '^abuse'
- 'admin'
- 'support'
- 'help'
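
For reference, the db: line in this Mako template renders to a standard SQLAlchemy URL; with the role and database names from the accompanying metadata and an invented password:

# 'hunter2' is a placeholder; the real value comes from repo.vault.
database = {
    'user': 'matrix-registration',
    'password': 'hunter2',
    'database': 'matrix-registration',
}
print(f"db: 'postgresql://{database['user']}:{database['password']}@localhost/{database['database']}'")
# db: 'postgresql://matrix-registration:hunter2@localhost/matrix-registration'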

View file

@ -0,0 +1,14 @@
[Unit]
Description=matrix-registration
After=network.target
[Service]
User=matrix-registration
Group=matrix-registration
WorkingDirectory=/opt/matrix-registration/src
ExecStart=/opt/matrix-registration/venv/bin/matrix-registration --config-path /opt/matrix-registration/config.yaml serve
Restart=always
RestartSec=5
[Install]
WantedBy=multi-user.target

View file

@ -0,0 +1,65 @@
actions['matrix-registration_create_virtualenv'] = {
'command': '/usr/bin/python3 -m virtualenv -p python3 /opt/matrix-registration/venv/',
'unless': 'test -d /opt/matrix-registration/venv/',
'needs': {
# actually /opt/matrix-registration, but we don't create that
'directory:/opt/matrix-registration/src',
},
}
actions['matrix-registration_install'] = {
'command': ' && '.join([
'cd /opt/matrix-registration/src',
'/opt/matrix-registration/venv/bin/pip install psycopg2-binary',
'/opt/matrix-registration/venv/bin/pip install -e .',
]),
'needs': {
'action:matrix-registration_create_virtualenv',
},
'triggered': True,
}
users['matrix-registration'] = {
'home': '/opt/matrix-registration',
}
directories['/opt/matrix-registration/src'] = {}
git_deploy['/opt/matrix-registration/src'] = {
'repo': 'https://github.com/zeratax/matrix-registration.git',
'rev': 'master',
'triggers': {
'action:matrix-registration_install',
'svc_systemd:matrix-registration:restart',
},
}
files['/opt/matrix-registration/config.yaml'] = {
'content_type': 'mako',
'context': {
'admin_secret': node.metadata.get('matrix-registration/admin_secret'),
'base_url': node.metadata.get('matrix-registration/base_path', ''),
'client_redirect': node.metadata.get('matrix-registration/client_redirect'),
'database': node.metadata.get('matrix-registration/database'),
'reg_secret': node.metadata.get('matrix-synapse/registration_shared_secret'),
'server_name': node.metadata.get('matrix-synapse/server_name'),
},
'triggers': {
'svc_systemd:matrix-registration:restart',
},
}
files['/usr/local/lib/systemd/system/matrix-registration.service'] = {
'triggers': {
'action:systemd-reload',
'svc_systemd:matrix-registration:restart',
},
}
svc_systemd['matrix-registration'] = {
'needs': {
'action:matrix-registration_install',
'file:/opt/matrix-registration/config.yaml',
'file:/usr/local/lib/systemd/system/matrix-registration.service',
},
}

View file

@ -0,0 +1,25 @@
defaults = {
'bash_aliases': {
'matrix-registration': '/opt/matrix-registration/venv/bin/matrix-registration --config-path /opt/matrix-registration/config.yaml',
},
'matrix-registration': {
'admin_secret': repo.vault.password_for(f'{node.name} matrix-registration admin secret'),
'database': {
'user': 'matrix-registration',
'password': repo.vault.password_for(f'{node.name} postgresql matrix-registration'),
'database': 'matrix-registration',
},
},
'postgresql': {
'roles': {
'matrix-registration': {
'password': repo.vault.password_for(f'{node.name} postgresql matrix-registration'),
},
},
'databases': {
'matrix-registration': {
'owner': 'matrix-registration',
},
},
},
}

View file

@ -1,3 +1,8 @@
if node.has_bundle('pacman'):
package = 'pkg_pacman:nfs-utils'
else:
package = 'pkg_apt:nfs-common'
for mount, data in node.metadata.get('nfs-client/mounts',{}).items():
data['mount'] = mount
data['mount_options'] = set(data.get('mount_options', set()))
@ -37,7 +42,7 @@ for mount, data in node.metadata.get('nfs-client/mounts',{}).items():
'file:/etc/systemd/system/{}.automount'.format(unitname),
'directory:{}'.format(data['mountpoint']),
'svc_systemd:systemd-networkd',
'pkg_apt:nfs-common',
package,
},
}
else:
@ -53,7 +58,7 @@ for mount, data in node.metadata.get('nfs-client/mounts',{}).items():
'file:/etc/systemd/system/{}.mount'.format(unitname),
'directory:{}'.format(data['mountpoint']),
'svc_systemd:systemd-networkd',
'pkg_apt:nfs-common',
package,
},
}

View file

@ -4,6 +4,11 @@ defaults = {
'nfs-common': {},
},
},
'pacman': {
'packages': {
'nfs-utils': {},
},
},
}
if node.has_bundle('telegraf'):

View file

@ -1,3 +1,8 @@
if node.has_bundle('pacman'):
package = 'pkg_pacman:nftables'
else:
package = 'pkg_apt:nftables'
directories = {
# used by other bundles
'/etc/nftables-rules.d': {
@ -37,7 +42,7 @@ svc_systemd = {
'nftables': {
'needs': {
'file:/etc/nftables.conf',
'pkg_apt:nftables',
package,
},
},
}

View file

@ -10,6 +10,23 @@ defaults = {
'blocked_v4': repo.libs.firewall.global_ip4_blocklist,
'blocked_v6': repo.libs.firewall.global_ip6_blocklist,
},
'pacman': {
'packages': {
'nftables': {},
# https://github.com/bundlewrap/bundlewrap/issues/688
# 'iptables': {
# 'installed': False,
# 'needed_by': {
# 'pkg_pacman:iptables-nft',
# },
# },
'iptables-nft': {
'needed_by': {
'pkg_pacman:nftables',
},
},
},
},
}
if not node.has_bundle('vmhost') and not node.has_bundle('docker-engine'):

View file

@ -0,0 +1,9 @@
[Service]
ExecStart=
ExecStart=/usr/sbin/nginx -c /etc/nginx/nginx.conf
ExecReload=
ExecReload=/bin/sh -c "/bin/kill -s HUP $(/bin/cat /var/run/nginx.pid)"
ExecStop=
ExecStop=/bin/sh -c "/bin/kill -s TERM $(/bin/cat /var/run/nginx.pid)"

View file

@ -1,4 +1,4 @@
user www-data;
user ${username};
worker_processes ${worker_processes};
pid /var/run/nginx.pid;

View file

@ -1,5 +1,12 @@
from datetime import datetime, timedelta
if node.has_bundle('pacman'):
package = 'pkg_pacman:nginx'
username = 'http'
else:
package = 'pkg_apt:nginx'
username = 'www-data'
directories = {
'/etc/nginx/sites': {
'purge': True,
@ -17,9 +24,9 @@ directories = {
},
},
'/var/log/nginx-timing': {
'owner': 'www-data',
'owner': username,
'needs': {
'pkg_apt:nginx',
package,
},
},
'/var/www': {},
@ -33,6 +40,7 @@ files = {
'/etc/nginx/nginx.conf': {
'content_type': 'mako',
'context': {
'username': username,
**node.metadata['nginx'],
},
'triggers': {
@ -61,13 +69,21 @@ files = {
'/var/www/error.html': {},
'/var/www/not_found.html': {},
}
if node.has_bundle('pacman'):
files['/etc/systemd/system/nginx.service.d/bundlewrap.conf'] = {
'source': 'arch-override.conf',
'triggers': {
'action:systemd-reload',
'svc_systemd:nginx:restart',
},
}
svc_systemd = {
'nginx': {
'needs': {
'action:generate-dhparam',
'directory:/var/log/nginx-timing',
'pkg_apt:nginx',
package,
},
},
}

View file

@ -33,6 +33,11 @@ defaults = {
'nginx': {
'worker_connections': 768,
},
'pacman': {
'packages': {
'nginx': {},
},
},
}
if node.has_bundle('telegraf'):

View file

@ -27,22 +27,29 @@ files = {
},
}
if node.has_bundle('pacman'):
package = 'pkg_pacman:openssh'
service = 'sshd'
else:
package = 'pkg_apt:openssh-server'
service = 'ssh'
actions = {
'sshd_check_config': {
'command': 'sshd -T -C user=root -C host=localhost -C addr=localhost',
'triggered': True,
'triggers': {
'svc_systemd:ssh:restart',
'svc_systemd:{}:restart'.format(service),
},
},
}
svc_systemd = {
'ssh': {
service: {
'needs': {
'file:/etc/systemd/system/ssh.service.d/bundlewrap.conf',
'file:/etc/ssh/sshd_config',
'pkg_apt:openssh-server',
package,
},
},
}

View file

@ -8,6 +8,11 @@ defaults = {
'openssh-sftp-server': {},
},
},
'pacman': {
'packages': {
'openssh': {},
},
},
}
@metadata_reactor.provides(

View file

@ -0,0 +1,38 @@
#!/bin/bash
statusfile="/var/tmp/unattended_upgrades.status"
if ! [[ -f "$statusfile" ]]
then
echo "Status file not found"
exit 3
fi
mtime=$(stat -c %Y $statusfile)
now=$(date +%s)
if (( $now - $mtime > 60*60*24*8 ))
then
echo "Status file is older than 8 days!"
exit 3
fi
exitcode=$(cat $statusfile)
case "$exitcode" in
abort_ssh)
echo "Upgrades skipped due to active SSH login"
exit 1
;;
0)
if [[ -f /var/run/reboot-required ]]
then
echo "OK, but updates require a reboot"
exit 1
else
echo "OK"
exit 0
fi
;;
*)
echo "Last exitcode was $exitcode"
exit 2
;;
esac

View file

@ -0,0 +1,18 @@
#!/bin/bash
set -xeuo pipefail
pacman -Syu --noconfirm --noprogressbar
% for affected, restarts in sorted(restart_triggers.items()):
up_since=$(systemctl show "${affected}" | sed -n 's/^ActiveEnterTimestamp=//p' || echo 0)
up_since_ts=$(date -d "$up_since" +%s || echo 0)
now=$(date +%s)
if [ $((now - up_since_ts)) -lt 3600 ]
then
% for restart in sorted(restarts):
systemctl restart "${restart}" || true
% endfor
fi
% endfor
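
The template assumes pacman/restart_triggers maps an "affected" unit to the units to bounce whenever the affected one came up within the last hour, i.e. was restarted by the upgrade. A sketch of the assumed metadata shape (unit names invented):

restart_triggers = {
    'dbus.service': {              # if this unit restarted during the upgrade...
        'systemd-logind.service',  # ...these get restarted right afterwards
    },
}
for affected, restarts in sorted(restart_triggers.items()):
    for restart in sorted(restarts):
        print(f'would restart {restart} because {affected} came up recently')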

View file

@ -0,0 +1,2 @@
# just disable faillock.
deny = 0

View file

@ -0,0 +1,52 @@
[options]
Architecture = auto
CheckSpace
Color
HoldPkg = ${' '.join(sorted(node.metadata.get('pacman/ask_before_removal')))}
ILoveCandy
IgnorePkg = ${' '.join(sorted(node.metadata.get('pacman/ignore_packages', set())))}
LocalFileSigLevel = Optional
NoExtract=${' '.join(sorted(node.metadata.get('pacman/no_extract', set())))}
ParallelDownloads = ${node.metadata.get('pacman/parallel_downloads')}
SigLevel = Required DatabaseOptional
VerbosePkgLists
% for line in sorted(node.metadata.get('pacman/additional_config', set())):
${line}
% endfor
[core]
Server = ${node.metadata.get('pacman/repository')}
Include = /etc/pacman.d/mirrorlist
[extra]
Server = ${node.metadata.get('pacman/repository')}
Include = /etc/pacman.d/mirrorlist
[community]
Server = ${node.metadata.get('pacman/repository')}
Include = /etc/pacman.d/mirrorlist
% if node.metadata.get('pacman/enable_multilib', False):
[multilib]
Server = ${node.metadata.get('pacman/repository')}
Include = /etc/pacman.d/mirrorlist
% endif
% if node.metadata.get('pacman/enable_aurto', True):
[aurto]
Server = https://aurto.kunbox.net/
SigLevel = Optional TrustAll
% endif
% if node.has_bundle('zfs'):
[archzfs]
Server = http://archzfs.com/archzfs/x86_64
% if node.metadata.get('pacman/linux-lts', False):
[zfs-linux-lts]
% else:
[zfs-linux]
% endif
Server = http://kernels.archzfs.com/$repo/
% endif

View file

@ -0,0 +1,49 @@
#!/bin/bash
# With systemd, we can force logging to the journal. This is better than
# spamming the world with cron mails. You can then view these logs using
# "journalctl -rat upgrade-and-reboot".
if which logger >/dev/null 2>&1
then
# Dump stdout and stderr to logger, which will then put everything
# into the journal.
exec 1> >(logger -t upgrade-and-reboot -p user.info)
exec 2> >(logger -t upgrade-and-reboot -p user.error)
fi
. /etc/upgrade-and-reboot.conf
echo "Starting upgrade-and-reboot for node $nodename ..."
statusfile="/var/tmp/unattended_upgrades.status"
# Workaround, because /var/tmp is usually 1777
[[ "$UID" == 0 ]] && chown root:root "$statusfile"
logins=$(ps h -C sshd -o euser | awk '$1 != "root" && $1 != "sshd" && $1 != "sshmon" && $1 != "nobody"')
if [[ -n "$logins" ]]
then
echo "Will abort now, there are active SSH logins: $logins"
echo "abort_ssh" > "$statusfile"
exit 1
fi
softlockdir=/var/lib/bundlewrap/soft-$nodename
mkdir -p "$softlockdir"
printf '{"comment": "UPDATE", "date": %s, "expiry": %s, "id": "UNATTENDED", "items": ["*"], "user": "root@localhost"}\n' \
$(date +%s) \
$(date -d 'now + 30 mins' +%s) \
>"$softlockdir"/UNATTENDED
trap 'rm -f "$softlockdir"/UNATTENDED' EXIT
do-unattended-upgrades
ret=$?
echo "$ret" > "$statusfile"
if (( $ret != 0 ))
then
exit 1
fi
systemctl reboot
echo "upgrade-and-reboot for node $nodename is DONE"

View file

@ -0,0 +1,3 @@
nodename="${node.name}"
reboot_mail_to="${node.metadata.get('apt/unattended-upgrades/reboot_mail_to', '')}"
auto_reboot_enabled="${node.metadata.get('apt/unattended-upgrades/reboot_enabled', True)}"

bundles/pacman/items.py
View file

@ -0,0 +1,113 @@
from bundlewrap.exceptions import BundleError
if not node.os == 'arch':
raise BundleError(f'{node.name}: bundle:pacman requires arch linux')
files = {
'/etc/pacman.conf': {
'content_type': 'mako',
},
'/etc/upgrade-and-reboot.conf': {
'content_type': 'mako',
},
'/etc/security/faillock.conf': {},
'/usr/local/sbin/upgrade-and-reboot': {
'mode': '0700',
},
'/usr/local/sbin/do-unattended-upgrades': {
'content_type': 'mako',
'mode': '0700',
'context': {
'restart_triggers': node.metadata.get('pacman/restart_triggers', {}),
}
},
'/usr/local/share/icinga/plugins/check_unattended_upgrades': {
'mode': '0755',
},
}
svc_systemd['paccache.timer'] = {
'needs': {
'pkg_pacman:pacman-contrib',
},
}
pkg_pacman = {
'at': {},
'autoconf': {},
'automake': {},
'bind': {},
'binutils': {},
'bison': {},
'bzip2': {},
'curl': {},
'dialog': {},
'diffutils': {},
'fakeroot': {},
'file': {},
'findutils': {},
'flex': {},
'fwupd': {},
'gawk': {},
'gcc': {},
'gettext': {},
'git': {},
'gnu-netcat': {},
'grep': {},
'groff': {},
'gzip': {},
'htop': {},
'jq': {},
'ldns': {},
'less': {},
'libtool': {},
'logrotate': {},
'lsof': {},
'm4': {},
'mailutils': {},
'make': {},
'man-db': {},
'man-pages': {},
'moreutils': {},
'mtr': {},
'ncdu': {},
'nmap': {},
'pacman-contrib': {},
'patch': {},
'pkgconf': {},
'python': {},
'python-setuptools': {
'needed_by': {
'pkg_pip:',
},
},
'python-pip': {
'needed_by': {
'pkg_pip:',
},
},
'python-virtualenv': {},
'rsync': {},
'run-parts': {},
'sed': {},
'tar': {},
'texinfo': {},
'tmux': {},
'tree': {},
'unzip': {},
'vim': {},
'wget': {},
'which': {},
'whois': {},
'zip': {},
}
if node.metadata.get('pacman/linux-lts', False):
pkg_pacman['linux-lts'] = {}
pkg_pacman['acpi_call-lts'] = {}
else:
pkg_pacman['linux'] = {}
pkg_pacman['acpi_call'] = {}
for pkg, config in node.metadata.get('pacman/packages', {}).items():
pkg_pacman[pkg] = config

View file

@ -0,0 +1,54 @@
defaults = {
'pacman': {
'ask_before_removal': {
'glibc',
'pacman',
},
'no_extract': {
'etc/cron.d/0hourly',
# don't install systemd-homed pam module. It produces a lot of spam in
# journal about systemd-homed not being active, so just get rid of it.
# Requires reinstall of systemd package, though
'usr/lib/security/pam_systemd_home.so',
},
'parallel_downloads': 4,
'repository': 'http://ftp.uni-kl.de/pub/linux/archlinux/$repo/os/$arch',
'unattended-upgrades': {
'day': 5,
'hour': 21,
},
},
}
@metadata_reactor.provides(
'cron/jobs/upgrade-and-reboot',
'icinga2_api/pacman/services',
)
def patchday(metadata):
if not metadata.get('pacman/unattended-upgrades/is_enabled', False):
return {}
day = metadata.get('pacman/unattended-upgrades/day')
hour = metadata.get('pacman/unattended-upgrades/hour')
return {
'cron': {
'jobs': {
'upgrade-and-reboot': '{minute} {hour} * * {day} root /usr/local/sbin/upgrade-and-reboot'.format(
minute=node.magic_number % 30,
hour=hour,
day=day,
),
},
},
'icinga2_api': {
'pacman': {
'services': {
'UNATTENDED UPGRADES': {
'command_on_monitored_host': '/usr/local/share/icinga/plugins/check_unattended_upgrades',
},
},
},
},
}
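
node.magic_number is a stable per-node value, so the modulo spreads patch-day jobs across a half-hour window. A rendering sketch with an invented magic number:

magic_number = 17          # invented; stable per node in practice
day, hour = 5, 21          # the bundle defaults above
print('{minute} {hour} * * {day} root /usr/local/sbin/upgrade-and-reboot'.format(
    minute=magic_number % 30,
    hour=hour,
    day=day,
))
# 17 21 * * 5 root /usr/local/sbin/upgrade-and-reboot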

View file

@ -34,7 +34,7 @@ defaults = {
},
},
'nodejs': {
'version': 22,
'version': 18,
},
'postgresql': {
'roles': {

View file

@ -0,0 +1,6 @@
[Service]
# arch postfix is not set up for chrooting by default
ExecStartPre=-/usr/sbin/mkdir -p /var/spool/postfix/etc
% for file in ['/etc/localtime', '/etc/nsswitch.conf', '/etc/resolv.conf', '/etc/services']:
ExecStartPre=-/usr/sbin/cp -p ${file} /var/spool/postfix${file}
% endfor

View file

@ -25,6 +25,7 @@ inet_interfaces = 127.0.0.1
% endif
<%text>
smtp_use_tls = yes
smtp_tls_loglevel = 1
smtp_tls_note_starttls_offer = yes
smtp_tls_session_cache_database = btree:${data_directory}/smtp_scache

View file

@ -21,12 +21,13 @@ for identifier in node.metadata.get('postfix/mynetworks', set()):
netmask = '128'
mynetworks.add(f'[{ip6}]/{netmask}')
my_package = 'pkg_pacman:postfix' if node.os == 'arch' else 'pkg_apt:postfix'
files = {
'/etc/mailname': {
'content': node.metadata.get('postfix/myhostname'),
'before': {
'pkg_apt:postfix',
my_package,
},
'triggers': {
'svc_systemd:postfix:restart',
@ -81,7 +82,7 @@ actions = {
'command': 'newaliases',
'triggered': True,
'needs': {
'pkg_apt:postfix',
my_package,
},
'before': {
'svc_systemd:postfix',
@ -91,7 +92,7 @@ actions = {
'command': 'postmap hash:/etc/postfix/blocked_recipients',
'triggered': True,
'needs': {
'pkg_apt:postfix',
my_package,
},
'before': {
'svc_systemd:postfix',
@ -104,7 +105,17 @@ svc_systemd = {
'needs': {
'file:/etc/postfix/master.cf',
'file:/etc/postfix/main.cf',
'pkg_apt:postfix',
my_package,
},
},
}
if node.os == 'arch':
files['/etc/systemd/system/postfix.service.d/bundlewrap.conf'] = {
'source': 'arch-override.conf',
'content_type': 'mako',
'triggers': {
'action:systemd-reload',
'svc_systemd:postfix:restart',
},
}

View file

@ -14,7 +14,7 @@ defaults = {
'postfix': {
'services': {
'POSTFIX PROCESS': {
'command_on_monitored_host': '/usr/local/share/icinga/plugins/check_systemd_unit postfix@-',
'command_on_monitored_host': '/usr/local/share/icinga/plugins/check_systemd_unit postfix' + ('' if node.os == 'arch' else '@-'),
},
'POSTFIX QUEUE': {
'command_on_monitored_host': 'sudo /usr/local/share/icinga/plugins/check_postfix_queue -w 20 -c 40 -d 50',
@ -22,6 +22,12 @@ defaults = {
},
},
},
'pacman': {
'packages': {
'postfix': {},
's-nail': {},
},
},
}
if node.has_bundle('postfixadmin'):

View file

@ -3,8 +3,6 @@ from os import listdir
from os.path import isfile, join
from subprocess import check_output
from bundlewrap.utils.ui import io
zone_path = join(repo.path, 'data', 'powerdns', 'files', 'bind-zones')
nameservers = set()
@ -81,10 +79,9 @@ if node.metadata.get('powerdns/features/bind', False):
continue
try:
output = check_output(['git', 'log', '-1', '--pretty=%ci']).decode('utf-8').strip()
output = check_output(['git', 'log', '-1', '--pretty=%ci', join(zone_path, zone)]).decode('utf-8').strip()
serial = datetime.strptime(output, '%Y-%m-%d %H:%M:%S %z').strftime('%y%m%d%H%M')
except Exception as e:
io.stderr(f"Error while parsing commit time for {zone} serial: {e!r}")
except:
serial = datetime.now().strftime('%y%m%d0000')
primary_zones.add(zone)
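
git log -1 --pretty=%ci prints a timestamp with UTC offset, which the code above turns into a ten-digit zone serial. Worked through with an invented commit time:

from datetime import datetime

output = '2024-05-01 12:34:56 +0200'   # invented `git log -1 --pretty=%ci` output
serial = datetime.strptime(output, '%Y-%m-%d %H:%M:%S %z').strftime('%y%m%d%H%M')
assert serial == '2405011234'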

View file

@ -14,7 +14,7 @@ defaults = {
},
},
'nodejs': {
'version': 22,
'version': 18,
},
'users': {
'powerdnsadmin': {

View file

@ -7,6 +7,7 @@ from subprocess import check_output
from requests import get
UPDATE_URL = '${url}'
USERNAME = '${username}'
PASSWORD = '${password}'

View file

@ -5,6 +5,7 @@ from ipaddress import ip_address
from json import loads
from subprocess import check_output, run
DOMAIN = '${domain}'
# <%text>

View file

@ -1,5 +1,5 @@
assert node.has_bundle('redis'), f'{node.name}: pretalx needs redis'
assert node.has_bundle('nodejs'), f'{node.name}: pretalx needs nodejs for rebuild step'
assert node.has_bundle('nodejs'), f'{node.name}: pretalx needs nodejs for rebuild and regenerate_css step'
actions = {
'pretalx_create_virtualenv': {
@ -53,6 +53,17 @@ actions = {
},
'triggered': True,
},
'pretalx_regenerate-css': {
'command': 'sudo -u pretalx PRETALX_CONFIG_FILE=/opt/pretalx/pretalx.cfg /opt/pretalx/venv/bin/python -m pretalx regenerate_css',
'needs': {
'action:pretalx_migrate',
'directory:/opt/pretalx/data',
'directory:/opt/pretalx/static',
'file:/opt/pretalx/pretalx.cfg',
'bundle:nodejs',
},
'triggered': True,
},
}
users = {
@ -79,6 +90,7 @@ git_deploy = {
'action:pretalx_install',
'action:pretalx_migrate',
'action:pretalx_rebuild',
'action:pretalx_regenerate-css',
'svc_systemd:pretalx-web:restart',
'svc_systemd:pretalx-worker:restart',
},
@ -109,6 +121,7 @@ svc_systemd = {
'action:pretalx_install',
'action:pretalx_migrate',
'action:pretalx_rebuild',
'action:pretalx_regenerate-css',
'file:/etc/systemd/system/pretalx-web.service',
'file:/opt/pretalx/pretalx.cfg',
},
@ -117,7 +130,6 @@ svc_systemd = {
'needs': {
'action:pretalx_install',
'action:pretalx_migrate',
'action:pretalx_rebuild',
'file:/etc/systemd/system/pretalx-worker.service',
'file:/opt/pretalx/pretalx.cfg',
},
@ -192,6 +204,7 @@ for plugin_name, plugin_config in node.metadata.get('pretalx/plugins', {}).items
'triggers': {
'action:pretalx_migrate',
'action:pretalx_rebuild',
'action:pretalx_regenerate-css',
'svc_systemd:pretalx-web:restart',
'svc_systemd:pretalx-worker:restart',
},

View file

@ -27,7 +27,7 @@ defaults = {
},
},
'nodejs': {
'version': 22,
'version': 18,
},
'pretalx': {
'database': {

View file

@ -1,5 +1,3 @@
from bundlewrap.metadata import atomic
defaults = {
'apt': {
'packages': {
@ -50,16 +48,3 @@ if node.has_bundle('telegraf'):
},
},
}
@metadata_reactor.provides(
'firewall/port_rules',
)
def firewall(metadata):
return {
'firewall': {
'port_rules': {
'6379/tcp': atomic(metadata.get('redis/restrict-to', set())),
},
},
}

View file

@ -2,6 +2,7 @@ import re
from json import load
from os.path import join
with open(join(repo.path, 'configs', 'netbox', f'{node.name}.json')) as f:
netbox = load(f)

View file

@ -96,7 +96,7 @@ if 'dkim' in node.metadata.get('rspamd', {}):
},
}
dkim_key = repo.libs.faults.ensure_fault_or_none(node.metadata.get('rspamd/dkim'))
dkim_key = repo.libs.faults.ensure_fault_or_none(node.metadata['rspamd']['dkim'])
actions = {
'rspamd_assure_dkim_key_permissions': {

View file

@ -13,13 +13,6 @@ map to guest = bad user
load printers = no
usershare allow guests = yes
allow insecure wide links = yes
min protocol = SMB2
% if timemachine:
vfs objects = fruit
fruit:aapl = yes
fruit:copyfile = yes
fruit:model = MacSamba
% endif
% for name, opts in sorted(node.metadata.get('samba/shares', {}).items()):
[${name}]
@ -44,24 +37,3 @@ follow symlinks = yes
wide links = yes
% endif
% endfor
% for name in sorted(timemachine):
[timemachine-${name}]
comment = Time Machine backup for ${name}
available = yes
browseable = yes
guest ok = no
read only = false
valid users = timemachine-${name}
path = /srv/timemachine/${name}
durable handles = yes
vfs objects = catia fruit streams_xattr
fruit:delete_empty_adfiles = yes
fruit:metadata = stream
fruit:posix_rename = yes
fruit:time machine = yes
fruit:time machine max size = 2000G
fruit:veto_appledouble = no
fruit:wipe_intentionally_left_blank_rfork = yes
% endfor

View file

@ -1,21 +0,0 @@
<?xml version="1.0" standalone='no'?>
<!DOCTYPE service-group SYSTEM "avahi-service.dtd">
<service-group>
<name replace-wildcards="yes">%h</name>
<service>
<type>_smb._tcp</type>
<port>445</port>
</service>
<service>
<type>_device-info._tcp</type>
<port>0</port>
<txt-record>model=RackMac1,2</txt-record>
</service>
<service>
<type>_adisk._tcp</type>
% for idx, share_name in enumerate(sorted(shares)):
<txt-record>dk${idx}=adVN=timemachine-${share_name},adVF=0x82</txt-record>
% endfor
<txt-record>sys=waMa=0,adVF=0x100</txt-record>
</service>
</service-group>

View file

@ -11,14 +11,9 @@ svc_systemd = {
},
}
timemachine_shares = node.metadata.get('samba/timemachine-shares', set())
files = {
'/etc/samba/smb.conf': {
'content_type': 'mako',
'context': {
'timemachine': timemachine_shares,
},
'triggers': {
'svc_systemd:nmbd:restart',
'svc_systemd:smbd:restart',
@ -62,24 +57,3 @@ for user, uconfig in node.metadata.get('users', {}).items():
last_action = {
f'action:smbpasswd_for_user_{user}',
}
if timemachine_shares:
assert node.has_bundle('avahi-daemon'), f'{node.name}: samba needs avahi-daemon to publish time machine shares'
files['/etc/avahi/services/timemachine.service'] = {
'content_type': 'mako',
'context': {
'shares': timemachine_shares,
},
}
for share_name in timemachine_shares:
users[f'timemachine-{share_name}'] = {
'home': f'/srv/timemachine/{share_name}',
}
directories[f'/srv/timemachine/{share_name}'] = {
'owner': f'timemachine-{share_name}',
'group': f'timemachine-{share_name}',
'mode': '0700',
}

View file

@ -24,30 +24,3 @@ def firewall(metadata):
},
},
}
@metadata_reactor.provides(
'zfs/datasets',
)
def timemachine_zfs(metadata):
shares = metadata.get('samba/timemachine-shares', set())
if not shares:
return {}
assert node.has_bundle('zfs'), f'{node.name}: time machine backups require zfs'
datasets = {
'tank/timemachine': {},
}
for share_name in shares:
datasets[f'tank/timemachine/{share_name}'] = {
'mountpoint': f'/srv/timemachine/{share_name}',
}
return {
'zfs': {
'datasets': datasets,
},
}

View file

@ -64,3 +64,12 @@ for check in {
files["/usr/local/share/icinga/plugins/check_{}".format(check)] = {
'mode': "0755",
}
if node.has_bundle('pacman'):
symlinks['/usr/lib/nagios/plugins'] = {
'target': '/usr/lib/monitoring-plugins',
'needs': {
'pkg_pacman:monitoring-plugins',
},
}

View file

@ -36,6 +36,14 @@ defaults = {
'sshmon',
},
},
'pacman': {
'packages': {
'gawk': {},
'perl-libwww': {},
'monitoring-plugins': {},
'python-requests': {},
},
},
}

View file

@ -4,4 +4,9 @@ defaults = {
'sudo': {},
},
},
'pacman': {
'packages': {
'sudo': {},
},
},
}

View file

@ -0,0 +1,13 @@
title ${config['title']}
% if 'linux' in config:
linux ${config['linux']}
% for line in config['initrd']:
initrd ${line}
% endfor
% if config.get('options', set()):
options ${' '.join(sorted(config['options']))}
% endif
% else:
efi ${config['efi']}
% endif

View file

@ -0,0 +1,5 @@
auto-entries no
auto-firmware yes
console-mode keep
default ${config['default']}
timeout ${config.get('timeout', 5)}

View file

@ -0,0 +1,9 @@
[Trigger]
Type = Package
Operation = Upgrade
Target = systemd
[Action]
Description = Gracefully upgrading systemd-boot...
When = PostTransaction
Exec = /usr/bin/systemctl restart systemd-boot-update.service

View file

@ -0,0 +1,32 @@
assert node.os == 'arch'
assert node.metadata.get('systemd-boot/default') in node.metadata.get('systemd-boot/entries')
files = {
'/etc/pacman.d/hooks/99-systemd-boot-update': {
'source': 'pacman_hook',
},
'/boot/loader/loader.conf': {
'content_type': 'mako',
'context': {
'config': node.metadata.get('systemd-boot'),
},
'mode': None,
},
}
directories = {
'/boot/loader/entries': {
'purge': True,
},
}
for entry, config in node.metadata.get('systemd-boot/entries').items():
files[f'/boot/loader/entries/{entry}.conf'] = {
'source': 'entry',
'content_type': 'mako',
'context': {
'entry': entry,
'config': config,
},
'mode': None,
}

View file

@ -1,4 +1,4 @@
timezone = node.metadata.get('timezone')
timezone = node.metadata.get('timezone', 'UTC')
actions['systemd-reload'] = {
'command': 'systemctl daemon-reload',

View file

@ -21,7 +21,6 @@ defaults = {
},
},
},
'timezone': 'UTC',
}
if not node.has_bundle('rsyslogd'):

View file

@ -25,4 +25,14 @@ defaults = {
},
},
},
'pacman': {
'packages': {
'telegraf-bin': {
'needed_by': {
'svc_systemd:telegraf',
'user:telegraf',
},
},
},
},
}

View file

@ -7,6 +7,11 @@ defaults = {
'kitty-terminfo': {},
},
},
'pacman': {
'packages': {
'kitty-terminfo': {},
},
},
'users': {
'root': {
'home': '/root',

View file

@ -24,3 +24,12 @@ if node.has_bundle('nftables') and node.has_bundle('apt'):
'svc_systemd:nftables:reload',
},
}
if node.has_bundle('pacman'):
svc_systemd['libvirtd'] = {
'running': None, # triggered via .socket
}
svc_systemd['virtlogd'] = {
'running': None, # triggered via .socket
'enabled': None, # triggered via .socket
}

View file

@ -21,6 +21,12 @@ defaults = {
},
},
},
'pacman': {
'packages': {
'edk2-ovmf': {},
'libvirt': {},
},
},
}
if node.os == 'debian' and node.os_version[0] < 11:
@ -36,6 +42,9 @@ if node.has_bundle('nftables'):
},
}
if node.has_bundle('arch-with-gui'):
defaults['pacman']['packages']['virt-manager'] = {}
@metadata_reactor.provides(
'users',

View file

@ -0,0 +1,16 @@
[Unit]
Description=CRS runner for ${script}
After=network.target
[Service]
User=voc
Group=voc
EnvironmentFile=/etc/default/crs-worker
ExecStart=/opt/crs-scripts/bin/crs_run ${script}
WorkingDirectory=/opt/crs-scripts
Restart=on-failure
RestartSec=10
SyslogIdentifier=crs-${worker}
[Install]
WantedBy=crs-worker.target

View file

@ -0,0 +1,6 @@
CRS_TRACKER=${url}
CRS_TOKEN=${token}
CRS_SECRET=${secret}
% if use_vaapi:
CRS_USE_VAAPI=yes
% endif

View file

@ -0,0 +1,56 @@
paths = { # subpaths of /video
'capture',
'encoded',
'fuse',
'intros',
'repair',
'tmp',
}
directories = {
'/opt/crs-scripts': {},
}
for path in paths:
directories[f'/video/{path}'] = {
'owner': 'voc',
'group': 'voc',
}
git_deploy = {
'/opt/crs-scripts': {
'repo': 'https://github.com/crs-tools/crs-scripts.git',
'rev': 'master',
},
}
files = {
'/etc/default/crs-worker': {
'content_type': 'mako',
'source': 'environment',
'context': node.metadata.get('voc-tracker-worker'),
},
}
for worker, script in {
'recording-scheduler': 'script-A-recording-scheduler.pl',
'mount4cut': 'script-B-mount4cut.pl',
'cut-postprocessor': 'script-C-cut-postprocessor.pl',
'encoding': 'script-D-encoding.pl',
'postencoding': 'script-E-postencoding-auphonic.pl',
'postprocessing': 'script-F-postprocessing-upload.pl',
}.items():
files[f'/etc/systemd/system/crs-{worker}.service'] = {
'content_type': 'mako',
'source': 'crs-runner.service',
'context': {
'worker': worker,
'script': script,
},
'needs': {
'file:/etc/default/crs-worker',
},
'triggers': {
'action:systemd-reload',
},
}
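
All six units are WantedBy=crs-worker.target, which this diff does not define. A minimal sketch of such a target, assuming it exists only to group the runners, might be:

[Unit]
Description=CRS worker scripts
[Install]
WantedBy=multi-user.target

With the six services enabled, starting crs-worker.target would bring them all up at once; whether the repo actually ships such a file is not shown here.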

View file

@ -0,0 +1,52 @@
defaults = {
'apt': {
'packages': {
'ffmpeg': {},
'fuse': {},
'fuse-ts': {},
'libboolean-perl': {},
'libconfig-inifiles-perl': {},
'libdatetime-perl': {},
'libfile-which-perl': {},
'libipc-run3-perl': {},
'libjson-perl': {},
'libmath-round-perl': {},
'libproc-processtable-perl': {},
'libwww-curl-perl': {},
'libxml-rpc-fast-perl': {},
'libxml-simple-perl': {},
},
},
'voc-tracker-worker': {
'use_vaapi': False,
},
'users': {
'voc': {
'home': '/opt/voc',
},
},
'pacman': {
'packages': {
'ffmpeg': {},
'fuse2': {},
'fuse3': {},
# fuse-ts is not packaged for Arch
'perl-boolean': {}, # from aurto
'perl-config-inifiles': {},
'perl-datetime': {},
'perl-file-which': {},
'perl-ipc-run3': {},
'perl-json': {},
'perl-math-round': {},
'perl-proc-processtable': {},
'perl-www-curl': {}, # from aurto
'perl-xml-simple': {},
},
},
}
# Install manually from CPAN:
# IO::Socket::SSL
# LWP::Protocol::https
# Types::Serialiser::Error
# XML::RPC::Fast
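# One way to do that, assuming the plain cpan client is available on the host:
#   cpan -i IO::Socket::SSL LWP::Protocol::https Types::Serialiser::Error XML::RPC::Fast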

View file

@ -283,7 +283,7 @@ def interface_ips(metadata):
'nftables/postrouting/10-wireguard',
)
def snat(metadata):
if not node.has_bundle('nftables'):
if not node.has_bundle('nftables') or node.os == 'arch':
raise DoNotRunAgain
snat_ip = metadata.get('wireguard/snat_ip', None)

View file

@ -3,4 +3,8 @@ ConditionPathExists=
[Service]
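# Empty assignments clear settings inherited from the shipped unit so they can be replaced below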
ExecStart=
% if node.os == 'arch':
ExecStart=/usr/bin/zpool import -aN -o cachefile=none
% else:
ExecStart=/usr/sbin/zpool import -aN -o cachefile=none
% endif

View file

@ -43,6 +43,18 @@ defaults = {
},
},
},
'pacman': {
'no_extract': {
'etc/sudoers.d/zfs',
},
'packages': {
'zfs-utils': {
'needed_by': {
'svc_systemd:zfs-zed',
},
},
},
},
'systemd-timers': {
'timers': {
'zfs-auto-snapshot-daily': {
@ -109,6 +121,27 @@ if node.has_bundle('telegraf'):
}
@metadata_reactor.provides(
'pacman/packages',
)
def packages(metadata):
if node.metadata.get('pacman/linux-lts', False):
pkgname = 'zfs-linux-lts'
else:
pkgname = 'zfs-linux'
return {
'pacman': {
'packages': {
pkgname: {
'needed_by': {
'zfs_dataset:',
'zfs_pool:',
},
},
},
},
}
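
The reactor keys off the pacman/linux-lts flag; a node running the LTS kernel would opt in with metadata like this (illustrative):

'pacman': {
    'linux-lts': True,  # the reactor above then pins zfs-linux-lts onto all pool/dataset items
},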
@metadata_reactor.provides(
'apt/packages',
)

View file

@ -1,4 +1,3 @@
109.203.176.0/21
109.237.176.0/20
109.72.116.0/24
116.50.16.0/21
@ -20,6 +19,7 @@
141.77.0.0/16
143.99.213.0/24
145.225.16.0/23
146.247.58.0/24
147.161.22.0/24
147.78.17.0/24
147.79.8.0/21
@ -31,13 +31,10 @@
149.237.203.0/24
149.237.250.0/24
149.237.251.0/24
149.237.254.0/24
149.243.232.0/22
149.249.244.0/22
149.249.244.0/23
149.249.246.0/23
151.243.168.0/24
151.243.173.0/24
153.17.244.8/29
153.17.249.0/24
153.17.250.0/24
@ -49,13 +46,12 @@
153.96.218.0/24
153.96.22.0/24
153.97.32.0/24
153.97.34.0/24
158.116.231.0/24
160.211.126.0/24
163.5.156.0/24
163.5.170.0/24
163.5.186.0/24
163.5.220.0/24
163.5.47.0/24
163.5.66.0/24
164.133.10.0/24
164.133.11.0/24
@ -100,7 +96,6 @@
185.202.32.0/21
185.207.46.0/24
185.21.247.0/24
185.224.0.0/24
185.237.0.0/24
185.237.1.0/24
185.237.2.0/24
@ -113,16 +108,11 @@
185.28.208.0/22
185.39.12.0/22
185.48.0.0/22
185.57.231.0/24
185.57.24.0/24
185.82.160.0/23
185.97.227.0/24
188.208.124.0/24
188.208.125.0/24
188.209.223.0/24
188.214.136.0/24
188.214.137.0/24
188.214.138.0/24
188.214.139.0/24
192.109.121.0/24
192.109.122.0/24
192.109.124.0/24
192.109.129.0/24
@ -163,6 +153,7 @@
193.100.248.0/22
193.100.252.0/24
193.100.3.0/24
193.101.12.0/22
193.101.128.0/22
193.101.139.0/24
193.101.162.0/23
@ -294,7 +285,6 @@
194.127.242.0/23
194.127.254.0/24
194.145.252.0/24
194.147.171.0/24
194.15.194.0/24
194.15.60.0/24
194.15.61.0/24
@ -329,6 +319,7 @@
194.180.64.0/20
194.25.0.0/16
194.25.1.5/32
194.26.191.0/24
194.31.142.0/24
194.31.208.0/24
194.31.209.0/24
@ -339,11 +330,6 @@
194.33.115.0/24
194.33.120.0/24
194.33.121.0/24
194.33.50.0/24
194.38.48.0/24
194.38.49.0/24
194.38.50.0/24
194.38.51.0/24
194.39.175.0/24
194.39.189.0/24
194.39.48.0/20
@ -443,9 +429,6 @@
205.142.63.0/24
212.184.0.0/15
212.185.0.0/16
212.68.172.0/22
212.68.176.0/22
212.68.180.0/22
213.145.90.0/23
213.145.92.0/23
213.173.0.0/19
@ -454,7 +437,7 @@
213.209.156.0/24
217.0.0.0/13
217.117.96.0/24
217.177.33.0/24
217.198.189.0/24
217.224.0.0/11
217.24.32.0/20
217.24.33.0/24
@ -464,22 +447,17 @@
31.224.0.0/11
31.6.56.0/23
37.143.0.0/22
37.230.61.0/24
37.46.11.0/24
37.50.0.0/15
37.80.0.0/12
45.112.192.0/24
45.129.165.0/24
45.132.80.0/22
45.141.54.0/24
45.145.16.0/24
45.147.227.0/24
45.149.7.0/24
45.155.77.0/24
45.81.255.0/24
45.83.136.0/22
45.93.186.0/23
46.202.0.0/24
46.250.224.0/21
46.250.232.0/21
46.78.0.0/15
@ -496,7 +474,6 @@
62.224.0.0/14
62.56.208.0/21
62.68.73.0/24
62.72.172.0/24
64.137.119.0/24
64.137.125.0/24
64.137.127.0/24
@ -539,9 +516,7 @@
84.32.48.0/22
84.55.0.0/24
84.55.1.0/24
84.55.17.0/24
84.55.2.0/24
84.55.22.0/24
84.55.3.0/24
84.55.4.0/24
84.55.5.0/24
@ -552,19 +527,13 @@
85.116.30.0/24
85.116.31.0/24
85.119.160.0/23
85.133.193.0/24
85.133.208.0/24
85.133.214.0/24
85.133.254.0/24
85.204.181.0/24
85.208.248.0/24
85.208.249.0/24
85.208.250.0/24
85.208.251.0/24
86.105.211.0/24
86.105.58.0/24
86.107.164.0/24
86.110.57.0/24
86.38.248.0/21
86.38.37.0/24
87.128.0.0/10
@ -576,6 +545,7 @@
89.116.64.0/22
89.213.186.0/23
89.39.97.0/24
89.43.34.0/24
91.0.0.0/10
91.103.240.0/21
91.124.135.0/24
@ -589,6 +559,7 @@
91.124.27.0/24
91.124.28.0/24
91.124.31.0/24
91.124.32.0/24
91.124.33.0/24
91.124.34.0/24
91.124.36.0/24
@ -635,15 +606,27 @@
91.222.232.0/22
91.227.98.0/23
91.232.54.0/24
91.246.176.0/21
92.112.10.0/24
92.112.158.0/24
92.112.128.0/24
92.112.155.0/24
92.112.157.0/24
92.112.16.0/22
92.112.160.0/24
92.112.162.0/24
92.112.165.0/24
92.112.167.0/24
92.112.20.0/22
92.112.48.0/24
92.112.6.0/24
92.112.7.0/24
92.112.8.0/24
92.112.49.0/24
92.112.52.0/24
92.112.54.0/24
92.112.59.0/24
92.112.63.0/24
92.112.64.0/24
92.112.67.0/24
92.112.79.0/24
92.112.81.0/24
92.112.83.0/24
92.112.94.0/24
92.114.44.0/22
92.119.164.0/22
92.119.208.0/24
@ -652,12 +635,8 @@
92.119.211.0/24
93.113.70.0/24
93.119.201.0/24
93.119.232.0/24
93.192.0.0/10
94.126.98.0/24
94.176.72.0/24
94.176.74.0/24
94.176.79.0/24
94.26.110.0/23
94.26.64.0/23
95.178.8.0/21

View file

@ -6,7 +6,6 @@
109.250.192.0/19
109.250.224.0/19
109.250.64.0/18
109.72.113.0/24
134.101.0.0/21
14.102.90.0/24
143.58.64.0/18
@ -122,7 +121,6 @@
202.71.128.0/20
202.71.141.0/24
212.204.0.0/19
212.23.205.0/24
212.7.128.0/19
212.8.0.0/19
212.80.224.0/19
@ -154,8 +152,6 @@
46.142.96.0/19
46.142.96.0/20
46.189.0.0/17
46.203.156.0/24
46.203.227.0/24
61.8.128.0/19
61.8.128.0/22
61.8.132.0/22
@ -168,7 +164,6 @@
62.214.224.0/19
62.217.32.0/19
62.220.0.0/19
62.220.1.0/24
62.68.82.0/24
62.72.64.0/19
62.72.70.0/24
@ -229,7 +224,6 @@
88.130.0.0/16
88.130.136.0/21
88.130.144.0/20
88.130.172.0/22
88.130.176.0/21
88.130.192.0/23
88.130.194.0/23
@ -248,16 +242,14 @@
88.130.63.0/24
88.130.64.0/19
88.130.96.0/19
89.187.24.0/24
89.187.26.0/24
89.207.200.0/21
89.244.0.0/14
89.244.120.0/21
89.244.160.0/21
89.244.176.0/20
89.244.192.0/19
89.244.224.0/19
89.244.76.0/22
89.244.224.0/20
89.244.76.0/24
89.244.78.0/23
89.244.80.0/20
89.244.96.0/22
@ -274,6 +266,7 @@
89.245.64.0/19
89.245.96.0/20
89.246.0.0/19
89.246.112.0/22
89.246.122.0/24
89.246.124.0/22
89.246.160.0/21
@ -332,8 +325,6 @@
92.117.248.0/21
92.117.64.0/19
92.117.96.0/19
93.114.90.0/24
93.114.91.0/24
94.134.0.0/15
94.134.0.0/18
94.134.112.0/22
@ -359,7 +350,6 @@
2001:1438:1:a00::/56
2001:1438:2000::/36
2001:1438:3000::/36
2001:1438:300::/56
2001:1438:4000::/36
2001:1438::/32
2001:16b8:1000::/40

View file

@ -1,29 +1,30 @@
-----BEGIN PGP PUBLIC KEY BLOCK-----
Version: GnuPG v2.0.19 (GNU/Linux)
mQINBGZMb30BEAC6c5P5lo5cLN2wX9+jA7TEEJ/NiiOM9VxBwB/c2PFd6AjdGBbe
28VcXWmFdETg1N3Woq08yNVXdxS1tMslyl9apmmyCiSC2OPMmTOveLzZ196IljYR
DeZMF8C+rdzNKXZzn7+nEp9xRy34QUZRfx6pEnugMd0VK0d/ZKgMbcq2IvcRQwap
60+9t8ppesXhgaRBsAzvrj1twngqXP90JwzKGaR+iaGzrvvJn6cgXkw3MyXhskKY
4J0c7TV6DmTOIfL6RmBp8+SSco8xXD/O/YIpG8LWe+sbMqSaq7jFvKCINWgK4RAt
7mBRHvx81Y8IwV6B2wch/lSyYxKXTbE7uMefy3vyP9A9IFhMbFpc0EJA/4tHYEL4
qPZyR44mizsxa+1h6AXO258ERtzL+FoksXnWTcQqBKjd6SHhLwN4BLsjrlWsJ6lD
VaSKsekEwMFTLvZiLxYXBLPU04dvGNgX7nbkFMEK6RxHqfMu+m6+0jPXzQ+ejuae
xoBBT61O7v5PPTqbZFBKnVzQPf7fBIHW5/AGAc+qAI459viwcCSlJ21RCzirFYc0
/KDuSoo61yyNcq4G271lbT5SNeMZNlDxKkiHjbCpIU6iEF7uK828F1ZGKOMRztok
bzE7j1IDIfDQ3P/zfq73Rr2S9FfHlXvEmLIuj5G4PO7p0IwUlCD1a9oY+QARAQAB
tCxJY2luZ2EgR21iSCAoQnVpbGQgc2VydmVyKSA8aW5mb0BpY2luZ2EuY29tPokC
TgQTAQoAOBYhBN069hmO0AC0wLc5VswRb1WqfyOCBQJmTG99AhsDBQsJCAcCBhUK
CQgLAgQWAgMBAh4BAheAAAoJEMwRb1WqfyOCGrIP/i/4fYEkdCi4nhQGMzSP0Eyh
UhJjsUP9mEqSQRqOAplvjYa1yBbrSPLfkRE0oAL/o+4eUKcAQFeDQtDXJ/D4xl3Q
J5MehRJYzklrSs5XkEscb73HoDBUfFSgCVM2zK+JkCX0CPJ4ZLWtZGJ+8pCLpnkH
nCPonbGc6sS+m2JsPRwxyxAhdXxWSAesXd8dUSW3MOQz9JlC4/idQcCFs03fdhuZ
4jGMry08OihWVudTDK8nkwRZLzNoOivAQ3mIeaTcRMmgPJfYN4k0o90lXJWAbG+2
j8p7Pyjv71OctI8KUbS4+f2H8i6r5Pc4M4hlUQh6QAN9o1oPJrXxurdp0EXgQXSy
rVH2MeguqprFJxGjdlTCSTYgQEmEXMixRAGzteEgCf/Qk9mPXoxFTNyNg4/Lkglb
Nj6dY6or6w+IsbdrcePqDAs+j9t5B97vU7Ldquloj85myQjkWPP8kjlsOlsXBkQ/
C+mD+5iW2AiWh+yCasf6mOZwUfINZF+VDpmfIsZZbWpcMgp1f32fpRFZ3ietnsnR
+luNb19hUHKyyDDHMe/YM7H9P5vtX9BGz6O9kNpo1LAnigkSQSFBZlK3Po3Yk9eg
XPbDT5HsU3TMyS5ZnSDRRPPJwsyGPXz+0pCADae9H9hCc2C2LZIrrtwlOFPWuViA
ifY/dQmUP37n5XgMADRc
=O0zm
mQGiBFKHzk4RBACSHMIFTtfw4ZsNKAA03Gf5t7ovsKWnS7kcMYleAidypqhOmkGg
0petiYsMPYT+MOepCJFGNzwQwJhZrdLUxxMSWay4Xj0ArgpD9vbvU+gj8Tb02l+x
SqNGP8jXMV5UnK4gZsrYGLUPvx47uNNYRIRJAGOPYTvohhnFJiG402dzlwCg4u5I
1RdFplkp9JM6vNM9VBIAmcED/2jr7UQGsPs8YOiPkskGHLh/zXgO8SvcNAxCLgbp
BjGcF4Iso/A2TAI/2KGJW6kBW/Paf722ltU6s/6mutdXJppgNAz5nfpEt4uZKZyu
oSWf77179B2B/Wl1BsX/Oc3chscAgQb2pD/qPF/VYRJU+hvdQkq1zfi6cVsxyREV
k+IwA/46nXh51CQxE29ayuy1BoIOxezvuXFUXZ8rP6aCh4KaiN9AJoy7pBieCzsq
d7rPEeGIzBjI+yhEu8p92W6KWzL0xduWfYg9I7a2GTk8CaLX2OCLuwnKd7RVDyyZ
yzRjWs0T5U7SRAWspLStYxMdKert9lLyQiRHtLwmlgBPqa0gh7Q+SWNpbmdhIE9w
ZW4gU291cmNlIE1vbml0b3JpbmcgKEJ1aWxkIHNlcnZlcikgPGluZm9AaWNpbmdh
Lm9yZz6IYAQTEQIAIAUCUofOTgIbAwYLCQgHAwIEFQIIAwQWAgMBAh4BAheAAAoJ
EMbjGcM0QQaCgSQAnRjXdbsyqziqhmxfAKffNJYuMPwdAKCS/IRCVyQzApFBtIBQ
1xuoym/4C7kCDQRSh85OEAgAvPwjlURCi8z6+7i60no4n16dNcSzd6AT8Kizpv2r
9BmNBff/GNYGnHyob/DMtmO2esEuVG8w62rO9m1wzzXzjbtmtU7NZ1Tg+C+reU2I
GNVu3SYtEVK/UTJHAhLcgry9yD99610tYPN2Fx33Efse94mXOreBfCvDsmFGSc7j
GVNCWXpMR3jTYyGj1igYd5ztOzG63D8gPyOucTTl+RWN/G9EoGBv6sWqk5eCd1Fs
JlWyQX4BJn3YsCZx3uj1DWL0dAl2zqcn6m1M4oj1ozW47MqM/efKOcV6VvCs9SL8
F/NFvZcH4LKzeupCQ5jEONqcTlVlnLlIqId95Z4DI4AV9wADBQf/S6sKA4oH49tD
Yb5xAfUyEp5ben05TzUJbXs0Z7hfRQzy9+vQbWGamWLgg3QRUVPx1e4IT+W5vEm5
dggNTMEwlLMI7izCPDcD32B5oxNVxlfj428KGllYWCFj+edY+xKTvw/PHnn+drKs
LE65Gwx4BPHm9EqWHIBX6aPzbgbJZZ06f6jWVBi/N7e/5n8lkxXqS23DBKemapyu
S1i56sH7mQSMaRZP/iiOroAJemPNxv1IQkykxw2woWMmTLKLMCD/i+4DxejE50tK
dxaOLTc4HDCsattw/RVJO6fwE414IXHMv330z4HKWJevMQ+CmQGfswvCwgeBP9n8
PItLjBQAXIhJBBgRAgAJBQJSh85OAhsMAAoJEMbjGcM0QQaCzpAAmwUNoRyySf9p
5G3/2UD1PMueIwOtAKDVVDXEq5LJPVg4iafNu0SRMwgP0Q==
=icbY
-----END PGP PUBLIC KEY BLOCK-----

View file

@ -1,33 +0,0 @@
location ~ ^/lib.*\.(js|css|gif|png|ico|jpg|jpeg|svg)$ {
expires 365d; # browser caching
}
location ~ /(install.php) { deny all; }
location ~ /(\.ht|\.git|\.hg|\.svn|\.vs|data|conf|bin|inc|vendor|README|VERSION|SECURITY.md|COPYING|composer.json|composer.lock) {
#return 404; # https://www.dokuwiki.org/install:nginx?rev=1734102057#nginx_particulars
deny all; # Returns 403
}
# Support for X-Accel-Redirect
location ~ ^/data/ {
internal;
}
location / {
try_files $uri $uri/ @dokuwiki;
# This means: where $uri is 'path', if 'GET /path' doesn't exist, redirect
# the client to the 'GET /path/' directory. If neither exists, fall through to the @dokuwiki rules.
}
location @dokuwiki {
rewrite ^/_media/(.*) /lib/exe/fetch.php?media=$1 last;
rewrite ^/_detail/(.*) /lib/exe/detail.php?media=$1 last;
rewrite ^/_export/([^/]+)/(.*) /doku.php?do=export_$1&id=$2 last;
# rewrite ^/tag/(.*) /doku.php?id=tag:$1&do=showtag&tag=tag:$1 last; #untested
rewrite ^/(.*) /doku.php?id=$1&$args last;
# rewrites "doku.php/" out of the URLs if you set the userewrite
# setting to .htaccess in dokuwiki config page
}

Some files were not shown because too many files have changed in this diff.