Compare commits


No commits in common. "main" and "vmhostumzug" have entirely different histories.

172 changed files with 2748 additions and 2271 deletions

.envrc

@@ -1,3 +0,0 @@
layout python3
source_env_if_exists .envrc.local

.gitignore

@@ -1,5 +1,3 @@
 .secrets.cfg*
 __pycache__
 *.swp
-.direnv
-.envrc.local

@@ -0,0 +1,5 @@
context.exec = [
    { path = "pactl" args = "load-module module-native-protocol-tcp" }
    { path = "pactl" args = "load-module module-zeroconf-discover" }
    { path = "pactl" args = "load-module module-zeroconf-publish" }
]

@@ -0,0 +1,3 @@
[Autologin]
User=${user}
Session=i3.desktop

@@ -0,0 +1,110 @@
from os import listdir
from os.path import join

actions = {
    'fc-cache_flush': {
        'command': 'fc-cache -f',
        'triggered': True,
        'needs': {
            'pkg_pacman:fontconfig',
        },
    },
    'i3pystatus_create_virtualenv': {
        'command': '/usr/bin/python3 -m virtualenv -p python3 /opt/i3pystatus/venv/',
        'unless': 'test -d /opt/i3pystatus/venv/',
        'needs': {
            'directory:/opt/i3pystatus/src',
            'pkg_pacman:python-virtualenv',
        },
    },
    'i3pystatus_install': {
        'command': ' && '.join([
            'cd /opt/i3pystatus/src',
            '/opt/i3pystatus/venv/bin/pip install --upgrade pip colour netifaces basiciw pytz',
            '/opt/i3pystatus/venv/bin/pip install --upgrade -e .',
        ]),
        'needs': {
            'action:i3pystatus_create_virtualenv',
        },
        'triggered': True,
    },
}

directories = {
    '/etc/sddm.conf.d': {
        'purge': True,
    },
    '/opt/i3pystatus/src': {},
    '/usr/share/fonts/bundlewrap': {
        'purge': True,
        'triggers': {
            'action:fc-cache_flush',
        },
    },
}

svc_systemd = {
    'avahi-daemon': {
        'needs': {
            'pkg_pacman:avahi',
        },
    },
    'sddm': {
        'needs': {
            'pkg_pacman:sddm',
        },
    },
}

git_deploy = {
    '/opt/i3pystatus/src': {
        'repo': 'https://github.com/enkore/i3pystatus.git',
        'rev': 'current',
        'triggers': {
            'action:i3pystatus_install',
        },
    },
}

files['/etc/pipewire/pipewire-pulse.conf.d/50-network.conf'] = {}

for filename in listdir(join(repo.path, 'data', 'arch-with-gui', 'files', 'fonts')):
    if filename.startswith('.'):
        continue

    if filename.endswith('.vault'):
        # XXX remove this once we have a new bundlewrap release
        # https://github.com/bundlewrap/bundlewrap/commit/2429b153dd1ca6781cf3812e2dec9c2b646a546b
        from os import environ
        if environ.get('BW_VAULT_DUMMY_MODE', '0') == '1':
            continue

        font_name = filename[:-6]
        attrs = {
            'content': repo.vault.decrypt_file_as_base64(join('arch-with-gui', 'files', 'fonts', filename)),
            'content_type': 'base64',
        }
    else:
        font_name = filename
        attrs = {
            'source': join('fonts', filename),
            'content_type': 'binary',
        }

    files[f'/usr/share/fonts/bundlewrap/{font_name}'] = {
        'triggers': {
            'action:fc-cache_flush',
        },
        **attrs,
    }

if node.metadata.get('arch-with-gui/autologin_as', None):
    files['/etc/sddm.conf.d/autologin.conf'] = {
        'context': {
            'user': node.metadata.get('arch-with-gui/autologin_as'),
        },
        'content_type': 'mako',
        'before': {
            'svc_systemd:sddm',
        },
    }

@@ -0,0 +1,124 @@
assert node.os == 'arch'

defaults = {
    'backups': {
        'paths': {
            '/etc/netctl',
        },
    },
    'icinga_options': {
        'exclude_from_monitoring': True,
    },
    'nftables': {
        'input': {
            '50-avahi': {
                'udp dport 5353 accept',
                'udp sport 5353 accept',
            },
        },
    },
    'pacman': {
        'packages': {
            # fonts
            'fontconfig': {},
            'ttf-dejavu': {
                'needed_by': {
                    'pkg_pacman:sddm',
                },
            },
            # login management
            'sddm': {},
            # networking
            'avahi': {},
            'netctl': {},
            'rfkill': {},
            'wpa_supplicant': {},
            'wpa_actiond': {},
            # shell and other gui stuff
            'dunst': {},
            'fish': {},
            'kitty': {},
            'libnotify': {},  # provides notify-send
            'light': {},
            'redshift': {},
            'rofi': {},
            # sound
            'calf': {},
            'easyeffects': {},
            'lsp-plugins': {},
            'pavucontrol': {},
            'pipewire': {},
            'pipewire-jack': {},
            'pipewire-pulse': {},
            'pipewire-zeroconf': {},
            'qpwgraph': {},
            # window management
            'i3-wm': {},
            'i3lock': {},
            'xss-lock': {},
            # i3pystatus dependencies
            'iw': {},
            'wireless_tools': {},
            # Xorg
            'xf86-input-libinput': {},
            'xf86-input-wacom': {},
            'xorg-server': {},
            'xorg-setxkbmap': {},
            'xorg-xev': {},
            'xorg-xinput': {},
            'xorg-xset': {},
            # all them apps
            'browserpass': {},
            'browserpass-firefox': {},
            'ffmpeg': {},
            'firefox': {},
            'gimp': {},
            'imagemagick': {},
            'inkscape': {},
            'kdenlive': {},
            'maim': {},
            'mosh': {},
            'mosquitto': {},
            'mpv': {},
            'pass': {},
            'pass-otp': {},
            'pdftk': {},
            'pwgen': {},
            'qpdfview': {},
            'samba': {},
            'shotcut': {},
            'sipcalc': {},
            'the_silver_searcher': {},
            'tlp': {},
            'virt-manager': {},
            'xclip': {},
            'xdotool': {},  # needed for maim window selection
        },
    },
}


@metadata_reactor.provides(
    'backups/paths',
)
def backup_every_user_home(metadata):
    paths = set()

    for user, config in metadata.get('users', {}).items():
        if config.get('delete', False):
            continue

        paths.add(config.get('home', f'/home/{user}'))

    return {
        'backups': {
            'paths': paths,
        },
    }
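
A minimal sketch of how this reactor behaves, for reference only. Here `metadata` is simplified to a plain dict (an assumption; bundlewrap passes its own metadata proxy with the same `.get()` interface), and the sample users are invented:

    # Hypothetical illustration of backup_every_user_home(); not part of the repo.
    def backup_every_user_home(metadata):
        paths = set()
        for user, config in metadata.get('users', {}).items():
            if config.get('delete', False):
                continue
            paths.add(config.get('home', f'/home/{user}'))
        return {'backups': {'paths': paths}}

    # prints {'backups': {'paths': {'/home/alice', '/srv/bob'}}}
    print(backup_every_user_home({
        'users': {
            'alice': {},
            'bob': {'home': '/srv/bob'},
            'mallory': {'delete': True},  # skipped: flagged for deletion
        },
    }))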

@@ -1,22 +0,0 @@
[server]
host-name=${node.name.split('.')[-1]}
use-ipv4=yes
use-ipv6=${'yes' if node.metadata.get('avahi-daemon/use-ipv6') else 'no'}
allow-interfaces=${','.join(sorted(node.metadata.get('interfaces', {}).keys()))}
ratelimit-interval-usec=1000000
ratelimit-burst=1000

[wide-area]
enable-wide-area=yes

[publish]
disable-publishing=no
disable-user-service-publishing=no
publish-hinfo=yes
publish-workstation=no
publish-aaaa-on-ipv4=no
publish-a-on-ipv6=no

[reflector]

[rlimits]

@@ -1,18 +0,0 @@
directories['/etc/avahi/services'] = {
    'purge': True,
}

files['/etc/avahi/avahi-daemon.conf'] = {
    'content_type': 'mako',
    'triggers': {
        'svc_systemd:avahi-daemon:restart',
    },
}

svc_systemd['avahi-daemon'] = {
    'needs': {
        'file:/etc/avahi/avahi-daemon.conf',
        'pkg_apt:avahi-daemon',
        'pkg_apt:libnss-mdns',
    },
}

@@ -1,11 +0,0 @@
defaults = {
    'apt': {
        'packages': {
            'avahi-daemon': {},
            'libnss-mdns': {},
        },
    },
    'avahi-daemon': {
        'use-ipv6': True,
    }
}

@@ -160,7 +160,7 @@ def monitoring(metadata):
                 client,
                 config['one_backup_every_hours'],
             ),
-            'vars.sshmon_timeout': 40,
+            'vars.sshmon_timeout': 20,
         }

     return {

@@ -24,6 +24,7 @@ files = {
         'before': {
             'action:',
             'pkg_apt:',
+            'pkg_pacman:',
         },
     },
 }

@@ -1,5 +1,10 @@
+if node.os == 'arch':
+    filename = '/etc/bird.conf'
+else:
+    filename = '/etc/bird/bird.conf'
+
 files = {
-    '/etc/bird/bird.conf': {
+    filename: {
         'content_type': 'mako',
         'triggers': {
             'svc_systemd:bird:reload',
@@ -10,7 +15,7 @@ files = {
 svc_systemd = {
     'bird': {
         'needs': {
-            f'file:/etc/bird/bird.conf',
+            f'file:{filename}',
         },
     },
 }

@@ -13,6 +13,15 @@ defaults = {
             },
         },
     },
+    'pacman': {
+        'packages': {
+            'bird': {
+                'needed_by': {
+                    'svc_systemd:bird',
+                },
+            },
+        },
+    },
     'sysctl': {
         'options': {
             'net.ipv4.conf.all.forwarding': '1',

@@ -7,6 +7,9 @@ supported_os = {
         12: 'bookworm',
         99: 'unstable',
     },
+    'raspbian': {
+        10: 'buster',
+    },
 }

 try:
@@ -79,10 +82,6 @@ actions = {
         'triggered': True,
         'cascade_skip': False,
     },
-    'apt_execute_update_commands': {
-        'command': ' && '.join(sorted(node.metadata.get('apt/additional_update_commands', {'true'}))),
-        'triggered': True,
-    },
 }

 directories = {

@@ -1,3 +1,10 @@
+if node.os == 'arch':
+    service_name = 'cronie'
+    package_name = 'pkg_pacman:cronie'
+else:
+    service_name = 'cron'
+    package_name = 'pkg_apt:cron'
+
 files = {
     '/etc/crontab': {
         'content_type': 'mako',
@@ -17,9 +24,9 @@ directories = {
 }

 svc_systemd = {
-    'cron': {
+    service_name: {
         'needs': {
-            'pkg_apt:cron',
+            package_name,
         },
     },
 }

@@ -4,4 +4,9 @@ defaults = {
             'cron': {},
         },
     },
+    'pacman': {
+        'packages': {
+            'cronie': {},
+        },
+    },
 }

@@ -1,39 +0,0 @@
#!/usr/bin/env python3

from json import loads
from subprocess import check_output
from sys import argv

try:
    container_name = argv[1]

    docker_ps = check_output([
        'docker',
        'container',
        'ls',
        '--all',
        '--format',
        'json',
        '--filter',
        f'name={container_name}'
    ])

    containers = loads(f"[{','.join([l for l in docker_ps.decode().splitlines() if l])}]")

    if not containers:
        print(f'CRITICAL: container {container_name} not found!')
        exit(2)

    if len(containers) > 1:
        print(f'Found more than one container matching {container_name}!')
        print(docker_ps)
        exit(3)

    if containers[0]['State'] != 'running':
        print(f'WARNING: container {container_name} not "running"')
        exit(2)

    print(f"OK: {containers[0]['Status']}")
except Exception as e:
    print(repr(e))
    exit(2)

@@ -1,50 +0,0 @@
#!/bin/bash

[[ -n "$DEBUG" ]] && set -x

ACTION="$1"

set -euo pipefail

if [[ -z "$ACTION" ]]
then
    echo "Usage: $0 start|stop"
    exit 1
fi

PUID="$(id -u "docker-${name}")"
PGID="$(id -g "docker-${name}")"

if [ "$ACTION" == "start" ]
then
    docker run -d \
        --name "${name}" \
        --env "PUID=$PUID" \
        --env "PGID=$PGID" \
        --env "TZ=${timezone}" \
% for k, v in sorted(environment.items()):
        --env "${k}=${v}" \
% endfor
        --network host \
% for host_port, container_port in sorted(ports.items()):
        --expose "127.0.0.1:${host_port}:${container_port}" \
% endfor
% for host_path, container_path in sorted(volumes.items()):
        --volume "/var/opt/docker-engine/${name}/${host_path}:${container_path}" \
% endfor
        --restart unless-stopped \
        "${image}"
elif [ "$ACTION" == "stop" ]
then
    docker stop "${name}"
    docker rm "${name}"
else
    echo "Unknown action $ACTION"
    exit 1
fi

% if node.has_bundle('nftables'):
systemctl reload nftables
% endif

@@ -1,14 +0,0 @@
[Unit]
Description=docker-engine app ${name}
After=network.target
Requires=${' '.join(sorted(requires))}

[Service]
WorkingDirectory=/var/opt/docker-engine/${name}/
ExecStart=/opt/docker-engine/${name} start
ExecStop=/opt/docker-engine/${name} stop
Type=simple
RemainAfterExit=true

[Install]
WantedBy=multi-user.target

@@ -1,99 +0,0 @@
from bundlewrap.metadata import metadata_to_json

deps = {
    'pkg_apt:docker-ce',
    'pkg_apt:docker-ce-cli',
}

directories['/opt/docker-engine'] = {
    'purge': True,
}
directories['/var/opt/docker-engine'] = {}

files['/etc/docker/daemon.json'] = {
    'content': metadata_to_json(node.metadata.get('docker-engine/config')),
    'triggers': {
        'svc_systemd:docker:restart',
    },
    # install config before installing packages to ensure the config is
    # applied to the first start as well
    'before': deps,
}

svc_systemd['docker'] = {
    'needs': deps,
}

files['/usr/local/share/icinga/plugins/check_docker_container'] = {
    'mode': '0755',
}

for app, config in node.metadata.get('docker-engine/containers', {}).items():
    volumes = config.get('volumes', {})

    files[f'/opt/docker-engine/{app}'] = {
        'source': 'docker-wrapper',
        'content_type': 'mako',
        'context': {
            'environment': config.get('environment', {}),
            'image': config['image'],
            'name': app,
            'ports': config.get('ports', {}),
            'timezone': node.metadata.get('timezone'),
            'volumes': volumes,
        },
        'mode': '0755',
        'triggers': {
            f'svc_systemd:docker-{app}:restart',
        },
    }

    users[f'docker-{app}'] = {
        'home': f'/var/opt/docker-engine/{app}',
        'groups': {
            'docker',
        },
        'after': {
            # provides docker group
            'pkg_apt:docker-ce',
        },
    }

    files[f'/usr/local/lib/systemd/system/docker-{app}.service'] = {
        'source': 'docker-wrapper.service',
        'content_type': 'mako',
        'context': {
            'name': app,
            'requires': {
                *set(config.get('requires', set())),
                'docker.service',
            }
        },
        'triggers': {
            'action:systemd-reload',
            f'svc_systemd:docker-{app}:restart',
        },
    }

    svc_systemd[f'docker-{app}'] = {
        'needs': {
            *deps,
            f'file:/opt/docker-engine/{app}',
            f'file:/usr/local/lib/systemd/system/docker-{app}.service',
            f'user:docker-{app}',
            'svc_systemd:docker',
            *set(config.get('needs', set())),
        },
    }

    for volume in volumes:
        directories[f'/var/opt/docker-engine/{app}/{volume}'] = {
            'owner': f'docker-{app}',
            'group': f'docker-{app}',
            'needed_by': {
                f'svc_systemd:docker-{app}',
            },
            # don't do anything if the directory exists, docker images
            # mangle owners
            'unless': f'test -d /var/opt/docker-engine/{app}/{volume}',
        }

@@ -1,83 +0,0 @@
defaults = {
    'apt': {
        'packages': {
            'docker-ce': {},
            'docker-ce-cli': {},
            'docker-compose-plugin': {},
        },
        'repos': {
            'docker': {
                'items': {
                    'deb https://download.docker.com/linux/debian {os_release} stable',
                },
            },
        },
    },
    'backups': {
        'paths': {
            '/var/opt/docker-engine',
        },
    },
    'hosts': {
        'entries': {
            '172.17.0.1': {
                'host.docker.internal',
            },
        },
    },
    'docker-engine': {
        'config': {
            'iptables': False,
            'no-new-privileges': True,
        },
    },
    'zfs': {
        'datasets': {
            'tank/docker-data': {
                'mountpoint': '/var/opt/docker-engine',
            },
        },
    },
}


@metadata_reactor.provides(
    'icinga2_api/docker-engine/services',
)
def monitoring(metadata):
    services = {
        'DOCKER PROCESS': {
            'command_on_monitored_host': '/usr/lib/nagios/plugins/check_procs -C dockerd -c 1:',
        },
    }

    for app in metadata.get('docker-engine/containers', {}):
        services[f'DOCKER CONTAINER {app}'] = {
            'command_on_monitored_host': f'sudo /usr/local/share/icinga/plugins/check_docker_container {app}'
        }

    return {
        'icinga2_api': {
            'docker-engine': {
                'services': services,
            },
        },
    }


@metadata_reactor.provides(
    'zfs/datasets',
)
def zfs(metadata):
    datasets = {}

    for app in metadata.get('docker-engine/containers', {}):
        datasets[f'tank/docker-data/{app}'] = {
            'mountpoint': f'/var/opt/docker-engine/{app}'
        }

    return {
        'zfs': {
            'datasets': datasets,
        },
    }

@@ -1,64 +0,0 @@
assert node.has_bundle('docker-engine')
assert node.has_bundle('redis')
assert not node.has_bundle('postgresql')  # docker container uses that port

defaults = {
    'docker-engine': {
        'containers': {
            'immich': {
                'image': 'ghcr.io/imagegenius/immich:latest',
                'environment': {
                    'DB_DATABASE_NAME': 'immich',
                    'DB_HOSTNAME': 'host.docker.internal',
                    'DB_PASSWORD': repo.vault.password_for(f'{node.name} postgresql immich'),
                    'DB_USERNAME': 'immich',
                    'REDIS_HOSTNAME': 'host.docker.internal',
                },
                'volumes': {
                    'config': '/config',
                    'libraries': '/libraries',
                    'photos': '/photos',
                },
                'needs': {
                    'svc_systemd:docker-postgresql14',
                },
                'requires': {
                    'docker-postgresql14.service',
                },
            },
            'postgresql14': {
                'image': 'tensorchord/pgvecto-rs:pg14-v0.2.0',
                'environment': {
                    'POSTGRES_PASSWORD': repo.vault.password_for(f'{node.name} postgresql immich'),
                    'POSTGRES_USER': 'immich',
                    'POSTGRES_DB': 'immich',
                },
                'volumes': {
                    'database': '/var/lib/postgresql/data',
                },
            },
        },
    },
    'nginx': {
        'vhosts': {
            'immich': {
                'locations': {
                    '/': {
                        'target': 'http://127.0.0.1:8080/',
                        'websockets': True,
                        'max_body_size': '500m',
                    },
                    #'/api/socket.io/': {
                    #    'target': 'http://127.0.0.1:8081/',
                    #    'websockets': True,
                    #},
                },
            },
        },
    },
    'redis': {
        'bind': '0.0.0.0',
    },
}

@@ -29,8 +29,8 @@ mail_location = maildir:/var/mail/vmail/%d/%n

 protocols = imap lmtp sieve
 ssl = required
-ssl_cert = </var/lib/dehydrated/certs/${node.metadata.get('postfix/myhostname')}/fullchain.pem
-ssl_key = </var/lib/dehydrated/certs/${node.metadata.get('postfix/myhostname')}/privkey.pem
+ssl_cert = </var/lib/dehydrated/certs/${node.metadata.get('postfix/myhostname', node.metadata['hostname'])}/fullchain.pem
+ssl_key = </var/lib/dehydrated/certs/${node.metadata.get('postfix/myhostname', node.metadata['hostname'])}/privkey.pem
 ssl_dh = </etc/ssl/certs/dhparam.pem
 ssl_min_protocol = TLSv1.2
 ssl_cipher_list = ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:DHE-RSA-CHACHA20-POLY1305

@@ -20,7 +20,7 @@ def nodejs(metadata):
     if version >= (1, 11, 71):
         return {
             'nodejs': {
-                'version': 22,
+                'version': 20,
             },
         }
     else:

@@ -100,7 +100,7 @@ def nginx(metadata):
                 },
             },
             'website_check_path': '/user/login',
-            'website_check_string': 'Sign in',
+            'website_check_string': 'Sign In',
         },
     },
 },

@@ -72,6 +72,7 @@ actions = {
             'yarn build',
         ]),
         'needs': {
+            'action:nodejs_install_yarn',
             'file:/opt/hedgedoc/config.json',
             'git_deploy:/opt/hedgedoc',
             'pkg_apt:nodejs',

@@ -2,42 +2,48 @@
 from sys import exit

-from packaging.version import parse
-from requests import get
+import requests
+from packaging import version

-API_TOKEN = "${token}"
-DOMAIN = "${domain}"
+bearer = "${bearer}"
+domain = "${domain}"
+
+OK = 0
+WARN = 1
+CRITICAL = 2
+UNKNOWN = 3
+
+status = 3
+message = "Unknown Update Status"
+
+domain = "hass.home.kunbox.net"
+
+s = requests.Session()
+s.headers.update({"Content-Type": "application/json"})

 try:
-    r = get("https://version.home-assistant.io/stable.json")
-    r.raise_for_status()
-    stable_version = parse(r.json()["homeassistant"]["generic-x86-64"])
-except Exception as e:
-    print(f"Could not get stable version information from home-assistant.io: {e!r}")
-    exit(3)
-
-try:
-    r = get(
-        f"https://{DOMAIN}/api/config",
-        headers={"Authorization": f"Bearer {API_TOKEN}", "Content-Type": "application/json"},
+    stable_version = version.parse(
+        s.get("https://version.home-assistant.io/stable.json").json()["homeassistant"][
+            "generic-x86-64"
+        ]
     )
-    r.raise_for_status()
-    running_version = parse(r.json()["version"])
-except Exception as e:
-    print(f"Could not get running version information from homeassistant: {e!r}")
-    exit(3)
-
-try:
-    if stable_version > running_version:
-        print(
-            f"There is a newer version available: {stable_version} (currently installed: {running_version})"
-        )
-        exit(2)
+    s.headers.update(
+        {"Authorization": f"Bearer {bearer}", "Content-Type": "application/json"}
+    )
+    running_version = version.parse(
+        s.get(f"https://{domain}/api/config").json()["version"]
+    )
+
+    if running_version == stable_version:
+        status = 0
+        message = f"OK - running version {running_version} equals stable version {stable_version}"
+    elif running_version > stable_version:
+        status = 1
+        message = f"WARNING - stable version {stable_version} is lower than running version {running_version}, check if downgrade is necessary."
     else:
-        print(
-            f"Currently running version {running_version} matches newest release on home-assistant.io"
-        )
-        exit(0)
+        status = 2
+        message = f"CRITICAL - update necessary, running version {running_version} is lower than stable version {stable_version}"
 except Exception as e:
-    print(repr(e))
-    exit(3)
+    message = f"{message}: {repr(e)}"
+
+print(message)
+exit(status)

@@ -5,8 +5,6 @@ After=network-online.target
 [Service]
 Type=simple
 User=homeassistant
-Environment="VIRTUAL_ENV=/opt/homeassistant/venv"
-Environment="PATH=/opt/homeassistant/venv/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
 WorkingDirectory=/var/opt/homeassistant
 ExecStart=/opt/homeassistant/venv/bin/hass -c "/var/opt/homeassistant"
 RestartForceExitStatus=100

@@ -30,7 +30,7 @@ files = {
     '/usr/local/share/icinga/plugins/check_homeassistant_update': {
         'content_type': 'mako',
         'context': {
-            'token': node.metadata.get('homeassistant/api_secret'),
+            'bearer': repo.vault.decrypt(node.metadata.get('homeassistant/api_secret')),
             'domain': node.metadata.get('homeassistant/domain'),
         },
         'mode': '0755',

@@ -50,13 +50,17 @@ def check_list(ip_list, blocklist, warn_ips):
     ]).decode().splitlines()
     for item in result:
         if item.startswith(';;'):
-            continue
-        msgs.append('{} listed in {} as {}'.format(
-            ip,
-            blocklist,
-            item,
-        ))
-        if item in warn_ips and returncode < 2:
+            msgs.append('{} - {}'.format(
+                blocklist,
+                item,
+            ))
+        else:
+            msgs.append('{} listed in {} as {}'.format(
+                ip,
+                blocklist,
+                item,
+            ))
+        if (item in warn_ips or item.startswith(';;')) and returncode < 2:
             returncode = 1
         else:
             returncode = 2

@@ -129,14 +129,11 @@ def notify_per_ntfy():
             data=message_text,
             headers=headers,
             auth=(CONFIG['ntfy']['user'], CONFIG['ntfy']['password']),
-            timeout=10,
         )
         r.raise_for_status()
     except Exception as e:
         log_to_syslog('Sending a Notification failed: {}'.format(repr(e)))
-        return False

-    return True


 def notify_per_mail():
@@ -202,8 +199,7 @@ if __name__ == '__main__':
         notify_per_mail()

     if args.sms:
-        ntfy_worked = False
-        if CONFIG['ntfy']['user']:
-            ntfy_worked = notify_per_ntfy()
-        if not args.service_name or not ntfy_worked:
+        if args.service_name:
             notify_per_sms()
+    if CONFIG['ntfy']['user']:
+        notify_per_ntfy()

@@ -401,6 +401,22 @@ for rnode in sorted(repo.nodes):
                     DAYS_TO_STRING[day%7]: f'{hour}:{minute}-{hour}:{minute+15}',
                 },
             })
+        elif (
+            rnode.has_bundle('pacman')
+            and rnode.metadata.get('pacman/unattended-upgrades/is_enabled', False)
+        ):
+            day = rnode.metadata.get('pacman/unattended-upgrades/day')
+            hour = rnode.metadata.get('pacman/unattended-upgrades/hour')
+            minute = rnode.magic_number%30
+
+            downtimes.append({
+                'name': 'unattended-upgrades',
+                'host': rnode.name,
+                'comment': f'Downtime for upgrade-and-reboot of node {rnode.name}',
+                'times': {
+                    DAYS_TO_STRING[day%7]: f'{hour}:{minute}-{hour}:{minute+15}',
+                },
+            })

 files['/etc/icinga2/conf.d/groups.conf'] = {
     'source': 'icinga2/groups.conf',

@@ -17,6 +17,7 @@ defaults = {
             'icinga2': {},
             'icinga2-ido-pgsql': {},
             'icingaweb2': {},
+            'icingaweb2-module-monitoring': {},
             'python3-easysnmp': {},
             'python3-flask': {},
             'snmp': {},

@@ -23,7 +23,7 @@ actions = {
 git_deploy = {
     '/opt/infobeamer-cms/src': {
         'rev': 'master',
-        'repo': 'https://github.com/voc/infobeamer-cms.git',
+        'repo': 'https://github.com/sophieschi/36c3-cms.git',
         'needs': {
             'directory:/opt/infobeamer-cms/src',
         },
@@ -96,6 +96,14 @@ files = {
     },
 }

+pkg_pip = {
+    'github-flask': {
+        'needed_by': {
+            'svc_systemd:infobeamer-cms',
+        },
+    },
+}
+
 svc_systemd = {
     'infobeamer-cms': {
         'needs': {

@@ -1,13 +1,10 @@
-from datetime import datetime, timedelta, timezone
-
-assert node.has_bundle('redis')
+from datetime import datetime, timedelta

 defaults = {
     'infobeamer-cms': {
         'config': {
             'MAX_UPLOADS': 5,
             'PREFERRED_URL_SCHEME': 'https',
-            'REDIS_HOST': '127.0.0.1',
             'SESSION_COOKIE_NAME': '__Host-sess',
             'STATIC_PATH': '/opt/infobeamer-cms/static',
             'URL_KEY': repo.vault.password_for(f'{node.name} infobeamer-cms url key'),
@@ -52,7 +49,7 @@ def nginx(metadata):
     'infobeamer-cms/config/TIME_MIN',
 )
 def event_times(metadata):
-    event_start = datetime.strptime(metadata.get('infobeamer-cms/event_start_date'), '%Y-%m-%d').replace(tzinfo=timezone.utc)
+    event_start = datetime.strptime(metadata.get('infobeamer-cms/event_start_date'), '%Y-%m-%d')
     event_duration = metadata.get('infobeamer-cms/event_duration_days', 4)
     event_end = event_start + timedelta(days=event_duration)

@@ -1,10 +1,9 @@
 #!/usr/bin/env python3

 import logging
-from datetime import datetime
+from datetime import datetime, timezone
 from json import dumps
 from time import sleep
-from zoneinfo import ZoneInfo

 import paho.mqtt.client as mqtt
 from requests import RequestException, get
@@ -25,8 +24,7 @@ logging.basicConfig(
 )

 LOG = logging.getLogger("main")
-TZ = ZoneInfo("Europe/Berlin")
-DUMP_TIME = "0900"
+MLOG = logging.getLogger("mqtt")


 state = None
@@ -40,10 +38,7 @@ def mqtt_out(message, level="INFO", device=None):
     key = "infobeamer"
     if device:
         key += f"/{device['id']}"
-        if device["description"]:
-            message = f"[{device['description']}] {message}"
-        else:
-            message = f"[{device['serial']}] {message}"
+        message = f"[{device['description']}] {message}"

     client.publish(
         CONFIG["mqtt"]["topic"],
@@ -66,14 +61,14 @@ def mqtt_dump_state(device):

     out.append("Location: {}".format(device["location"]))
     out.append("Setup: {} ({})".format(device["setup"]["name"], device["setup"]["id"]))
     out.append("Resolution: {}".format(device["run"].get("resolution", "unknown")))
+    if not device["is_synced"]:
+        out.append("syncing ...")

     mqtt_out(
         " - ".join(out),
         device=device,
     )

-def is_dump_time():
-    return datetime.now(TZ).strftime("%H%M") == DUMP_TIME

 mqtt_out("Monitor starting up")
 while True:
@@ -86,14 +81,15 @@ while True:
         r.raise_for_status()
         ib_state = r.json()["devices"]
     except RequestException as e:
-        LOG.exception("Could not get device data from info-beamer")
+        LOG.exception("Could not get data from info-beamer")
         mqtt_out(
-            f"Could not get device data from info-beamer: {e!r}",
+            f"Could not get data from info-beamer: {e!r}",
             level="WARN",
         )
     else:
         new_state = {}
-        for device in sorted(ib_state, key=lambda x: x["id"]):
+        online_devices = set()
+        for device in ib_state:
             did = str(device["id"])

             if did in new_state:
@@ -101,8 +97,7 @@ while True:
                 continue
             new_state[did] = device

-            # force information output for every online device at 09:00 CE(S)T
-            must_dump_state = is_dump_time()
+            must_dump_state = False

             if state is not None:
                 if did not in state:
@@ -145,15 +140,17 @@ while True:
                 if device["is_online"]:
                     if device["maintenance"]:
                         mqtt_out(
-                            "maintenance required: {}".format(
-                                " ".join(sorted(device["maintenance"]))
+                            "maintenance required: {}".join(
+                                sorted(device["maintenance"])
                             ),
                             level="WARN",
                             device=device,
                         )
+                        must_dump_state = True

                     if (
-                        device["location"] != state[did]["location"]
+                        device["is_synced"] != state[did]["is_synced"]
+                        or device["location"] != state[did]["location"]
                         or device["setup"]["id"] != state[did]["setup"]["id"]
                         or device["run"].get("resolution")
                         != state[did]["run"].get("resolution")
@@ -165,52 +162,23 @@ while True:
                 else:
                     LOG.info("adding device {} to empty state".format(device["id"]))

+                if device["is_online"]:
+                    online_devices.add(
+                        "{} ({})".format(
+                            device["id"],
+                            device["description"],
+                        )
+                    )
+
             state = new_state

-        try:
-            r = get(
-                "https://info-beamer.com/api/v1/account",
-                auth=("", CONFIG["api_key"]),
-            )
-            r.raise_for_status()
-            ib_account = r.json()
-        except RequestException as e:
-            LOG.exception("Could not get account data from info-beamer")
-            mqtt_out(
-                f"Could not get account data from info-beamer: {e!r}",
-                level="WARN",
-            )
-        else:
-            available_credits = ib_account["balance"]
-            if is_dump_time():
-                mqtt_out(f"Available Credits: {available_credits}")
-
-            if available_credits < 50:
-                mqtt_out(
-                    f"balance has dropped below 50 credits! (available: {available_credits})",
-                    level="ERROR",
-                )
-            elif available_credits < 100:
-                mqtt_out(
-                    f"balance has dropped below 100 credits! (available: {available_credits})",
-                    level="WARN",
-                )
-
-            for quota_name, quota_config in sorted(ib_account["quotas"].items()):
-                value = quota_config["count"]["value"]
-                limit = quota_config["count"]["limit"]
-
-                if value > limit * 0.9:
-                    mqtt_out(
-                        f"quota {quota_name} is over 90% (limit {limit}, value {value})",
-                        level="ERROR",
-                    )
-                elif value > limit * 0.8:
-                    mqtt_out(
-                        f"quota {quota_name} is over 80% (limit {limit}, value {value})",
-                        level="WARN",
-                    )
-
-        sleep(60)
+        if (
+            datetime.now(timezone.utc).strftime("%H%M") == "1312"
+            and online_devices
+            and int(datetime.now(timezone.utc).strftime("%S")) < 30
+        ):
+            mqtt_out("Online Devices: {}".format(", ".join(sorted(online_devices))))
+
+        sleep(30)
     except KeyboardInterrupt:
         break
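
The once-a-day gate in the new loop, isolated for readability. This is a sketch: the time values come from the diff above, while the helper name and its plain-arguments form are illustrative (the script inlines this condition):

    # Hypothetical helper mirroring the gate above: fire once during the
    # 30-second window starting at 13:12 UTC, and only if there is
    # anything to report.
    from datetime import datetime, timezone

    def should_dump(online_devices):
        now = datetime.now(timezone.utc)
        return (
            now.strftime("%H%M") == "1312"
            and bool(online_devices)
            and now.second < 30  # equivalent to int(now.strftime("%S")) < 30
        )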

@@ -19,4 +19,9 @@ defaults = {
             '/usr/bin/ipmitool *',
         },
     },
+    'pacman': {
+        'packages': {
+            'ipmitool': {},
+        },
+    },
 }

@@ -13,6 +13,15 @@ defaults = {
             },
         },
     },
+    'pacman': {
+        'packages': {
+            'dehydrated': {
+                'needed_by': {
+                    'action:letsencrypt_update_certificates',
+                },
+            },
+        },
+    },
 }

@@ -10,4 +10,15 @@ defaults = {
             },
         },
     },
+    'pacman': {
+        'packages': {
+            'lldpd': {
+                'needed_by': {
+                    'directory:/etc/lldpd.d',
+                    'file:/etc/lldpd.conf',
+                    'svc_systemd:lldpd',
+                },
+            },
+        },
+    },
 }

@@ -4,6 +4,11 @@ defaults = {
             'lm-sensors': {},
         },
     },
+    'pacman': {
+        'packages': {
+            'lm_sensors': {},
+        },
+    },
     'telegraf': {
         'input_plugins': {
             'builtin': {

@@ -31,7 +31,7 @@ homeservers:
 % endfor

 accessTokens:
-  maxCacheTimeSeconds: 10
+  maxCacheTimeSeconds: 0
   useLocalAppserviceConfig: false

 admins:
@@ -137,8 +137,8 @@ thumbnails:

 rateLimit:
   enabled: true
-  requestsPerSecond: 100
-  burst: 5000
+  requestsPerSecond: 10
+  burst: 50

 identicons:
   enabled: true

@@ -1,3 +1,8 @@
+if node.has_bundle('pacman'):
+    package = 'pkg_pacman:nfs-utils'
+else:
+    package = 'pkg_apt:nfs-common'
+
 for mount, data in node.metadata.get('nfs-client/mounts',{}).items():
     data['mount'] = mount
     data['mount_options'] = set(data.get('mount_options', set()))
@@ -37,7 +42,7 @@ for mount, data in node.metadata.get('nfs-client/mounts',{}).items():
             'file:/etc/systemd/system/{}.automount'.format(unitname),
             'directory:{}'.format(data['mountpoint']),
             'svc_systemd:systemd-networkd',
-            'pkg_apt:nfs-common',
+            package,
         },
     }
 else:
@@ -53,7 +58,7 @@ for mount, data in node.metadata.get('nfs-client/mounts',{}).items():
             'file:/etc/systemd/system/{}.mount'.format(unitname),
             'directory:{}'.format(data['mountpoint']),
             'svc_systemd:systemd-networkd',
-            'pkg_apt:nfs-common',
+            package,
         },
     }

@@ -4,6 +4,11 @@ defaults = {
             'nfs-common': {},
         },
     },
+    'pacman': {
+        'packages': {
+            'nfs-utils': {},
+        },
+    },
 }

 if node.has_bundle('telegraf'):

@@ -23,8 +23,9 @@ table inet filter {
         icmp type timestamp-request drop
         icmp type timestamp-reply drop

-        meta l4proto {icmp, ipv6-icmp} accept
+        ip protocol icmp accept
+        ip6 nexthdr ipv6-icmp accept

 % for ruleset, rules in sorted(input.items()):
         # ${ruleset}

@@ -1,3 +1,8 @@
+if node.has_bundle('pacman'):
+    package = 'pkg_pacman:nftables'
+else:
+    package = 'pkg_apt:nftables'
+
 directories = {
     # used by other bundles
     '/etc/nftables-rules.d': {
@@ -37,7 +42,7 @@ svc_systemd = {
     'nftables': {
         'needs': {
             'file:/etc/nftables.conf',
-            'pkg_apt:nftables',
+            package,
         },
     },
 }

@@ -10,9 +10,26 @@ defaults = {
         'blocked_v4': repo.libs.firewall.global_ip4_blocklist,
         'blocked_v6': repo.libs.firewall.global_ip6_blocklist,
     },
+    'pacman': {
+        'packages': {
+            'nftables': {},
+            # https://github.com/bundlewrap/bundlewrap/issues/688
+            # 'iptables': {
+            #     'installed': False,
+            #     'needed_by': {
+            #         'pkg_pacman:iptables-nft',
+            #     },
+            # },
+            'iptables-nft': {
+                'needed_by': {
+                    'pkg_pacman:nftables',
+                },
+            },
+        },
+    },
 }

-if not node.has_bundle('vmhost') and not node.has_bundle('docker-engine'):
+if not node.has_bundle('vmhost'):
     # see comment in bundles/vmhost/items.py
     defaults['apt']['packages']['iptables'] = {
         'installed': False,

@@ -0,0 +1,9 @@
[Service]
ExecStart=
ExecStart=/usr/sbin/nginx -c /etc/nginx/nginx.conf

ExecReload=
ExecReload=/bin/sh -c "/bin/kill -s HUP $(/bin/cat /var/run/nginx.pid)"

ExecStop=
ExecStop=/bin/sh -c "/bin/kill -s TERM $(/bin/cat /var/run/nginx.pid)"

@@ -1,4 +1,4 @@
-user www-data;
+user ${username};
 worker_processes ${worker_processes};
 pid /var/run/nginx.pid;
@@ -11,7 +11,7 @@ events {
 http {
     include /etc/nginx/mime.types;
     types {
-        application/javascript mjs;
+        application/javascript js mjs;
     }
     default_type application/octet-stream;
     charset UTF-8;

@@ -149,18 +149,18 @@ server {
 % if 'target' in options:
             proxy_pass ${options['target']};
             proxy_http_version ${options.get('http_version', '1.1')};
-            proxy_set_header Host ${options.get('proxy_pass_host', domain)};
+            proxy_set_header Host ${domain};
 % if options.get('websockets', False):
             proxy_set_header Connection "upgrade";
             proxy_set_header Upgrade $http_upgrade;
 % endif
             proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
             proxy_set_header X-Forwarded-Proto $scheme;
-            proxy_set_header X-Forwarded-Host ${options.get('x_forwarded_host', options.get('proxy_pass_host', domain))};
+            proxy_set_header X-Forwarded-Host ${options.get('x_forwarded_host', domain)};
 % for option, value in options.get('proxy_set_header', {}).items():
             proxy_set_header ${option} ${value};
 % endfor
-% if location != '/' and location != '= /':
+% if location != '/':
             proxy_set_header X-Script-Name ${location};
 % endif
             proxy_buffering off;

@@ -1,5 +1,12 @@
 from datetime import datetime, timedelta

+if node.has_bundle('pacman'):
+    package = 'pkg_pacman:nginx'
+    username = 'http'
+else:
+    package = 'pkg_apt:nginx'
+    username = 'www-data'
+
 directories = {
     '/etc/nginx/sites': {
         'purge': True,
@@ -17,9 +24,9 @@ directories = {
         },
     },
     '/var/log/nginx-timing': {
-        'owner': 'www-data',
+        'owner': username,
         'needs': {
-            'pkg_apt:nginx',
+            package,
         },
     },
     '/var/www': {},
@@ -33,6 +40,7 @@ files = {
     '/etc/nginx/nginx.conf': {
         'content_type': 'mako',
         'context': {
+            'username': username,
             **node.metadata['nginx'],
         },
         'triggers': {
@@ -61,13 +69,21 @@ files = {
     '/var/www/error.html': {},
     '/var/www/not_found.html': {},
 }

+if node.has_bundle('pacman'):
+    files['/etc/systemd/system/nginx.service.d/bundlewrap.conf'] = {
+        'source': 'arch-override.conf',
+        'triggers': {
+            'action:systemd-reload',
+            'svc_systemd:nginx:restart',
+        },
+    }
+
 svc_systemd = {
     'nginx': {
         'needs': {
             'action:generate-dhparam',
             'directory:/var/log/nginx-timing',
-            'pkg_apt:nginx',
+            package,
         },
     },
 }

@@ -33,6 +33,11 @@ defaults = {
     'nginx': {
         'worker_connections': 768,
     },
+    'pacman': {
+        'packages': {
+            'nginx': {},
+        },
+    },
 }

 if node.has_bundle('telegraf'):

@@ -33,6 +33,7 @@ def nodejs_from_version(metadata):
             'nodejs': {
                 'items': {
                     f'deb https://deb.nodesource.com/node_{version}.x nodistro main',
+                    f'deb-src https://deb.nodesource.com/node_{version}.x nodistro main',
                 },
             },
         },

@@ -27,22 +27,29 @@ files = {
     },
 }

+if node.has_bundle('pacman'):
+    package = 'pkg_pacman:openssh'
+    service = 'sshd'
+else:
+    package = 'pkg_apt:openssh-server'
+    service = 'ssh'
+
 actions = {
     'sshd_check_config': {
         'command': 'sshd -T -C user=root -C host=localhost -C addr=localhost',
         'triggered': True,
         'triggers': {
-            'svc_systemd:ssh:restart',
+            'svc_systemd:{}:restart'.format(service),
         },
     },
 }

 svc_systemd = {
-    'ssh': {
+    service: {
         'needs': {
             'file:/etc/systemd/system/ssh.service.d/bundlewrap.conf',
             'file:/etc/ssh/sshd_config',
-            'pkg_apt:openssh-server',
+            package,
         },
     },
 }

@@ -8,6 +8,11 @@ defaults = {
             'openssh-sftp-server': {},
         },
     },
+    'pacman': {
+        'packages': {
+            'openssh': {},
+        },
+    },
 }

 @metadata_reactor.provides(

@@ -0,0 +1,38 @@
#!/bin/bash

statusfile="/var/tmp/unattended_upgrades.status"
if ! [[ -f "$statusfile" ]]
then
    echo "Status file not found"
    exit 3
fi

mtime=$(stat -c %Y $statusfile)
now=$(date +%s)
if (( $now - $mtime > 60*60*24*8 ))
then
    echo "Status file is older than 8 days!"
    exit 3
fi

exitcode=$(cat $statusfile)
case "$exitcode" in
    abort_ssh)
        echo "Upgrades skipped due to active SSH login"
        exit 1
        ;;
    0)
        if [[ -f /var/run/reboot-required ]]
        then
            echo "OK, but updates require a reboot"
            exit 1
        else
            echo "OK"
            exit 0
        fi
        ;;
    *)
        echo "Last exitcode was $exitcode"
        exit 2
        ;;
esac

@@ -0,0 +1,18 @@
#!/bin/bash

set -xeuo pipefail

pacman -Syu --noconfirm --noprogressbar

% for affected, restarts in sorted(restart_triggers.items()):
up_since=$(systemctl show "${affected}" | sed -n 's/^ActiveEnterTimestamp=//p' || echo 0)
up_since_ts=$(date -d "$up_since" +%s || echo 0)
now=$(date +%s)

if [ $((now - up_since_ts)) -lt 3600 ]
then
% for restart in sorted(restarts):
    systemctl restart "${restart}" || true
% endfor
fi
% endfor

@@ -0,0 +1,2 @@
# just disable faillock.
deny = 0

@@ -0,0 +1,52 @@
[options]
Architecture = auto
CheckSpace
Color
HoldPkg = ${' '.join(sorted(node.metadata.get('pacman/ask_before_removal')))}
ILoveCandy
IgnorePkg = ${' '.join(sorted(node.metadata.get('pacman/ignore_packages', set())))}
LocalFileSigLevel = Optional
NoExtract=${' '.join(sorted(node.metadata.get('pacman/no_extract', set())))}
ParallelDownloads = ${node.metadata.get('pacman/parallel_downloads')}
SigLevel = Required DatabaseOptional
VerbosePkgLists

% for line in sorted(node.metadata.get('pacman/additional_config', set())):
${line}
% endfor

[core]
Server = ${node.metadata.get('pacman/repository')}
Include = /etc/pacman.d/mirrorlist

[extra]
Server = ${node.metadata.get('pacman/repository')}
Include = /etc/pacman.d/mirrorlist

[community]
Server = ${node.metadata.get('pacman/repository')}
Include = /etc/pacman.d/mirrorlist

% if node.metadata.get('pacman/enable_multilib', False):
[multilib]
Server = ${node.metadata.get('pacman/repository')}
Include = /etc/pacman.d/mirrorlist
% endif

% if node.metadata.get('pacman/enable_aurto', True):
[aurto]
Server = https://aurto.kunbox.net/
SigLevel = Optional TrustAll
% endif

% if node.has_bundle('zfs'):
[archzfs]
Server = http://archzfs.com/archzfs/x86_64

% if node.metadata.get('pacman/linux-lts', False):
[zfs-linux-lts]
% else:
[zfs-linux]
% endif
Server = http://kernels.archzfs.com/$repo/
% endif

@@ -0,0 +1,49 @@
#!/bin/bash

# With systemd, we can force logging to the journal. This is better than
# spamming the world with cron mails. You can then view these logs using
# "journalctl -rat upgrade-and-reboot".
if which logger >/dev/null 2>&1
then
    # Dump stdout and stderr to logger, which will then put everything
    # into the journal.
    exec 1> >(logger -t upgrade-and-reboot -p user.info)
    exec 2> >(logger -t upgrade-and-reboot -p user.error)
fi

. /etc/upgrade-and-reboot.conf

echo "Starting upgrade-and-reboot for node $nodename ..."

statusfile="/var/tmp/unattended_upgrades.status"
# Workaround, because /var/tmp is usually 1777
[[ "$UID" == 0 ]] && chown root:root "$statusfile"

logins=$(ps h -C sshd -o euser | awk '$1 != "root" && $1 != "sshd" && $1 != "sshmon" && $1 != "nobody"')
if [[ -n "$logins" ]]
then
    echo "Will abort now, there are active SSH logins: $logins"
    echo "abort_ssh" > "$statusfile"
    exit 1
fi

softlockdir=/var/lib/bundlewrap/soft-$nodename
mkdir -p "$softlockdir"
printf '{"comment": "UPDATE", "date": %s, "expiry": %s, "id": "UNATTENDED", "items": ["*"], "user": "root@localhost"}\n' \
    $(date +%s) \
    $(date -d 'now + 30 mins' +%s) \
    >"$softlockdir"/UNATTENDED
trap 'rm -f "$softlockdir"/UNATTENDED' EXIT

do-unattended-upgrades
ret=$?

echo "$ret" > "$statusfile"
if (( $ret != 0 ))
then
    exit 1
fi

systemctl reboot

echo "upgrade-and-reboot for node $nodename is DONE"

@@ -0,0 +1,3 @@
nodename="${node.name}"
reboot_mail_to="${node.metadata.get('apt/unattended-upgrades/reboot_mail_to', '')}"
auto_reboot_enabled="${node.metadata.get('apt/unattended-upgrades/reboot_enabled', True)}"

bundles/pacman/items.py

@@ -0,0 +1,113 @@
from bundlewrap.exceptions import BundleError

if not node.os == 'arch':
    raise BundleError(f'{node.name}: bundle:pacman requires arch linux')

files = {
    '/etc/pacman.conf': {
        'content_type': 'mako',
    },
    '/etc/upgrade-and-reboot.conf': {
        'content_type': 'mako',
    },
    '/etc/security/faillock.conf': {},
    '/usr/local/sbin/upgrade-and-reboot': {
        'mode': '0700',
    },
    '/usr/local/sbin/do-unattended-upgrades': {
        'content_type': 'mako',
        'mode': '0700',
        'context': {
            'restart_triggers': node.metadata.get('pacman/restart_triggers', {}),
        }
    },
    '/usr/local/share/icinga/plugins/check_unattended_upgrades': {
        'mode': '0755',
    },
}

svc_systemd['paccache.timer'] = {
    'needs': {
        'pkg_pacman:pacman-contrib',
    },
}

pkg_pacman = {
    'at': {},
    'autoconf': {},
    'automake': {},
    'bind': {},
    'binutils': {},
    'bison': {},
    'bzip2': {},
    'curl': {},
    'dialog': {},
    'diffutils': {},
    'fakeroot': {},
    'file': {},
    'findutils': {},
    'flex': {},
    'fwupd': {},
    'gawk': {},
    'gcc': {},
    'gettext': {},
    'git': {},
    'gnu-netcat': {},
    'grep': {},
    'groff': {},
    'gzip': {},
    'htop': {},
    'jq': {},
    'ldns': {},
    'less': {},
    'libtool': {},
    'logrotate': {},
    'lsof': {},
    'm4': {},
    'mailutils': {},
    'make': {},
    'man-db': {},
    'man-pages': {},
    'moreutils': {},
    'mtr': {},
    'ncdu': {},
    'nmap': {},
    'pacman-contrib': {},
    'patch': {},
    'pkgconf': {},
    'python': {},
    'python-setuptools': {
        'needed_by': {
            'pkg_pip:',
        },
    },
    'python-pip': {
        'needed_by': {
            'pkg_pip:',
        },
    },
    'python-virtualenv': {},
    'rsync': {},
    'run-parts': {},
    'sed': {},
    'tar': {},
    'texinfo': {},
    'tmux': {},
    'tree': {},
    'unzip': {},
    'vim': {},
    'wget': {},
    'which': {},
    'whois': {},
    'zip': {},
}

if node.metadata.get('pacman/linux-lts', False):
    pkg_pacman['linux-lts'] = {}
    pkg_pacman['acpi_call-lts'] = {}
else:
    pkg_pacman['linux'] = {}
    pkg_pacman['acpi_call'] = {}

for pkg, config in node.metadata.get('pacman/packages', {}).items():
    pkg_pacman[pkg] = config

@@ -0,0 +1,54 @@
defaults = {
    'pacman': {
        'ask_before_removal': {
            'glibc',
            'pacman',
        },
        'no_extract': {
            'etc/cron.d/0hourly',
            # don't install systemd-homed pam module. It produces a lot of spam in
            # journal about systemd-homed not being active, so just get rid of it.
            # Requires reinstall of systemd package, though
            'usr/lib/security/pam_systemd_home.so',
        },
        'parallel_downloads': 4,
        'repository': 'http://ftp.uni-kl.de/pub/linux/archlinux/$repo/os/$arch',
        'unattended-upgrades': {
            'day': 5,
            'hour': 21,
        },
    },
}


@metadata_reactor.provides(
    'cron/jobs/upgrade-and-reboot',
    'icinga2_api/pacman/services',
)
def patchday(metadata):
    if not metadata.get('pacman/unattended-upgrades/is_enabled', False):
        return {}

    day = metadata.get('pacman/unattended-upgrades/day')
    hour = metadata.get('pacman/unattended-upgrades/hour')

    return {
        'cron': {
            'jobs': {
                'upgrade-and-reboot': '{minute} {hour} * * {day} root /usr/local/sbin/upgrade-and-reboot'.format(
                    minute=node.magic_number % 30,
                    hour=hour,
                    day=day,
                ),
            },
        },
        'icinga2_api': {
            'pacman': {
                'services': {
                    'UNATTENDED UPGRADES': {
                        'command_on_monitored_host': '/usr/local/share/icinga/plugins/check_unattended_upgrades',
                    },
                },
            },
        },
    }
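
For reference, the cron line this reactor emits — a minimal sketch assuming a node whose `magic_number` is 42 and the defaults above (day 5, hour 21); the standalone variables stand in for node attributes:

    # Hypothetical values; node.magic_number is bundlewrap's stable per-node hash.
    magic_number = 42
    day, hour = 5, 21
    line = '{minute} {hour} * * {day} root /usr/local/sbin/upgrade-and-reboot'.format(
        minute=magic_number % 30,  # 12 -> spreads nodes across a 30-minute window
        hour=hour,
        day=day,
    )
    print(line)  # 12 21 * * 5 root /usr/local/sbin/upgrade-and-reboot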

@@ -34,7 +34,7 @@ defaults = {
         },
     },
     'nodejs': {
-        'version': 22,
+        'version': 18,
     },
     'postgresql': {
         'roles': {

@@ -0,0 +1,6 @@
[Service]
# arch postfix is not set up for chrooting by default
ExecStartPre=-/usr/sbin/mkdir -p /var/spool/postfix/etc
% for file in ['/etc/localtime', '/etc/nsswitch.conf', '/etc/resolv.conf', '/etc/services']:
ExecStartPre=-/usr/sbin/cp -p ${file} /var/spool/postfix${file}
% endfor

@@ -1,5 +1,3 @@
-devnull@${node.metadata.get('postfix/myhostname')} DISCARD DEV-NULL
-
 % for address in sorted(blocked):
 ${address} REJECT
 % endfor

@@ -3,7 +3,7 @@ biff = no
 append_dot_mydomain = no
 readme_directory = no
 compatibility_level = 2
-myhostname = ${node.metadata.get('postfix/myhostname')}
+myhostname = ${node.metadata.get('postfix/myhostname', node.metadata['hostname'])}
 myorigin = /etc/mailname
 mydestination = $myhostname, localhost
 mynetworks = ${' '.join(sorted(mynetworks))}
@@ -25,6 +25,7 @@ inet_interfaces = 127.0.0.1
 % endif

 <%text>
+smtp_use_tls = yes
 smtp_tls_loglevel = 1
 smtp_tls_note_starttls_offer = yes
 smtp_tls_session_cache_database = btree:${data_directory}/smtp_scache
@@ -37,8 +38,8 @@ smtp_tls_CAfile = /etc/ssl/certs/ca-certificates.crt
 </%text>

 % if node.has_bundle('postfixadmin'):
-smtpd_tls_cert_file = /var/lib/dehydrated/certs/${node.metadata.get('postfix/myhostname')}/fullchain.pem
-smtpd_tls_key_file = /var/lib/dehydrated/certs/${node.metadata.get('postfix/myhostname')}/privkey.pem
+smtpd_tls_cert_file = /var/lib/dehydrated/certs/${node.metadata.get('postfix/myhostname', node.metadata['hostname'])}/fullchain.pem
+smtpd_tls_key_file = /var/lib/dehydrated/certs/${node.metadata.get('postfix/myhostname', node.metadata['hostname'])}/privkey.pem
 <%text>
 smtpd_use_tls=yes
 smtpd_tls_session_cache_database = btree:${data_directory}/smtpd_scache
@@ -47,7 +48,7 @@ smtpd_client_restrictions = permit_mynetworks permit_sasl_authenticated
 smtpd_helo_required = yes
 smtpd_helo_restrictions = permit_mynetworks reject_invalid_helo_hostname
 smtpd_data_restrictions = reject_unauth_pipelining
-smtpd_recipient_restrictions = check_recipient_access hash:/etc/postfix/blocked_recipients, permit_mynetworks
+smtpd_recipient_restrictions = permit_mynetworks, check_recipient_access hash:/etc/postfix/blocked_recipients
 smtpd_relay_before_recipient_restrictions = yes

 # https://ssl-config.mozilla.org/#server=postfix&version=3.7.10&config=intermediate&openssl=3.0.11&guideline=5.7

@@ -21,12 +21,13 @@ for identifier in node.metadata.get('postfix/mynetworks', set()):
         netmask = '128'
     mynetworks.add(f'[{ip6}]/{netmask}')

+my_package = 'pkg_pacman:postfix' if node.os == 'arch' else 'pkg_apt:postfix'
+
 files = {
     '/etc/mailname': {
-        'content': node.metadata.get('postfix/myhostname'),
+        'content': node.metadata.get('postfix/myhostname', node.metadata['hostname']),
         'before': {
-            'pkg_apt:postfix',
+            my_package,
         },
         'triggers': {
             'svc_systemd:postfix:restart',
@@ -81,7 +82,7 @@ actions = {
         'command': 'newaliases',
         'triggered': True,
         'needs': {
-            'pkg_apt:postfix',
+            my_package,
         },
         'before': {
             'svc_systemd:postfix',
@@ -91,7 +92,7 @@ actions = {
         'command': 'postmap hash:/etc/postfix/blocked_recipients',
         'triggered': True,
         'needs': {
-            'pkg_apt:postfix',
+            my_package,
        },
         'before': {
             'svc_systemd:postfix',
@@ -104,7 +105,17 @@ svc_systemd = {
         'needs': {
             'file:/etc/postfix/master.cf',
             'file:/etc/postfix/main.cf',
-            'pkg_apt:postfix',
+            my_package,
         },
     },
 }
+
+if node.os == 'arch':
+    files['/etc/systemd/system/postfix.service.d/bundlewrap.conf'] = {
+        'source': 'arch-override.conf',
+        'content_type': 'mako',
+        'triggers': {
+            'action:systemd-reload',
+            'svc_systemd:postfix:restart',
+        },
+    }

@@ -14,7 +14,7 @@ defaults = {
     'postfix': {
         'services': {
             'POSTFIX PROCESS': {
-                'command_on_monitored_host': '/usr/local/share/icinga/plugins/check_systemd_unit postfix@-',
+                'command_on_monitored_host': '/usr/local/share/icinga/plugins/check_systemd_unit postfix' + ('' if node.os == 'arch' else '@-'),
             },
             'POSTFIX QUEUE': {
                 'command_on_monitored_host': 'sudo /usr/local/share/icinga/plugins/check_postfix_queue -w 20 -c 40 -d 50',
@@ -22,6 +22,12 @@ defaults = {
             },
         },
     },
+    'pacman': {
+        'packages': {
+            'postfix': {},
+            's-nail': {},
+        },
+    },
 }

 if node.has_bundle('postfixadmin'):
@@ -81,7 +87,7 @@ def letsencrypt(metadata):
     }

     result['domains'] = {
-        metadata.get('postfix/myhostname'): set(),
+        metadata.get('postfix/myhostname', metadata.get('hostname')): set(),
     }

     return {
@@ -142,14 +148,3 @@ def icinga2(metadata):
         },
     },
 }
-
-
-@metadata_reactor.provides(
-    'postfix/myhostname',
-)
-def myhostname(metadata):
-    return {
-        'postfix': {
-            'myhostname': metadata.get('hostname'),
-        },
-    }

@@ -57,7 +57,7 @@ files = {
     },
 }

-if node.has_bundle('backup-client'):
+if node.has_bundle('backup-client') and not node.has_bundle('zfs'):
     files['/etc/backup-pre-hooks.d/90-postgresql-dump-all'] = {
         'source': 'backup-pre-hook',
         'content_type': 'mako',
@@ -67,6 +67,10 @@ if node.has_bundle('backup-client'):
         'mode': '0700',
     }
     directories['/var/tmp/postgresdumps'] = {}
+else:
+    files['/var/tmp/postgresdumps'] = {
+        'delete': True,
+    }

 postgres_roles = {
     'root': {
postgres_roles = { postgres_roles = {
'root': { 'root': {

View file

@ -11,7 +11,6 @@ defaults = {
'backups': { 'backups': {
'paths': { 'paths': {
'/var/lib/postgresql', '/var/lib/postgresql',
'/var/tmp/postgresdumps',
}, },
}, },
'bash_functions': { 'bash_functions': {
@ -75,6 +74,8 @@ if node.has_bundle('zfs'):
}, },
}, },
} }
else:
defaults['backups']['paths'].add('/var/tmp/postgresdumps')
@metadata_reactor.provides( @metadata_reactor.provides(

@@ -3,8 +3,6 @@ from os import listdir
 from os.path import isfile, join
 from subprocess import check_output

-from bundlewrap.utils.ui import io
-
 zone_path = join(repo.path, 'data', 'powerdns', 'files', 'bind-zones')

 nameservers = set()
@@ -81,10 +79,9 @@ if node.metadata.get('powerdns/features/bind', False):
             continue

         try:
-            output = check_output(['git', 'log', '-1', '--pretty=%ci']).decode('utf-8').strip()
+            output = check_output(['git', 'log', '-1', '--pretty=%ci', join(zone_path, zone)]).decode('utf-8').strip()
             serial = datetime.strptime(output, '%Y-%m-%d %H:%M:%S %z').strftime('%y%m%d%H%M')
-        except Exception as e:
-            io.stderr(f"Error while parsing commit time for {zone} serial: {e!r}")
+        except:
             serial = datetime.now().strftime('%y%m%d0000')

         primary_zones.add(zone)
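
The serial derivation, sketched standalone: the zone serial is the last git commit timestamp of that one zone file, formatted YYMMDDHHMM. This assumes a git checkout of the repo; the function name and its single-argument form are illustrative:

    # Hypothetical standalone version of the serial logic above.
    from datetime import datetime
    from subprocess import check_output

    def zone_serial(zone_file):
        try:
            # last commit date of just this file, e.g. "2024-05-01 12:34:56 +0200"
            out = check_output(['git', 'log', '-1', '--pretty=%ci', zone_file]).decode().strip()
            return datetime.strptime(out, '%Y-%m-%d %H:%M:%S %z').strftime('%y%m%d%H%M')
        except Exception:
            # fall back to "today, 0000" if the file is not tracked yet
            return datetime.now().strftime('%y%m%d0000')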

@@ -71,8 +71,8 @@ actions = {
             'chown -R powerdnsadmin:powerdnsadmin /opt/powerdnsadmin/src/powerdnsadmin/static/',
         ]),
         'needs': {
+            'action:nodejs_install_yarn',
             'action:powerdnsadmin_install_deps',
-            'bundle:nodejs',
             'pkg_apt:',
         },
     },

@@ -14,7 +14,7 @@ defaults = {
         },
     },
     'nodejs': {
-        'version': 22,
+        'version': 18,
     },
     'users': {
         'powerdnsadmin': {

@@ -7,6 +7,7 @@ from subprocess import check_output
 from requests import get

 UPDATE_URL = '${url}'
+USERNAME = '${username}'
 PASSWORD = '${password}'

@@ -5,6 +5,7 @@ from ipaddress import ip_address
 from json import loads
 from subprocess import check_output, run

+DOMAIN = '${domain}'

 # <%text>

View file

@@ -1,5 +1,5 @@
 assert node.has_bundle('redis'), f'{node.name}: pretalx needs redis'
-assert node.has_bundle('nodejs'), f'{node.name}: pretalx needs nodejs for rebuild step'
+assert node.has_bundle('nodejs'), f'{node.name}: pretalx needs nodejs for rebuild and regenerate_css step'

 actions = {
     'pretalx_create_virtualenv': {

@@ -53,6 +53,17 @@ actions = {
         },
         'triggered': True,
     },
+    'pretalx_regenerate-css': {
+        'command': 'sudo -u pretalx PRETALX_CONFIG_FILE=/opt/pretalx/pretalx.cfg /opt/pretalx/venv/bin/python -m pretalx regenerate_css',
+        'needs': {
+            'action:pretalx_migrate',
+            'directory:/opt/pretalx/data',
+            'directory:/opt/pretalx/static',
+            'file:/opt/pretalx/pretalx.cfg',
+            'bundle:nodejs',
+        },
+        'triggered': True,
+    },
 }

 users = {

@@ -79,6 +90,7 @@ git_deploy = {
         'action:pretalx_install',
         'action:pretalx_migrate',
         'action:pretalx_rebuild',
+        'action:pretalx_regenerate-css',
         'svc_systemd:pretalx-web:restart',
         'svc_systemd:pretalx-worker:restart',
     },

@@ -109,6 +121,7 @@ svc_systemd = {
         'action:pretalx_install',
         'action:pretalx_migrate',
         'action:pretalx_rebuild',
+        'action:pretalx_regenerate-css',
         'file:/etc/systemd/system/pretalx-web.service',
         'file:/opt/pretalx/pretalx.cfg',
     },

@@ -117,7 +130,6 @@ svc_systemd = {
     'needs': {
         'action:pretalx_install',
         'action:pretalx_migrate',
-        'action:pretalx_rebuild',
         'file:/etc/systemd/system/pretalx-worker.service',
         'file:/opt/pretalx/pretalx.cfg',
     },

@@ -192,6 +204,7 @@ for plugin_name, plugin_config in node.metadata.get('pretalx/plugins', {}).items():
         'triggers': {
             'action:pretalx_migrate',
             'action:pretalx_rebuild',
+            'action:pretalx_regenerate-css',
             'svc_systemd:pretalx-web:restart',
             'svc_systemd:pretalx-worker:restart',
         },
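Note on 'triggered': True as used by pretalx_regenerate-css above: a triggered action never runs on its own, it only fires when another item lists it in 'triggers' (here the git_deploy and the plugin packages), while 'needs' merely orders it once it does fire. A minimal sketch of that pattern, with hypothetical names:

actions = {
    'rebuild_assets': {
        'command': 'true',       # placeholder command
        'triggered': True,       # runs only when something triggers it
    },
}

files = {
    '/etc/example.conf': {
        'triggers': {
            'action:rebuild_assets',   # fires only when this file actually changes
        },
    },
}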


@@ -27,7 +27,7 @@ defaults = {
         },
     },
     'nodejs': {
-        'version': 22,
+        'version': 18,
     },
     'pretalx': {
         'database': {


@@ -1,13 +0,0 @@
files['/etc/proftpd/proftpd.conf'] = {
'source': f'{node.name}.conf',
'triggers': {
'svc_systemd:proftpd:restart',
},
}
svc_systemd['proftpd'] = {
'needs': {
'file:/etc/proftpd/proftpd.conf',
'pkg_apt:proftpd-core',
},
}


@@ -1,26 +0,0 @@
from bundlewrap.metadata import atomic
defaults = {
'apt': {
'packages': {
'proftpd-core': {},
},
},
}
@metadata_reactor.provides(
'firewall/port_rules',
)
def firewall(metadata):
    sources = atomic(metadata.get('proftpd/restrict-to', set()))
return {
'firewall': {
'port_rules': {
'20/tcp': sources,
'21/tcp': sources,
'49152-50192/tcp': sources,
},
},
}


@@ -48,4 +48,3 @@ tcp-keepalive 0
 timeout 0
 zset-max-ziplist-entries 128
 zset-max-ziplist-value 64
-protected-mode no


@@ -2,6 +2,7 @@ import re
 from json import load
 from os.path import join

 with open(join(repo.path, 'configs', 'netbox', f'{node.name}.json')) as f:
     netbox = load(f)


@@ -1,7 +1,7 @@
 reporting {
     enabled = true;
-    email = 'devnull@${node.metadata.get('postfix/myhostname')}';
-    domain = '${node.metadata.get('postfix/myhostname')}';
+    email = 'dmarc+${node.name.replace('.', '-')}@kunbox.net';
+    domain = '${node.metadata.get('hostname')}';
     org_name = 'kunbox.net';
     smtp = '127.0.0.1';
     smtp_port = 25;


@@ -96,7 +96,7 @@ if 'dkim' in node.metadata.get('rspamd', {}):
     },
 }

-dkim_key = repo.libs.faults.ensure_fault_or_none(node.metadata.get('rspamd/dkim'))
+dkim_key = repo.libs.faults.ensure_fault_or_none(node.metadata['rspamd']['dkim'])

 actions = {
     'rspamd_assure_dkim_key_permissions': {


@@ -6,11 +6,6 @@ defaults = {
             'rsyslog': {},
         },
     },
-    'backups': {
-        'paths': {
-            '/var/log/rsyslog',
-        },
-    },
     'icinga2_api': {
         'rsyslog': {
             'services': {


@@ -1,3 +0,0 @@
[Service]
RestartSec=10
Restart=on-failure


@@ -1,67 +0,0 @@
[global]
workgroup = KUNBOX
server string = ${node.name} samba
dns proxy = no
max log size = 1000
syslog = 1
syslog only = 1
panic action = /usr/share/samba/panic-action %d
encrypt passwords = true
passdb backend = tdbsam
obey pam restrictions = yes
map to guest = bad user
load printers = no
usershare allow guests = yes
allow insecure wide links = yes
min protocol = SMB2
% if timemachine:
vfs objects = fruit
fruit:aapl = yes
fruit:copyfile = yes
fruit:model = MacSamba
% endif
% for name, opts in sorted(node.metadata.get('samba/shares', {}).items()):
[${name}]
browseable = yes
comment = ${opts.get('comment', f'share of {opts["path"]}')}
fake oplocks = yes
force group = ${opts.get('force_group', 'nogroup')}
force user = ${opts.get('force_user', 'nobody')}
% if opts.get('guest_ok', True):
guest ok = yes
% else:
guest ok = no
% endif
locking = no
path = ${opts['path']}
printable = no
read only = no
vfs objects = catia fruit
writable = ${'yes' if opts.get('writable', False) else 'no'}
% if opts.get('follow_symlinks', True):
follow symlinks = yes
wide links = yes
% endif
% endfor
% for name in sorted(timemachine):
[timemachine-${name}]
comment = Time Machine backup for ${name}
available = yes
browseable = yes
guest ok = no
read only = false
valid users = timemachine-${name}
path = /srv/timemachine/${name}
durable handles = yes
vfs objects = catia fruit streams_xattr
fruit:delete_empty_adfiles = yes
fruit:metadata = stream
fruit:posix_rename = yes
fruit:time machine = yes
fruit:time machine max size = 2000G
fruit:veto_appledouble = no
fruit:wipe_intentionally_left_blank_rfork = yes
% endfor


@@ -1,21 +0,0 @@
<?xml version="1.0" standalone='no'?>
<!DOCTYPE service-group SYSTEM "avahi-service.dtd">
<service-group>
<name replace-wildcards="yes">%h</name>
<service>
<type>_smb._tcp</type>
<port>445</port>
</service>
<service>
<type>_device-info._tcp</type>
<port>0</port>
<txt-record>model=RackMac1,2</txt-record>
</service>
<service>
<type>_adisk._tcp</type>
% for idx, share_name in enumerate(sorted(shares)):
<txt-record>dk${idx}=adVN=timemachine-${share_name},adVF=0x82</txt-record>
% endfor
<txt-record>sys=waMa=0,adVF=0x100</txt-record>
</service>
</service-group>


@@ -1,85 +0,0 @@
svc_systemd = {
'nmbd': {
'needs': {
'pkg_apt:samba',
},
},
'smbd': {
'needs': {
'pkg_apt:samba',
},
},
}
timemachine_shares = node.metadata.get('samba/timemachine-shares', set())
files = {
'/etc/samba/smb.conf': {
'content_type': 'mako',
'context': {
'timemachine': timemachine_shares,
},
'triggers': {
'svc_systemd:nmbd:restart',
'svc_systemd:smbd:restart',
},
},
'/etc/systemd/system/nmbd.service.d/bundlewrap.conf': {
'source': 'override.conf',
'triggers': {
'action:systemd-reload',
'svc_systemd:nmbd:restart',
},
},
'/etc/systemd/system/smbd.service.d/bundlewrap.conf': {
'source': 'override.conf',
'triggers': {
'action:systemd-reload',
'svc_systemd:smbd:restart',
},
},
}
last_action = set()
for user, uconfig in node.metadata.get('users', {}).items():
if (
'password' not in uconfig
or uconfig.get('delete')
or user in ('root',)
):
continue
actions[f'smbpasswd_for_user_{user}'] = {
'command': f'smbpasswd -a -s {user}',
'unless': f'pdbedit -L | grep -E "^{user}:"',
'data_stdin': uconfig['password'] + '\n' + uconfig['password'],
'needs': {
'pkg_apt:samba',
f'user:{user}',
},
'after': last_action,
}
last_action = {
f'action:smbpasswd_for_user_{user}',
}
if timemachine_shares:
assert node.has_bundle('avahi-daemon'), f'{node.name}: samba needs avahi-daemon to publish time machine shares'
files['/etc/avahi/services/timemachine.service'] = {
'content_type': 'mako',
'context': {
'shares': timemachine_shares,
},
}
for share_name in timemachine_shares:
users[f'timemachine-{share_name}'] = {
'home': f'/srv/timemachine/{share_name}',
}
directories[f'/srv/timemachine/{share_name}'] = {
'owner': f'timemachine-{share_name}',
'group': f'timemachine-{share_name}',
'mode': '0700',
}
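Note on the smbpasswd loop above: smbpasswd -a -s reads the new password twice from stdin, hence the doubled password in data_stdin, and the 'after' chaining serializes the actions, presumably so that two smbpasswd runs never write to the tdbsam backend concurrently. The chaining pattern in isolation (hypothetical user names):

last_action = set()
for user in ('alice', 'bob'):
    actions[f'smbpasswd_for_user_{user}'] = {
        'command': f'smbpasswd -a -s {user}',
        'after': last_action,    # empty for the first user, then the previous action
    }
    last_action = {f'action:smbpasswd_for_user_{user}'}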


@@ -1,53 +0,0 @@
from bundlewrap.metadata import atomic
defaults = {
'apt': {
'packages': {
'samba': {},
'samba-vfs-modules': {},
}
}
}
@metadata_reactor.provides(
'firewall/port_rules',
)
def firewall(metadata):
return {
'firewall': {
'port_rules': {
'137/udp': atomic(metadata.get('samba/restrict-to', set())),
'138/udp': atomic(metadata.get('samba/restrict-to', set())),
'139/tcp': atomic(metadata.get('samba/restrict-to', set())),
'445/tcp': atomic(metadata.get('samba/restrict-to', set())),
},
},
}
@metadata_reactor.provides(
'zfs/datasets',
)
def timemachine_zfs(metadata):
shares = metadata.get('samba/timemachine-shares', set())
if not shares:
return {}
assert node.has_bundle('zfs'), f'{node.name}: time machine backups require zfs'
datasets = {
'tank/timemachine': {},
}
for share_name in shares:
datasets[f'tank/timemachine/{share_name}'] = {
'mountpoint': f'/srv/timemachine/{share_name}',
}
return {
'zfs': {
'datasets': datasets,
},
}
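Worked example for the timemachine_zfs reactor above, with hypothetical node metadata:

# given:   'samba': {'timemachine-shares': {'mbp'}}
# the reactor returns:
{
    'zfs': {
        'datasets': {
            'tank/timemachine': {},
            'tank/timemachine/mbp': {
                'mountpoint': '/srv/timemachine/mbp',
            },
        },
    },
}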


@@ -0,0 +1,21 @@
#!/bin/bash
set -euo pipefail
DATE=$(date +%F_%H-%M-%S)
cd "$1"
convert *.tiff no_ocr.pdf
ocrmypdf -l deu no_ocr.pdf has_ocr.pdf
rm -f *.tiff
rm -f no_ocr.pdf
chown nobody:nogroup has_ocr.pdf
mv has_ocr.pdf "/srv/scansnap/${DATE}.pdf"
cd /
rm -r "$1"


@@ -0,0 +1,9 @@
#!/bin/bash
set -euo pipefail
OUTFILE=$(mktemp -d)
scanimage --source 'ADF Duplex' --format tiff --mode Color --brightness 23 --resolution 300 --page-width 210 --page-height 297.3 -x 210 -y 297.3 --batch=${OUTFILE}/p%04d.tiff
/etc/scanbd/scripts/ocr.sh "$OUTFILE" &


@@ -0,0 +1,52 @@
global {
debug = true
debug-level = 2
user = saned
group = scanner
saned = "/usr/sbin/saned"
saned_opt = {}
saned_env = { "SANE_CONFIG_DIR=/etc/scanbd" }
scriptdir = /etc/scanbd/scripts
timeout = 500
pidfile = "/var/run/scanbd.pid"
environment {
device = "SCANBD_DEVICE"
action = "SCANBD_ACTION"
}
function function_knob {
filter = "^message.*"
desc = "The value of the function knob / wheel / selector"
env = "SCANBD_FUNCTION"
}
function function_mode {
filter = "^mode.*"
desc = "Color mode"
env = "SCANBD_FUNCTION_MODE"
}
multiple_actions = false
action scan {
filter = "^scan.*"
numerical-trigger {
from-value = 0
to-value = 1
}
desc = "Scan to file"
script = "scan.sh"
}
}
include(scanner.d/avision.conf)
include(scanner.d/fujitsu.conf)
include(scanner.d/hp.conf)
include(scanner.d/pixma.conf)
include(scanner.d/snapscan.conf)
include(scanner.d/canon.conf)
include(scanner.d/plustek.conf)

bundles/scansnap/items.py Normal file

@@ -0,0 +1,39 @@
directories = {
'/etc/scanbd/scripts': {
'purge': True,
},
'/srv/scansnap': {
'owner': 'nobody',
'group': 'nogroup',
},
}
files = {
'/etc/scanbd/scanbd.conf': {
'triggers': {
'svc_systemd:scanbd:restart',
},
},
'/etc/scanbd/scripts/ocr.sh': {
'mode': '0755',
'needs': {
'directory:/srv/scansnap',
},
},
'/etc/scanbd/scripts/scan.sh': {
'mode': '0755',
'needs': {
'directory:/srv/scansnap',
'file:/etc/scanbd/scripts/ocr.sh',
},
},
}
svc_systemd = {
'scanbd': {
'needs': {
'file:/etc/scanbd/scanbd.conf',
'pkg_apt:scanbd',
},
},
}


@@ -0,0 +1,22 @@
defaults = {
'apt': {
'packages': {
'sane-utils': {},
'scanbd': {},
'imagemagick': {},
'ocrmypdf': {},
'tesseract-ocr-deu': {},
},
},
'backups': {
'paths': {
'/srv/scansnap',
},
},
'cron': {
'jobs': {
# Automatically remove files which are older than 14 days
'scansnap_cleanup': '00 00 * * * root /usr/bin/find /srv/scansnap/ -mindepth 1 -mtime +14 -delete',
},
},
}
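The cron job above prunes scans older than roughly 14 days. A rough Python equivalent of the find invocation, purely illustrative and not part of the bundle (assumes the flat directory of PDFs that ocr.sh produces):

from pathlib import Path
from time import time

cutoff = time() - 14 * 86400
for path in Path('/srv/scansnap').iterdir():    # -mindepth 1: skip the directory itself
    if path.is_file() and path.stat().st_mtime < cutoff:
        path.unlink()                           # like -delete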


@@ -1,21 +0,0 @@
[Unit]
Description=SDM630 stats printout
Conflicts=getty@tty1.service
After=systemd-user-sessions.service getty@tty1.service plymouth-quit.service
[Service]
User=sdm630_mqtt
Group=sdm630_mqtt
ExecStart=/opt/sdm630_mqtt/venv/bin/python printout.py /opt/sdm630_mqtt/config.toml
WorkingDirectory=/opt/sdm630_mqtt/src
Restart=always
RestartSec=10
StandardInput=tty
StandardOutput=tty
StandardError=journal
TTYPath=/dev/tty1
TTYReset=yes
TTYVHangup=yes
[Install]
WantedBy=multi-user.target


@@ -1,14 +0,0 @@
[Unit]
Description=SDM630-to-MQTT bridge
After=network.target
[Service]
User=sdm630_mqtt
Group=sdm630_mqtt
ExecStart=/opt/sdm630_mqtt/venv/bin/python sdm630_mqtt.py /opt/sdm630_mqtt/config.toml
WorkingDirectory=/opt/sdm630_mqtt/src
Restart=always
RestartSec=1
[Install]
WantedBy=multi-user.target


@@ -1,76 +0,0 @@
directories['/opt/sdm630_mqtt/src'] = {}
git_deploy['/opt/sdm630_mqtt/src'] = {
'repo': 'https://git.franzi.business/kunsi/sdm630_mqtt.git',
'rev': 'main',
'triggers': {
'action:sdm630_mqtt_install_deps',
},
}
actions['sdm630_mqtt_create_virtualenv'] = {
'command': 'python3 -m virtualenv /opt/sdm630_mqtt/venv',
'unless': 'test -x /opt/sdm630_mqtt/venv/bin/python3',
'needs': {
'directory:/opt/sdm630_mqtt/src',
},
}
actions['sdm630_mqtt_install_deps'] = {
'command': 'cd /opt/sdm630_mqtt/src && /opt/sdm630_mqtt/venv/bin/pip install -r requirements.txt',
'triggered': True,
'needs': {
'action:sdm630_mqtt_create_virtualenv',
},
}
users['sdm630_mqtt'] = {
'home': '/opt/sdm630_mqtt',
}
files['/opt/sdm630_mqtt/config.toml'] = {
'content': repo.libs.faults.dict_as_toml(node.metadata.get('sdm630_mqtt/config')),
'triggers': set(),
}
if node.has_bundle('telegraf'):
files['/opt/sdm630_mqtt/config.toml']['triggers'].add('svc_systemd:telegraf:restart')
git_deploy['/opt/sdm630_mqtt/src']['triggers'].add('svc_systemd:telegraf:restart')
if node.metadata.get('sdm630_mqtt/enable_stats_collection', True):
files['/usr/local/lib/systemd/system/sdm630_to_mqtt.service'] = {
'triggers': {
'action:systemd-reload',
'svc_systemd:sdm630_to_mqtt:restart',
},
}
svc_systemd['sdm630_to_mqtt'] = {
'needs': {
'git_deploy:/opt/sdm630_mqtt/src',
'action:sdm630_mqtt_install_deps',
'file:/usr/local/lib/systemd/system/sdm630_to_mqtt.service',
},
}
files['/opt/sdm630_mqtt/config.toml']['triggers'].add('svc_systemd:sdm630_to_mqtt:restart')
git_deploy['/opt/sdm630_mqtt/src']['triggers'].add('svc_systemd:sdm630_to_mqtt:restart')
if node.metadata.get('sdm630_mqtt/enable_local_printout', False):
files['/usr/local/lib/systemd/system/sdm630_printout.service'] = {
'triggers': {
'action:systemd-reload',
'svc_systemd:sdm630_printout:restart',
},
}
svc_systemd['sdm630_printout'] = {
'needs': {
'git_deploy:/opt/sdm630_mqtt/src',
'action:sdm630_mqtt_install_deps',
'file:/usr/local/lib/systemd/system/sdm630_printout.service',
},
}
files['/opt/sdm630_mqtt/config.toml']['triggers'].add('svc_systemd:sdm630_printout:restart')
git_deploy['/opt/sdm630_mqtt/src']['triggers'].add('svc_systemd:sdm630_printout:restart')


@@ -1,38 +0,0 @@
defaults = {
'sdm630_mqtt': {
'config': {
'modbus': {
'host': '127.0.0.1',
'port': 501,
'unit_id': 1,
},
'mqtt': {
'prefix': 'sdm630',
'host': '127.0.0.1',
'port': 1883,
},
'printout': {
'title': 'SDM630',
},
'telegraf': {
'identifier': 'unknown',
},
},
},
'telegraf': {
'input_plugins': {
'execd': {
'sdm630_mqtt': {
'command': [
'/opt/sdm630_mqtt/venv/bin/python',
'/opt/sdm630_mqtt/src/telegraf.py',
'/opt/sdm630_mqtt/config.toml',
],
'signal': 'none',
'restart_delay': '1s',
'data_format': 'influx',
},
},
},
},
}


@@ -43,6 +43,30 @@ if node.has_bundle('telegraf'):
 }

+@metadata_reactor.provides(
+    'smartd/disks',
+)
+def zfs_disks_to_metadata(metadata):
+    disks = set()
+
+    for config in metadata.get('zfs/pools', {}).values():
+        for option in config['when_creating']['config']:
+            if option.get('type', '') in {'log', 'cache'}:
+                continue
+
+            for disk in option['devices']:
+                if search(r'p([0-9]+)$', disk) or disk.startswith('/dev/mapper/'):
+                    continue
+
+                disks.add(disk)
+
+    return {
+        'smartd': {
+            'disks': disks,
+        },
+    }
+
+
 @metadata_reactor.provides(
     'icinga2_api/smartd/services',
 )
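Quick illustration of the device filter in zfs_disks_to_metadata: partition names (trailing pN) and device-mapper targets are skipped, whole disks are kept for smartd. The device paths below are hypothetical:

from re import search

for disk in (
    '/dev/sda',                   # kept: whole disk
    '/dev/disk/by-id/ata-FOO',    # kept: whole disk by id
    '/dev/nvme0n1p2',             # skipped: partition suffix
    '/dev/mapper/crypted',        # skipped: device-mapper target
):
    skipped = bool(search(r'p([0-9]+)$', disk)) or disk.startswith('/dev/mapper/')
    print(disk, 'skipped' if skipped else 'kept')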


@@ -4,30 +4,27 @@ from re import findall
 from subprocess import check_output
 from sys import exit

-ITERATIONS = 10
-
 try:
     top_output = None
-    top_output = check_output(rf"top -b -n{ITERATIONS} -d1 | grep -i '^%cpu'", shell=True).decode('UTF-8')
+    for line in check_output(['top', '-b', '-n1', '-d1']).decode('UTF-8').splitlines():
+        if line.lower().strip().startswith('%cpu'):
+            top_output = line.lower().split(':', 2)[1]
+            break
+
+    if not top_output:
+        print('%cpu not found in top output')
+        exit(3)

     cpu_usage = {}
-    for value, identifier in findall(r'([0-9\.\,]{3,5}) ([a-z]{2})', top_output):
-        if identifier not in cpu_usage:
-            cpu_usage[identifier] = 0.0
-        cpu_usage[identifier] += float(value.replace(',', '.'))
-
-    output = []
-    for identifier, value_added in cpu_usage.items():
-        value = value_added / ITERATIONS
-        output.append(f"{value:.2f} {identifier}")
-        cpu_usage[identifier] = value
-    print(f"Average over {ITERATIONS} seconds: " + ", ".join(output))
+    for value, identifier in findall('([0-9\.\,]{3,5}) ([a-z]{2})', top_output):
+        cpu_usage[identifier] = float(value.replace(',', '.'))

 warn = set()
 crit = set()
+print(top_output)

 # steal
 if cpu_usage['st'] > 10:
     crit.add('CPU steal is {}% (>10%)'.format(cpu_usage['st']))
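For reference, what the new single-sample parsing extracts from a typical top(1) summary line. The sample line below is made up, and the exact fields and decimal separator vary with top version and locale:

from re import findall

line = '%cpu(s):  3,1 us,  1,0 sy,  0,0 ni, 95,4 id,  0,2 wa,  0,0 hi,  0,2 si,  0,1 st'
top_output = line.split(':', 2)[1]
cpu_usage = {
    identifier: float(value.replace(',', '.'))
    for value, identifier in findall(r'([0-9\.\,]{3,5}) ([a-z]{2})', top_output)
}
print(cpu_usage['st'])   # -> 0.1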

Some files were not shown because too many files have changed in this diff.