Compare commits

No commits in common. "main" and "feature/kunsi-ipv6-only-vlan" have entirely different histories.

207 changed files with 2314 additions and 3434 deletions

View file

@ -30,7 +30,6 @@ Rule of thumb: keep ports below 10000 free for stuff that reserves ports.
| 20010 | mautrix-telegram | Bridge |
| 20020 | mautrix-whatsapp | Bridge |
| 20030 | matrix-dimension | Matrix Integrations Manager |
| 20070 | matrix-synapse | sliding-sync |
| 20080 | matrix-synapse | client, federation |
| 20081 | matrix-synapse | prometheus metrics |
| 20090 | matrix-media-repo | media_repo |

View file

@ -6,9 +6,9 @@ apt-get update
DEBIAN_FRONTEND=noninteractive apt-get -y -q -o Dpkg::Options::=--force-confold dist-upgrade
DEBIAN_FRONTEND=noninteractive apt-get -y -q autoremove
DEBIAN_FRONTEND=noninteractive apt-get -y -q autoclean
DEBIAN_FRONTEND=noninteractive apt-get -y -q clean
DEBIAN_FRONTEND=noninteractive apt-get -y -q autoremove
% if clean_old_kernels:
existing=$(dpkg --get-selections | grep -E '^linux-(image|headers)-[0-9]' || true)
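
A minimal Python sketch of what the kernel-cleanup guard collects, assuming a Debian host with dpkg on PATH:

import re
from subprocess import check_output

# list installed kernel image/header packages, mirroring the shell
# pipeline dpkg --get-selections | grep -E '^linux-(image|headers)-[0-9]'
selections = check_output(['dpkg', '--get-selections']).decode()
kernel_packages = [
    line.split()[0]
    for line in selections.splitlines()
    if re.match(r'linux-(image|headers)-[0-9]', line)
]
print(kernel_packages)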

View file

@ -0,0 +1 @@
deb http://raspbian.raspberrypi.org/raspbian/ buster main contrib non-free rpi

View file

@ -7,6 +7,9 @@ supported_os = {
12: 'bookworm',
99: 'unstable',
},
'raspbian': {
10: 'buster',
},
}
try:
@ -24,10 +27,6 @@ actions = {
'triggered': True,
'cascade_skip': False,
},
'apt_execute_update_commands': {
'command': ' && '.join(sorted(node.metadata.get('apt/additional_update_commands', {'true'}))),
'triggered': True,
},
}
files = {
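
The removed apt_execute_update_commands action joins every configured additional update command into a single shell invocation; the {'true'} default keeps it a harmless no-op when nothing is configured. A quick sketch with hypothetical values:

# hypothetical metadata values; {'true'} is the fallback used above
commands = {'npm install -g npm@latest', 'npm install -g yarn@latest'}
print(' && '.join(sorted(commands)))
# npm install -g npm@latest && npm install -g yarn@latest
# with nothing configured, the default yields just the no-op 'true'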

View file

@ -21,9 +21,6 @@ defaults = {
'cron/jobs/upgrade-and-reboot'
)
def patchday(metadata):
if not node.metadata.get('apt/unattended-upgrades/enabled', True):
return {}
day = metadata.get('apt/unattended-upgrades/day')
hour = metadata.get('apt/unattended-upgrades/hour')

View file

@ -33,7 +33,7 @@ defaults = {
# networking
'avahi': {},
'netctl': {},
'util-linux': {}, # provides rfkill
'rfkill': {},
'wpa_supplicant': {},
'wpa_actiond': {},

View file

@ -62,13 +62,10 @@ trap "on_exit" EXIT
# redirect stdout and stderr to logfile
prepare_and_cleanup_logdir
if [[ -z "$DEBUG" ]]
then
logfile="$logdir/backup--$(date '+%F--%H-%M-%S')--$$.log.gz"
echo "All log output will go to $logfile" | logger -it backup-client
exec > >(gzip >"$logfile")
exec 2>&1
fi
# this is where the real work starts
ts_begin=$(date +%s)

View file

@ -160,7 +160,7 @@ def monitoring(metadata):
client,
config['one_backup_every_hours'],
),
'vars.sshmon_timeout': 40,
'vars.sshmon_timeout': 20,
}
return {

View file

@ -29,17 +29,6 @@ files = {
},
}
if node.has_any_bundle([
'dovecot',
'nginx',
'postfix',
]):
actions['generate-dhparam'] = {
'command': 'openssl dhparam -out /etc/ssl/certs/dhparam.pem 2048',
'unless': 'test -f /etc/ssl/certs/dhparam.pem',
}
locale_needs = set()
for locale in sorted(node.metadata.get('locale/installed')):
actions[f'ensure_locale_{locale}_is_enabled'] = {
@ -52,9 +41,11 @@ for locale in sorted(node.metadata.get('locale/installed')):
}
locale_needs = {f'action:ensure_locale_{locale}_is_enabled'}
actions['locale-gen'] = {
actions = {
'locale-gen': {
'triggered': True,
'command': 'locale-gen',
},
}
description = []
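
The removed generate-dhparam action follows a generate-once pattern: 'unless' skips the expensive openssl call when the file already exists. The same guard as a minimal Python sketch, assuming openssl on PATH:

from os.path import exists
from subprocess import run

# generate the DH parameter file only once; the 'unless' test above
# plays the role of this exists() check
if not exists('/etc/ssl/certs/dhparam.pem'):
    run(['openssl', 'dhparam', '-out', '/etc/ssl/certs/dhparam.pem', '2048'],
        check=True)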

View file

@ -24,7 +24,7 @@ defaults = {
},
'sysctl': {
'options': {
'net.ipv4.conf.all.forwarding': '1',
'net.ipv4.ip_forward': '1',
'net.ipv6.conf.all.forwarding': '1',
},
},

View file

@ -7,6 +7,9 @@ supported_os = {
12: 'bookworm',
99: 'unstable',
},
'raspbian': {
10: 'buster',
},
}
try:
@ -79,10 +82,6 @@ actions = {
'triggered': True,
'cascade_skip': False,
},
'apt_execute_update_commands': {
'command': ' && '.join(sorted(node.metadata.get('apt/additional_update_commands', {'true'}))),
'triggered': True,
},
}
directories = {

View file

@ -1,39 +0,0 @@
#!/usr/bin/env python3
from json import loads
from subprocess import check_output
from sys import argv
try:
container_name = argv[1]
docker_ps = check_output([
'docker',
'container',
'ls',
'--all',
'--format',
'json',
'--filter',
f'name={container_name}'
])
containers = loads(f"[{','.join([l for l in docker_ps.decode().splitlines() if l])}]")
if not containers:
print(f'CRITICAL: container {container_name} not found!')
exit(2)
if len(containers) > 1:
print(f'Found more than one container matching {container_name}!')
print(docker_ps)
exit(3)
if containers[0]['State'] != 'running':
print(f'WARNING: container {container_name} not "running"')
exit(2)
print(f"OK: {containers[0]['Status']}")
except Exception as e:
print(repr(e))
exit(2)
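
docker container ls --format json prints one JSON object per line (JSONL) rather than a JSON array; the deleted plugin builds a list by joining the non-empty lines into a bracketed string. That step in isolation, with hypothetical sample lines:

from json import loads

# two hypothetical JSONL lines, shaped like docker's --format json output
docker_ps = (
    b'{"ID":"abc123","State":"running","Status":"Up 2 hours"}\n'
    b'{"ID":"def456","State":"exited","Status":"Exited (0) 3 days ago"}\n'
)
lines = [l for l in docker_ps.decode().splitlines() if l]
containers = loads(f"[{','.join(lines)}]")
assert containers[0]['State'] == 'running'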

View file

@ -1,50 +0,0 @@
#!/bin/bash
[[ -n "$DEBUG" ]] && set -x
ACTION="$1"
set -euo pipefail
if [[ -z "$ACTION" ]]
then
echo "Usage: $0 start|stop"
exit 1
fi
PUID="$(id -u "docker-${name}")"
PGID="$(id -g "docker-${name}")"
if [ "$ACTION" == "start" ]
then
docker run -d \
--name "${name}" \
--env "PUID=$PUID" \
--env "PGID=$PGID" \
--env "TZ=${timezone}" \
% for k, v in sorted(environment.items()):
--env "${k}=${v}" \
% endfor
--network host \
% for host_port, container_port in sorted(ports.items()):
--expose "127.0.0.1:${host_port}:${container_port}" \
% endfor
% for host_path, container_path in sorted(volumes.items()):
--volume "/var/opt/docker-engine/${name}/${host_path}:${container_path}" \
% endfor
--restart unless-stopped \
"${image}"
elif [ "$ACTION" == "stop" ]
then
docker stop "${name}"
docker rm "${name}"
else
echo "Unknown action $ACTION"
exit 1
fi
% if node.has_bundle('nftables'):
systemctl reload nftables
% endif

View file

@ -1,14 +0,0 @@
[Unit]
Description=docker-engine app ${name}
After=network.target
Requires=${' '.join(sorted(requires))}
[Service]
WorkingDirectory=/var/opt/docker-engine/${name}/
ExecStart=/opt/docker-engine/${name} start
ExecStop=/opt/docker-engine/${name} stop
Type=simple
RemainAfterExit=true
[Install]
WantedBy=multi-user.target

View file

@ -1,99 +0,0 @@
from bundlewrap.metadata import metadata_to_json
deps = {
'pkg_apt:docker-ce',
'pkg_apt:docker-ce-cli',
}
directories['/opt/docker-engine'] = {
'purge': True,
}
directories['/var/opt/docker-engine'] = {}
files['/etc/docker/daemon.json'] = {
'content': metadata_to_json(node.metadata.get('docker-engine/config')),
'triggers': {
'svc_systemd:docker:restart',
},
# install config before installing packages to ensure the config is
# applied to the first start as well
'before': deps,
}
svc_systemd['docker'] = {
'needs': deps,
}
files['/usr/local/share/icinga/plugins/check_docker_container'] = {
'mode': '0755',
}
for app, config in node.metadata.get('docker-engine/containers', {}).items():
volumes = config.get('volumes', {})
files[f'/opt/docker-engine/{app}'] = {
'source': 'docker-wrapper',
'content_type': 'mako',
'context': {
'environment': config.get('environment', {}),
'image': config['image'],
'name': app,
'ports': config.get('ports', {}),
'timezone': node.metadata.get('timezone'),
'volumes': volumes,
},
'mode': '0755',
'triggers': {
f'svc_systemd:docker-{app}:restart',
},
}
users[f'docker-{app}'] = {
'home': f'/var/opt/docker-engine/{app}',
'groups': {
'docker',
},
'after': {
# provides docker group
'pkg_apt:docker-ce',
},
}
files[f'/usr/local/lib/systemd/system/docker-{app}.service'] = {
'source': 'docker-wrapper.service',
'content_type': 'mako',
'context': {
'name': app,
'requires': {
*set(config.get('requires', set())),
'docker.service',
}
},
'triggers': {
'action:systemd-reload',
f'svc_systemd:docker-{app}:restart',
},
}
svc_systemd[f'docker-{app}'] = {
'needs': {
*deps,
f'file:/opt/docker-engine/{app}',
f'file:/usr/local/lib/systemd/system/docker-{app}.service',
f'user:docker-{app}',
'svc_systemd:docker',
*set(config.get('needs', set())),
},
}
for volume in volumes:
directories[f'/var/opt/docker-engine/{app}/{volume}'] = {
'owner': f'docker-{app}',
'group': f'docker-{app}',
'needed_by': {
f'svc_systemd:docker-{app}',
},
# don't do anything if the directory exists, docker images
# mangle owners
'unless': f'test -d /var/opt/docker-engine/{app}/{volume}',
}
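
Each entry under docker-engine/containers fans out into several items: a wrapper script, a systemd unit, a per-app user, a service, and one directory per volume. A sketch of the names generated for a hypothetical app:

# hypothetical container definition, as it would appear in metadata
app = 'somecontainer'
volumes = {'config': '/config', 'data': '/data'}

generated = [
    f'/opt/docker-engine/{app}',                            # wrapper script
    f'/usr/local/lib/systemd/system/docker-{app}.service',  # unit file
    f'docker-{app}',                                        # service user
    *(f'/var/opt/docker-engine/{app}/{v}' for v in sorted(volumes)),
]
print(generated)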

View file

@ -1,83 +0,0 @@
defaults = {
'apt': {
'packages': {
'docker-ce': {},
'docker-ce-cli': {},
'docker-compose-plugin': {},
},
'repos': {
'docker': {
'items': {
'deb https://download.docker.com/linux/debian {os_release} stable',
},
},
},
},
'backups': {
'paths': {
'/var/opt/docker-engine',
},
},
'hosts': {
'entries': {
'172.17.0.1': {
'host.docker.internal',
},
},
},
'docker-engine': {
'config': {
'iptables': False,
'no-new-privileges': True,
},
},
'zfs': {
'datasets': {
'tank/docker-data': {
'mountpoint': '/var/opt/docker-engine',
},
},
},
}
@metadata_reactor.provides(
'icinga2_api/docker-engine/services',
)
def monitoring(metadata):
services = {
'DOCKER PROCESS': {
'command_on_monitored_host': '/usr/lib/nagios/plugins/check_procs -C dockerd -c 1:',
},
}
for app in metadata.get('docker-engine/containers', {}):
services[f'DOCKER CONTAINER {app}'] = {
'command_on_monitored_host': f'sudo /usr/local/share/icinga/plugins/check_docker_container {app}'
}
return {
'icinga2_api': {
'docker-engine': {
'services': services,
},
},
}
@metadata_reactor.provides(
'zfs/datasets',
)
def zfs(metadata):
datasets = {}
for app in metadata.get('docker-engine/containers', {}):
datasets[f'tank/docker-data/{app}'] = {
'mountpoint': f'/var/opt/docker-engine/{app}'
}
return {
'zfs': {
'datasets': datasets,
},
}

View file

@ -1,64 +0,0 @@
assert node.has_bundle('docker-engine')
assert node.has_bundle('redis')
assert not node.has_bundle('postgresql') # docker container uses that port
defaults = {
'docker-engine': {
'containers': {
'immich': {
'image': 'ghcr.io/imagegenius/immich:latest',
'environment': {
'DB_DATABASE_NAME': 'immich',
'DB_HOSTNAME': 'host.docker.internal',
'DB_PASSWORD': repo.vault.password_for(f'{node.name} postgresql immich'),
'DB_USERNAME': 'immich',
'REDIS_HOSTNAME': 'host.docker.internal',
},
'volumes': {
'config': '/config',
'libraries': '/libraries',
'photos': '/photos',
},
'needs': {
'svc_systemd:docker-postgresql14',
},
'requires': {
'docker-postgresql14.service',
},
},
'postgresql14': {
'image': 'tensorchord/pgvecto-rs:pg14-v0.2.0',
'environment': {
'POSTGRES_PASSWORD': repo.vault.password_for(f'{node.name} postgresql immich'),
'POSTGRES_USER': 'immich',
'POSTGRES_DB': 'immich',
},
'volumes': {
'database': '/var/lib/postgresql/data',
},
},
},
},
'nginx': {
'vhosts': {
'immich': {
'locations': {
'/': {
'target': 'http://127.0.0.1:8080/',
'websockets': True,
'max_body_size': '500m',
},
#'/api/socket.io/': {
# 'target': 'http://127.0.0.1:8081/',
# 'websockets': True,
#},
},
},
},
},
'redis': {
'bind': '0.0.0.0',
},
}

View file

@ -3,4 +3,3 @@ driver = pgsql
default_pass_scheme = MD5-CRYPT
password_query = SELECT username as user, password FROM mailbox WHERE username = '%u' AND active = true
user_query = SELECT '/var/mail/vmail/' || maildir as home, 65534 as uid, 65534 as gid FROM mailbox WHERE username = '%u' AND active = true
iterate_query = SELECT username as user FROM mailbox WHERE active = true

View file

@ -28,19 +28,19 @@ namespace inbox {
mail_location = maildir:/var/mail/vmail/%d/%n
protocols = imap lmtp sieve
ssl = required
ssl_cert = </var/lib/dehydrated/certs/${node.metadata.get('postfix/myhostname')}/fullchain.pem
ssl_key = </var/lib/dehydrated/certs/${node.metadata.get('postfix/myhostname')}/privkey.pem
ssl_dh = </etc/ssl/certs/dhparam.pem
ssl = yes
ssl_cert = </var/lib/dehydrated/certs/${node.metadata.get('postfix/myhostname', node.metadata['hostname'])}/fullchain.pem
ssl_key = </var/lib/dehydrated/certs/${node.metadata.get('postfix/myhostname', node.metadata['hostname'])}/privkey.pem
ssl_dh = </etc/dovecot/ssl/dhparam.pem
ssl_min_protocol = TLSv1.2
ssl_cipher_list = ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:DHE-RSA-CHACHA20-POLY1305
ssl_prefer_server_ciphers = no
ssl_cipher_list = EECDH+AESGCM:EDH+AESGCM
ssl_prefer_server_ciphers = yes
login_greeting = IMAPd ready
auth_mechanisms = plain login
first_valid_uid = 65534
disable_plaintext_auth = yes
mail_plugins = $mail_plugins zlib old_stats fts fts_xapian
mail_plugins = $mail_plugins zlib old_stats
plugin {
zlib_save_level = 6
@ -56,15 +56,6 @@ plugin {
old_stats_refresh = 30 secs
old_stats_track_cmds = yes
fts = xapian
fts_xapian = partial=3 full=20
fts_autoindex = yes
fts_enforced = yes
# Index attachments
fts_decoder = decode2text
% if node.has_bundle('rspamd'):
sieve_before = /var/mail/vmail/sieve/global/spam-global.sieve
@ -95,19 +86,14 @@ service auth {
}
}
service decode2text {
executable = script /usr/lib/dovecot/decode2text.sh
user = dovecot
unix_listener decode2text {
mode = 0666
service lmtp {
unix_listener /var/spool/postfix/private/dovecot-lmtp {
group = postfix
mode = 0600
user = postfix
}
}
service indexer-worker {
vsz_limit = 0
process_limit = 0
}
service imap {
executable = imap
}
@ -118,14 +104,6 @@ service imap-login {
vsz_limit = 64M
}
service lmtp {
unix_listener /var/spool/postfix/private/dovecot-lmtp {
group = postfix
mode = 0600
user = postfix
}
}
service managesieve-login {
inet_listener sieve {
port = 4190

View file

@ -2,6 +2,10 @@
# by this bundle
repo.libs.tools.require_bundle(node, 'postfix')
directories = {
'/etc/dovecot/ssl': {},
}
files = {
'/etc/dovecot/dovecot.conf': {
'content_type': 'mako',
@ -45,17 +49,25 @@ files = {
},
}
symlinks['/usr/lib/dovecot/decode2text.sh'] = {
'target': '/usr/share/doc/dovecot-core/examples/decode2text.sh',
'before': {
'svc_systemd:dovecot',
actions = {
'dovecot_generate_dhparam': {
'command': 'openssl dhparam -out /etc/dovecot/ssl/dhparam.pem 2048',
'unless': 'test -f /etc/dovecot/ssl/dhparam.pem',
'cascade_skip': False,
'needs': {
'directory:/etc/dovecot/ssl',
'pkg_apt:'
},
'triggers': {
'svc_systemd:dovecot:restart',
},
},
}
svc_systemd = {
'dovecot': {
'needs': {
'action:generate-dhparam',
'action:dovecot_generate_dhparam',
'file:/etc/dovecot/dovecot.conf',
'file:/etc/dovecot/dovecot-sql.conf',
},

View file

@ -3,7 +3,6 @@ from bundlewrap.metadata import atomic
defaults = {
'apt': {
'packages': {
'dovecot-fts-xapian': {},
'dovecot-imapd': {},
'dovecot-lmtpd': {},
'dovecot-managesieved': {},
@ -36,16 +35,6 @@ defaults = {
'dovecot',
},
},
'systemd-timers': {
'timers': {
'dovecot_fts_optimize': {
'command': [
'/usr/bin/doveadm fts optimize -A',
],
'when': '02:{}:00'.format(node.magic_number % 60),
},
},
},
}
if node.has_bundle('postfixadmin'):

View file

@ -33,7 +33,7 @@ actions = {
'yarn build',
]),
'needs': {
'action:apt_execute_update_commands',
'action:nodejs_install_yarn',
'pkg_apt:nodejs',
},
'triggered': True,

View file

@ -11,26 +11,6 @@ defaults = {
},
}
@metadata_reactor.provides(
'nodejs/version',
)
def nodejs(metadata):
version = tuple([int(i) for i in metadata.get('element-web/version')[1:].split('.')])
if version >= (1, 11, 71):
return {
'nodejs': {
'version': 20,
},
}
else:
return {
'nodejs': {
'version': 18,
},
}
@metadata_reactor.provides(
'nginx/vhosts/element-web',
)

View file

@ -100,7 +100,7 @@ def nginx(metadata):
},
},
'website_check_path': '/user/login',
'website_check_string': 'Sign in',
'website_check_string': 'Sign In',
},
},
},

View file

@ -43,7 +43,6 @@ def nginx(metadata):
'locations': {
'/': {
'target': 'http://127.0.0.1:21010',
'websockets': True,
},
'/api/ds/query': {
'target': 'http://127.0.0.1:21010',

View file

@ -72,6 +72,7 @@ actions = {
'yarn build',
]),
'needs': {
'action:nodejs_install_yarn',
'file:/opt/hedgedoc/config.json',
'git_deploy:/opt/hedgedoc',
'pkg_apt:nodejs',

View file

@ -2,42 +2,48 @@
from sys import exit
from packaging.version import parse
from requests import get
import requests
from packaging import version
API_TOKEN = "${token}"
DOMAIN = "${domain}"
bearer = "${bearer}"
domain = "${domain}"
OK = 0
WARN = 1
CRITICAL = 2
UNKNOWN = 3
status = 3
message = "Unknown Update Status"
domain = "hass.home.kunbox.net"
s = requests.Session()
s.headers.update({"Content-Type": "application/json"})
try:
r = get("https://version.home-assistant.io/stable.json")
r.raise_for_status()
stable_version = parse(r.json()["homeassistant"]["generic-x86-64"])
except Exception as e:
print(f"Could not get stable version information from home-assistant.io: {e!r}")
exit(3)
try:
r = get(
f"https://{DOMAIN}/api/config",
headers={"Authorization": f"Bearer {API_TOKEN}", "Content-Type": "application/json"},
stable_version = version.parse(
s.get("https://version.home-assistant.io/stable.json").json()["homeassistant"][
"generic-x86-64"
]
)
r.raise_for_status()
running_version = parse(r.json()["version"])
except Exception as e:
print(f"Could not get running version information from homeassistant: {e!r}")
exit(3)
try:
if stable_version > running_version:
print(
f"There is a newer version available: {stable_version} (currently installed: {running_version})"
s.headers.update(
{"Authorization": f"Bearer {bearer}", "Content-Type": "application/json"}
)
exit(2)
running_version = version.parse(
s.get(f"https://{domain}/api/config").json()["version"]
)
if running_version == stable_version:
status = 0
message = f"OK - running version {running_version} equals stable version {stable_version}"
elif running_version > stable_version:
status = 1
message = f"WARNING - stable version {stable_version} is lower than running version {running_version}, check if downgrade is necessary."
else:
print(
f"Currently running version {running_version} matches newest release on home-assistant.io"
)
exit(0)
status = 2
message = f"CRITICAL - update necessary, running version {running_version} is lower than stable version {stable_version}"
except Exception as e:
print(repr(e))
exit(3)
message = f"{message}: {repr(e)}"
print(message)
exit(status)
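
The rewritten check orders versions with packaging.version instead of comparing strings; PEP 440 ordering in brief:

from packaging import version

# numeric per release segment, not lexical
assert version.parse('2024.10.2') > version.parse('2024.9.9')
# trailing zeros are insignificant
assert version.parse('2024.10') == version.parse('2024.10.0')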

View file

@ -5,8 +5,6 @@ After=network-online.target
[Service]
Type=simple
User=homeassistant
Environment="VIRTUAL_ENV=/opt/homeassistant/venv"
Environment="PATH=/opt/homeassistant/venv/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
WorkingDirectory=/var/opt/homeassistant
ExecStart=/opt/homeassistant/venv/bin/hass -c "/var/opt/homeassistant"
RestartForceExitStatus=100

View file

@ -1,13 +1,6 @@
if node.has_bundle('pyenv'):
python_version = sorted(node.metadata.get('pyenv/python_versions'))[-1]
python_path = f'/opt/pyenv/versions/{python_version}/bin/python'
else:
python_path = '/usr/bin/python3'
users = {
'homeassistant': {
'home': '/var/opt/homeassistant',
"groups": ["dialout"],
},
}
@ -30,7 +23,7 @@ files = {
'/usr/local/share/icinga/plugins/check_homeassistant_update': {
'content_type': 'mako',
'context': {
'token': node.metadata.get('homeassistant/api_secret'),
'bearer': repo.vault.decrypt(node.metadata.get('homeassistant/api_secret')),
'domain': node.metadata.get('homeassistant/domain'),
},
'mode': '0755',
@ -39,7 +32,7 @@ files = {
actions = {
'homeassistant_create_virtualenv': {
'command': f'sudo -u homeassistant virtualenv -p {python_path} /opt/homeassistant/venv/',
'command': 'sudo -u homeassistant /usr/bin/python3 -m virtualenv -p python3 /opt/homeassistant/venv/',
'unless': 'test -d /opt/homeassistant/venv/',
'needs': {
'directory:/opt/homeassistant',

View file

@ -1,132 +0,0 @@
#!/usr/bin/env python3
import re
from hashlib import md5
from sys import argv, exit
# Suppress SSL certificate warnings for ssl_verify=False
import urllib3
from lxml import html
from requests import Session
USERNAME_FIELD = "g2"
PASSWORD_FIELD = "g3"
CRSF_FIELD = "password"
STATUS_OK = 0
STATUS_WARNING = 1
STATUS_CRITICAL = 2
STATUS_UNKNOWN = 3
class OMMCrawler:
def __init__(self, hostname, username, password):
self.session = Session()
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
self.session.verify = False
self.url = f"https://{hostname}"
self.login_data = {
USERNAME_FIELD: username,
PASSWORD_FIELD: password,
CRSF_FIELD: md5(password.encode()).hexdigest(),
}
self.logged_in = False
def login(self):
# if we have multiple dect masters, find out which one is the current master
current_master_url = self.session.get(self.url, verify=False).url
self.hostname = re.search(r"^(.*[\\\/])", current_master_url).group(0)[:-1]
response = self.session.post(f"{self.url}/login_set.html", data=self.login_data)
response.raise_for_status()
# set cookie
pass_value = re.search(r"(?<=pass=)\d+(?=;)", response.text).group(0)
self.session.cookies.set("pass", pass_value)
self.logged_in = True
def get_station_status(self):
if not self.logged_in:
self.login()
data = {}
response = self.session.get(f"{self.url}/fp_pnp_status.html")
response.raise_for_status()
tree = html.fromstring(response.text)
xpath_results = tree.xpath('//tr[@class="l0" or @class="l1"]')
for result in xpath_results:
bubble_is_in_inactive_cluster = False
bubble_is_connected = False
bubble_is_active = False
bubble_name = result.xpath("td[4]/text()")[0]
try:
bubble_is_connected = result.xpath("td[11]/img/@alt")[0] == "yes"
if bubble_is_connected:
try:
bubble_is_active = result.xpath("td[12]/img/@alt")[0] == "yes"
except IndexError:
# If an IndexError occurs, there is no image in the
# 12th td. This means this bubble is not inside
# an active DECT cluster, but is a backup bubble.
# This is probably fine.
bubble_is_active = False
bubble_is_in_inactive_cluster = True
else:
bubble_is_active = False
except:
# There is no Image in the 11th td. This usually means there
# is a warning message in the 10th td. We do not care about
# that, currently.
pass
data[bubble_name] = {
"is_connected": bubble_is_connected,
"is_active": bubble_is_active,
"is_in_inactive_cluster": bubble_is_in_inactive_cluster,
}
return data
def handle_station_data(self):
try:
data = self.get_station_status()
except Exception as e:
print(f"Something went wrong. You should take a look at {self.url}")
print(repr(e))
exit(STATUS_UNKNOWN)
critical = False
for name, status in data.items():
if not status["is_active"] and not status["is_connected"]:
print(
f"Base station {name} is not active or connected! Check manually!"
)
critical = True
elif not status["is_active"] and not status["is_in_inactive_cluster"]:
# Bubble is part of an active DECT cluster, but not active.
# This shouldn't happen.
print(
f"Base station {name} is not active but connected! Check manually!"
)
critical = True
elif not status["is_connected"]:
# This should never happen. Seeing this state means OMM
# itself is broken.
print(
f"Base station {name} is not connected but active! Check manually!"
)
critical = True
if critical:
exit(STATUS_CRITICAL)
else:
print(f"OK - {len(data)} base stations connected")
exit(STATUS_OK)
if __name__ == "__main__":
omm = OMMCrawler(argv[1], argv[2], argv[3])
omm.handle_station_data()
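
The escalation rules in handle_station_data read as a small decision table; a sketch mirroring the branches above:

def station_problem(status):
    """Mirror the escalation rules of the deleted check above."""
    if not status['is_active'] and not status['is_connected']:
        return 'not active or connected'
    if not status['is_active'] and not status['is_in_inactive_cluster']:
        return 'connected but not active'  # in an active cluster; should not happen
    if not status['is_connected']:
        return 'active but not connected'  # indicates OMM itself is broken
    return None  # healthy, or a backup bubble in an inactive cluster

assert station_problem({'is_active': False, 'is_connected': True,
                        'is_in_inactive_cluster': True}) is None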

View file

@ -50,13 +50,17 @@ def check_list(ip_list, blocklist, warn_ips):
]).decode().splitlines()
for item in result:
if item.startswith(';;'):
continue
msgs.append('{} - {}'.format(
blocklist,
item,
))
else:
msgs.append('{} listed in {} as {}'.format(
ip,
blocklist,
item,
))
if item in warn_ips and returncode < 2:
if (item in warn_ips or item.startswith(';;')) and returncode < 2:
returncode = 1
else:
returncode = 2
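
The change downgrades dig's resolver error output (answer lines starting with ';;') from CRITICAL to WARNING, while real blocklist entries stay critical. The per-answer escalation as a pure function, assuming the same semantics:

def escalate(returncode, item, warn_ips):
    """Return the new check returncode after one dig answer line."""
    if (item in warn_ips or item.startswith(';;')) and returncode < 2:
        return 1  # WARNING: known-noisy IP or resolver error chatter
    return 2      # CRITICAL: a real blocklist entry

assert escalate(0, ';; connection timed out', set()) == 1
assert escalate(0, '127.0.0.2', set()) == 2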

View file

@ -9,11 +9,6 @@ app = Flask(__name__)
@app.route('/status')
def statuspage():
everything_fine = True
try:
check_output(['/usr/local/share/icinga/plugins/check_mounts'])
except:
everything_fine = False
try:
check_output(['/usr/lib/nagios/plugins/check_procs', '-C', 'icinga2', '-c', '1:'])
except:

View file

@ -3,6 +3,8 @@ Description=Icinga2 Statusmonitor
After=network.target
[Service]
User=nagios
Group=nagios
Environment="FLASK_APP=/etc/icinga2/icinga_statusmonitor.py"
ExecStart=/usr/bin/python3 -m flask run
WorkingDirectory=/tmp

View file

@ -113,14 +113,9 @@ def notify_per_ntfy():
else:
subject = '[ICINGA] {}'.format(args.host_name)
if args.notification_type.lower() == 'recovery':
priority = 'default'
else:
priority = 'urgent'
headers = {
'Title': subject,
'Priority': priority,
'Priority': 'urgent',
}
try:
@ -129,14 +124,11 @@ def notify_per_ntfy():
data=message_text,
headers=headers,
auth=(CONFIG['ntfy']['user'], CONFIG['ntfy']['password']),
timeout=10,
)
r.raise_for_status()
except Exception as e:
log_to_syslog('Sending a Notification failed: {}'.format(repr(e)))
return False
return True
def notify_per_mail():
@ -202,8 +194,7 @@ if __name__ == '__main__':
notify_per_mail()
if args.sms:
ntfy_worked = False
if CONFIG['ntfy']['user']:
ntfy_worked = notify_per_ntfy()
if not args.service_name or not ntfy_worked:
if args.service_name:
notify_per_sms()
if CONFIG['ntfy']['user']:
notify_per_ntfy()

View file

@ -275,27 +275,6 @@ files = {
'mode': '0660',
'group': 'icingaweb2',
},
# monitoring
'/etc/icinga2/icinga_statusmonitor.py': {
'triggers': {
'svc_systemd:icinga_statusmonitor:restart',
},
},
'/usr/local/lib/systemd/system/icinga_statusmonitor.service': {
'triggers': {
'action:systemd-reload',
'svc_systemd:icinga_statusmonitor:restart',
},
},
}
svc_systemd['icinga_statusmonitor'] = {
'needs': {
'file:/etc/icinga2/icinga_statusmonitor.py',
'file:/usr/local/lib/systemd/system/icinga_statusmonitor.service',
'pkg_apt:python3-flask',
},
}
actions = {
@ -337,12 +316,15 @@ for name in files:
for name in symlinks:
icinga_run_deps.add(f'symlink:{name}')
svc_systemd['icinga2'] = {
svc_systemd = {
'icinga2': {
'needs': icinga_run_deps,
},
}
# The actual hosts and services management starts here
bundles = set()
downtimes = []

View file

@ -17,8 +17,8 @@ defaults = {
'icinga2': {},
'icinga2-ido-pgsql': {},
'icingaweb2': {},
'icingaweb2-module-monitoring': {},
'python3-easysnmp': {},
'python3-flask': {},
'snmp': {},
}
},
@ -131,9 +131,6 @@ def nginx(metadata):
'/api/': {
'target': 'https://127.0.0.1:5665/',
},
'/statusmonitor/': {
'target': 'http://127.0.0.1:5000/',
},
},
'extras': True,
},

View file

@ -0,0 +1,4 @@
<%
from tomlkit import dumps as toml_dumps
from bundlewrap.utils.text import toml_clean
%>${toml_clean(toml_dumps(repo.libs.faults.resolve_faults(config), sort_keys=True))}
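
The new template serializes the resolved config with tomlkit; toml_clean and resolve_faults are repo helpers (BundleWrap Faults are lazily resolved secrets), while the tomlkit half is standard:

from tomlkit import dumps as toml_dumps

config = {'mqtt': {'host': 'localhost', 'port': 1883}}
print(toml_dumps(config, sort_keys=True))
# [mqtt]
# host = "localhost"
# port = 1883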

View file

@ -23,7 +23,7 @@ actions = {
git_deploy = {
'/opt/infobeamer-cms/src': {
'rev': 'master',
'repo': 'https://github.com/voc/infobeamer-cms.git',
'repo': 'https://github.com/sophieschi/36c3-cms.git',
'needs': {
'directory:/opt/infobeamer-cms/src',
},
@ -68,7 +68,10 @@ for room, device_id in sorted(node.metadata.get('infobeamer-cms/rooms', {}).item
files = {
'/opt/infobeamer-cms/settings.toml': {
'content': repo.libs.faults.dict_as_toml(config),
'content_type': 'mako',
'context': {
'config': config,
},
'triggers': {
'svc_systemd:infobeamer-cms:restart',
},
@ -96,6 +99,14 @@ files = {
},
}
pkg_pip = {
'github-flask': {
'needed_by': {
'svc_systemd:infobeamer-cms',
},
},
}
svc_systemd = {
'infobeamer-cms': {
'needs': {

View file

@ -0,0 +1,4 @@
<%
from tomlkit import dumps as toml_dumps
from bundlewrap.utils.text import toml_clean
%>${toml_clean(toml_dumps(repo.libs.faults.resolve_faults(config), sort_keys=True))}

View file

@ -1,10 +1,9 @@
#!/usr/bin/env python3
import logging
from datetime import datetime
from datetime import datetime, timezone
from json import dumps
from time import sleep
from zoneinfo import ZoneInfo
import paho.mqtt.client as mqtt
from requests import RequestException, get
@ -25,8 +24,7 @@ logging.basicConfig(
)
LOG = logging.getLogger("main")
TZ = ZoneInfo("Europe/Berlin")
DUMP_TIME = "0900"
MLOG = logging.getLogger("mqtt")
state = None
@ -57,20 +55,17 @@ def mqtt_out(message, level="INFO", device=None):
def mqtt_dump_state(device):
if not device["is_online"]:
return
out = []
if device["location"]:
out.append("Location: {}".format(device["location"]))
out.append("Setup: {} ({})".format(device["setup"]["name"], device["setup"]["id"]))
out.append("Resolution: {}".format(device["run"].get("resolution", "unknown")))
mqtt_out(
" - ".join(out),
"Sync status: {} - Location: {} - Running Setup: {} ({}) - Resolution: {}".format(
"yes" if device["is_synced"] else "syncing",
device["location"],
device["setup"]["name"],
device["setup"]["id"],
device["run"].get("resolution", "unknown"),
),
device=device,
)
def is_dump_time():
return datetime.now(TZ).strftime("%H%M") == DUMP_TIME
mqtt_out("Monitor starting up")
while True:
@ -83,13 +78,14 @@ while True:
r.raise_for_status()
ib_state = r.json()["devices"]
except RequestException as e:
LOG.exception("Could not get device data from info-beamer")
LOG.exception("Could not get data from info-beamer")
mqtt_out(
f"Could not get device data from info-beamer: {e!r}",
f"Could not get data from info-beamer: {e!r}",
level="WARN",
)
else:
new_state = {}
online_devices = set()
for device in ib_state:
did = str(device["id"])
@ -98,8 +94,7 @@ while True:
continue
new_state[did] = device
# force information output for every online device at 09:00 CE(S)T
must_dump_state = is_dump_time()
must_dump_state = False
if state is not None:
if did not in state:
@ -142,15 +137,17 @@ while True:
if device["is_online"]:
if device["maintenance"]:
mqtt_out(
"maintenance required: {}".format(
" ".join(sorted(device["maintenance"]))
"maintenance required: {}".join(
sorted(device["maintenance"])
),
level="WARN",
device=device,
)
must_dump_state = True
if (
device["location"] != state[did]["location"]
device["is_synced"] != state[did]["is_synced"]
or device["location"] != state[did]["location"]
or device["setup"]["id"] != state[did]["setup"]["id"]
or device["run"].get("resolution")
!= state[did]["run"].get("resolution")
@ -162,52 +159,23 @@ while True:
else:
LOG.info("adding device {} to empty state".format(device["id"]))
if device["is_online"]:
online_devices.add(
"{} ({})".format(
device["id"],
device["description"],
)
)
state = new_state
try:
r = get(
"https://info-beamer.com/api/v1/account",
auth=("", CONFIG["api_key"]),
)
r.raise_for_status()
ib_account = r.json()
except RequestException as e:
LOG.exception("Could not get account data from info-beamer")
mqtt_out(
f"Could not get account data from info-beamer: {e!r}",
level="WARN",
)
else:
available_credits = ib_account["balance"]
if is_dump_time():
mqtt_out(f"Available Credits: {available_credits}")
if available_credits < 50:
mqtt_out(
f"balance has dropped below 50 credits! (available: {available_credits})",
level="ERROR",
)
elif available_credits < 100:
mqtt_out(
f"balance has dropped below 100 credits! (available: {available_credits})",
level="WARN",
)
for quota_name, quota_config in sorted(ib_account["quotas"].items()):
value = quota_config["count"]["value"]
limit = quota_config["count"]["limit"]
if value > limit * 0.9:
mqtt_out(
f"quota {quota_name} is over 90% (limit {limit}, value {value})",
level="ERROR",
)
elif value > limit * 0.8:
mqtt_out(
f"quota {quota_name} is over 80% (limit {limit}, value {value})",
level="WARN",
)
sleep(60)
if (
datetime.now(timezone.utc).strftime("%H%M") == "1312"
and online_devices
and int(datetime.now(timezone.utc).strftime("%S")) < 30
):
mqtt_out("Online Devices: {}".format(", ".join(sorted(online_devices))))
sleep(30)
except KeyboardInterrupt:
break
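
The rewritten daily status dump drops the DUMP_TIME/ZoneInfo machinery: firing only when the UTC clock reads 13:12 with seconds below 30, combined with sleep(30), yields at most one dump per day. The window test in isolation:

from datetime import datetime, timezone

def in_daily_window(now=None):
    # with sleep(30) per loop iteration, this is true at most once per day
    now = now or datetime.now(timezone.utc)
    return now.strftime('%H%M') == '1312' and now.second < 30

assert in_daily_window(datetime(2024, 1, 1, 13, 12, 10, tzinfo=timezone.utc))
assert not in_daily_window(datetime(2024, 1, 1, 13, 12, 40, tzinfo=timezone.utc))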

View file

@ -1,7 +1,10 @@
assert node.has_bundle('infobeamer-cms') # uses same venv
files['/opt/infobeamer-monitor/config.toml'] = {
'content': repo.libs.faults.dict_as_toml(node.metadata.get('infobeamer-monitor')),
'content_type': 'mako',
'context': {
'config': node.metadata.get('infobeamer-monitor'),
},
'triggers': {
'svc_systemd:infobeamer-monitor:restart',
},

View file

@ -63,7 +63,7 @@ def firewall(metadata):
return {
'firewall': {
'port_rules': {
'8096/tcp': atomic(metadata.get('jellyfin/restrict-to', set())),
'8096/tcp': atomic(metadata.get('jellyfin/restrict-to', {'*'})),
},
},
}

View file

@ -1,15 +0,0 @@
actions['modprobe_jool'] = {
'command': 'modprobe jool',
'unless': 'lsmod | grep -F jool',
}
actions['jool_add_nat64_instance'] = {
'command': 'jool instance add "nat64" --netfilter --pool6 64:ff9b::/96',
'unless': 'jool instance display --no-headers --csv | grep -E ",nat64,netfilter$"',
'needs': {
'action:modprobe_jool',
'pkg_apt:jool-dkms',
'pkg_apt:jool-tools',
'pkg_apt:linux-headers-amd64',
},
}

View file

@ -1,14 +0,0 @@
defaults = {
'apt': {
'packages': {
'jool-dkms': {},
'jool-tools': {},
'linux-headers-amd64': {},
},
},
'modules': {
'jool': [
'jool',
],
},
}

View file

@ -0,0 +1,4 @@
<%
from tomlkit import dumps as toml_dumps
from bundlewrap.utils.text import toml_clean
%>${toml_clean(toml_dumps(repo.libs.faults.resolve_faults(node.metadata.get('jugendhackt_tools')), sort_keys=True))}

View file

@ -47,7 +47,7 @@ actions['jugendhackt_tools_migrate'] = {
}
files['/opt/jugendhackt_tools/config.toml'] = {
'content': repo.libs.faults.dict_as_toml(node.metadata.get('jugendhackt_tools')),
'content_type': 'mako',
'triggers': {
'svc_systemd:jugendhackt_tools:restart',
},

View file

@ -1,8 +0,0 @@
# This file is managed using bundlewrap
% for identifier, modules in sorted(node.metadata.get('modules', {}).items()):
# ${identifier}
% for module in modules:
${module}
% endfor
% endfor

View file

@ -1,3 +0,0 @@
files['/etc/modules'] = {
'content_type': 'mako',
}

View file

@ -39,7 +39,6 @@ def cron(metadata):
'/usr/bin/dehydrated --cleanup',
],
'when': '04:{}:00'.format(node.magic_number % 60),
'exclude_from_monitoring': True,
},
},
},

View file

@ -3,9 +3,6 @@ repo:
bindAddress: '${node.metadata.get('matrix-media-repo/listen-addr', '127.0.0.1')}'
port: ${node.metadata.get('matrix-media-repo/port', 20090)}
logDirectory: '-'
logColors: false
jsonLogs: false
logLevel: 'info'
trustAnyForwardedAddress: false
useForwardedHost: true
@ -25,13 +22,10 @@ homeservers:
csApi: "${config['domain']}"
backoffAt: ${config.get('backoff_at', 10)}
adminApiKind: "${config.get('api', 'matrix')}"
% if config.get('signing_key_path'):
signingKeyPath: "${config['signing_key_path']}"
% endif
% endfor
accessTokens:
maxCacheTimeSeconds: 10
maxCacheTimeSeconds: 0
useLocalAppserviceConfig: false
admins:
@ -59,9 +53,7 @@ archiving:
uploads:
maxBytes: ${node.metadata.get('matrix-media-repo/upload_max_mb')*1024*1024}
minBytes: 100
#reportedMaxBytes: 0
maxPending: 5
maxAgeSeconds: 1800
reportedMaxBytes: 0
quotas:
enabled: false
@ -69,6 +61,14 @@ downloads:
maxBytes: ${node.metadata.get('matrix-media-repo/download_max_mb')*1024*1024}
numWorkers: ${node.metadata.get('matrix-media-repo/workers')}
failureCacheMinutes: 5
cache:
enabled: true
maxSizeBytes: ${node.metadata.get('matrix-media-repo/download_max_mb')*10*1024*1024}
maxFileSizeBytes: ${node.metadata.get('matrix-media-repo/download_max_mb')*1024*1024}
trackedMinutes: 30
minDownloads: 5
minCacheTimeSeconds: 300
minEvictedTimeSeconds: 60
expireAfterDays: 0
urlPreviews:
@ -137,8 +137,8 @@ thumbnails:
rateLimit:
enabled: true
requestsPerSecond: 100
burst: 5000
requestsPerSecond: 10
burst: 50
identicons:
enabled: true

View file

@ -19,6 +19,9 @@ files = {
'/opt/matrix-media-repo/config.yaml': {
'owner': 'matrix-media-repo',
'content_type': 'mako',
'triggers': {
'svc_systemd:matrix-media-repo:restart',
},
},
'/etc/systemd/system/matrix-media-repo.service': {
'triggers': {

View file

@ -1,27 +0,0 @@
<%
database = node.metadata.get('matrix-synapse/database')
db_string = 'postgresql://{}:{}@{}/{}?sslmode=disable'.format(
database['user'],
database['password'],
database.get('host', 'localhost'),
database['database'],
)
%>\
[Unit]
Description=matrix-org sliding-sync proxy
After=network.target
Requires=postgresql.service
[Service]
User=matrix-synapse
Group=matrix-synapse
Environment=SYNCV3_SERVER=https://${node.metadata.get('matrix-synapse/baseurl')}
Environment=SYNCV3_DB=${db_string}
Environment=SYNCV3_SECRET=${node.metadata.get('matrix-synapse/sliding_sync/secret')}
Environment=SYNCV3_BINDADDR=127.0.0.1:20070
ExecStart=/usr/local/bin/matrix-sliding-sync
Restart=always
RestartSec=10s
[Install]
WantedBy=multi-user.target

View file

@ -57,32 +57,3 @@ svc_systemd = {
},
},
}
if node.metadata.get('matrix-synapse/sliding_sync/version', None):
files['/usr/local/bin/matrix-sliding-sync'] = {
'content_type': 'download',
'source': 'https://github.com/matrix-org/sliding-sync/releases/download/{}/syncv3_linux_amd64'.format(
node.metadata.get('matrix-synapse/sliding_sync/version'),
),
'content_hash': node.metadata.get('matrix-synapse/sliding_sync/sha1', None),
'mode': '0755',
'triggers': {
'svc_systemd:matrix-sliding-sync:restart',
},
}
files['/usr/local/lib/systemd/system/matrix-sliding-sync.service'] = {
'content_type': 'mako',
'triggers': {
'action:systemd-reload',
'svc_systemd:matrix-sliding-sync:restart',
},
}
svc_systemd['matrix-sliding-sync'] = {
'needs': {
'file:/usr/local/bin/matrix-sliding-sync',
'file:/usr/local/lib/systemd/system/matrix-sliding-sync.service',
'postgres_db:synapse',
},
}

View file

@ -88,14 +88,6 @@ def nginx(metadata):
if not node.has_bundle('nginx'):
raise DoNotRunAgain
wellknown_client_sliding_sync = {}
if metadata.get('matrix-synapse/sliding_sync/version', None):
wellknown_client_sliding_sync = {
'org.matrix.msc3575.proxy': {
'url': 'https://{}'.format(metadata.get('matrix-synapse/baseurl')),
},
}
wellknown = {
'/.well-known/matrix/client': {
'content': dumps({
@ -105,7 +97,6 @@ def nginx(metadata):
'm.identity_server': {
'base_url': metadata.get('matrix-synapse/identity_server', 'https://matrix.org'),
},
**wellknown_client_sliding_sync,
**metadata.get('matrix-synapse/additional_client_config', {}),
}, sort_keys=True),
'return': 200,
@ -127,16 +118,10 @@ def nginx(metadata):
}
locations = {
'/_client/': {
'target': 'http://127.0.0.1:20070',
},
'/_matrix': {
'target': 'http://[::1]:20080',
'max_body_size': '50M',
},
'/_matrix/client/unstable/org.matrix.msc3575/sync': {
'target': 'http://127.0.0.1:20070',
},
'/_synapse': {
'target': 'http://[::1]:20080',
},
@ -144,8 +129,7 @@ def nginx(metadata):
}
if node.has_bundle('matrix-media-repo'):
for path in ('/_matrix/media', '/_matrix/client/v1/media', '/_matrix/federation/v1/media'):
locations[path] = {
locations['/_matrix/media'] = {
'target': 'http://localhost:20090',
'max_body_size': '{}M'.format(metadata.get('matrix-media-repo/upload_max_mb')),
# matrix-media-repo needs this to be the

View file

@ -1,15 +1,11 @@
#!/bin/bash
OPTS="--netrc"
OPTS="$OPTS --netrc-location /opt/mixcloud-downloader/netrc"
OPTS="$OPTS --retry-sleep linear=1::2"
OPTS="$OPTS --retry-sleep fragment:exp=1:60"
OPTS="$OPTS --extractor-retries 5"
OPTS=""
if [[ -n "$DEBUG" ]]
then
set -x
else
OPTS="$OPTS -q"
OPTS="-q"
fi
set -euo pipefail

View file

@ -1,3 +0,0 @@
% for domain, data in sorted(node.metadata.get('mixcloud-downloader/netrc', {}).items()):
machine ${domain} login ${data['username']} password ${data['password']}
% endfor

View file

@ -6,9 +6,3 @@ files['/opt/mixcloud-downloader/download.sh'] = {
directories['/opt/mixcloud-downloader'] = {
'owner': 'kunsi',
}
files['/opt/mixcloud-downloader/netrc'] = {
'content_type': 'mako',
'mode': '0400',
'owner': 'kunsi',
}

View file

@ -5,6 +5,12 @@ files = {
'svc_systemd:mosquitto:restart',
},
},
'/usr/local/bin/tasmota-telegraf-plugin': {
'mode': '0755',
'needs': {
'pkg_apt:python3-paho-mqtt',
},
},
}
svc_systemd = {
@ -17,12 +23,6 @@ svc_systemd = {
}
if node.has_bundle('telegraf'):
files['/usr/local/bin/tasmota-telegraf-plugin'] = {
'mode': '0755',
'needs': {
'pkg_apt:python3-paho-mqtt',
},
'triggers': {
files['/usr/local/bin/tasmota-telegraf-plugin']['triggers'] = {
'svc_systemd:telegraf:restart',
},
}

View file

@ -5,6 +5,7 @@ defaults = {
'packages': {
'mosquitto': {},
'mosquitto-clients': {},
'python3-paho-mqtt': {}, # for telegraf plugin
},
},
'icinga2_api': {
@ -23,9 +24,6 @@ defaults = {
},
}
if node.has_bundle('telegraf'):
defaults['apt']['packages']['python3-paho-mqtt'] = {}
@metadata_reactor.provides(
'firewall/port_rules',

View file

@ -1,40 +1,42 @@
users['netbox'] = {
users = {
'netbox': {
'home': '/opt/netbox',
},
}
directories['/opt/netbox/src'] = {}
directories['/opt/netbox/media'] = {
directories = {
'/opt/netbox/src': {},
'/opt/netbox/media': {
'owner': 'netbox',
}
directories['/opt/netbox/scripts'] = {
},
'/opt/netbox/scripts': {
'owner': 'netbox',
},
}
git_deploy['/opt/netbox/src'] = {
git_deploy = {
'/opt/netbox/src': {
'repo': 'https://github.com/netbox-community/netbox.git',
'rev': node.metadata.get('netbox/version'),
'triggers': {
'action:netbox_install',
'action:netbox_upgrade',
'svc_systemd:netbox-web:restart',
'svc_systemd:netbox-worker:restart',
},
'tags': {
'netbox-install',
},
}
# This is a recreation of https://github.com/netbox-community/netbox/blob/develop/upgrade.sh
actions['netbox_create_virtualenv'] = {
actions = {
'netbox_create_virtualenv': {
'command': '/usr/bin/python3 -m virtualenv -p python3 /opt/netbox/venv',
'unless': 'test -d /opt/netbox/venv/',
'needed_by': {
'action:netbox_install',
},
}
actions['netbox_install'] = {
},
'netbox_install': {
'triggered': True,
'command': ' && '.join([
'cd /opt/netbox/src',
@ -53,51 +55,37 @@ actions['netbox_install'] = {
'pkg_apt:libxslt1-dev',
'pkg_apt:python3-dev',
'pkg_apt:zlib1g-dev',
},
'tags': {
'netbox-install',
},
}
last_action = 'netbox_install'
for upgrade_command in (
'migrate',
'trace_paths --no-input',
'collectstatic --no-input',
'remove_stale_contenttypes --no-input',
'reindex --lazy',
'clearsessions',
):
actions[f'netbox_upgrade_{upgrade_command.split()[0]}'] = {
},
'netbox_upgrade': {
'triggered': True,
'command': f'/opt/netbox/venv/bin/python /opt/netbox/src/netbox/manage.py {upgrade_command}',
'command': ' && '.join([
'/opt/netbox/venv/bin/python /opt/netbox/src/netbox/manage.py migrate',
'/opt/netbox/venv/bin/python /opt/netbox/src/netbox/manage.py collectstatic --no-input',
'/opt/netbox/venv/bin/python /opt/netbox/src/netbox/manage.py remove_stale_contenttypes --no-input',
'/opt/netbox/venv/bin/python /opt/netbox/src/netbox/manage.py clearsessions',
]),
'needs': {
f'action:{last_action}',
'action:netbox_install',
'file:/opt/netbox/src/netbox/netbox/configuration.py',
},
'tags': {
'netbox-upgrade',
},
'triggered_by': {
'tag:netbox-install',
},
}
last_action = f'netbox_upgrade_{upgrade_command.split()[0]}'
files['/usr/local/lib/systemd/system/netbox-web.service'] = {
files = {
'/usr/local/lib/systemd/system/netbox-web.service': {
'triggers': {
'action:systemd-reload',
'svc_systemd:netbox-web:restart',
},
}
files['/usr/local/lib/systemd/system/netbox-worker.service'] = {
},
'/usr/local/lib/systemd/system/netbox-worker.service': {
'triggers': {
'action:systemd-reload',
'svc_systemd:netbox-worker:restart',
},
}
files['/opt/netbox/src/netbox/netbox/configuration.py'] = {
},
'/opt/netbox/src/netbox/netbox/configuration.py': {
'content_type': 'mako',
'triggers': {
'svc_systemd:netbox-web:restart',
@ -106,33 +94,31 @@ files['/opt/netbox/src/netbox/netbox/configuration.py'] = {
'needs': {
'git_deploy:/opt/netbox/src',
},
'tags': {
'netbox-install',
},
}
files['/opt/netbox/gunicorn_config.py'] = {
'/opt/netbox/gunicorn_config.py': {
'content_type': 'mako',
'triggers': {
'svc_systemd:netbox-web:restart',
},
},
}
svc_systemd['netbox-web'] = {
svc_systemd = {
'netbox-web': {
'needs': {
'action:netbox_install',
'action:netbox_upgrade',
'file:/usr/local/lib/systemd/system/netbox-web.service',
'file:/opt/netbox/gunicorn_config.py',
'file:/opt/netbox/src/netbox/netbox/configuration.py',
'tag:netbox-install',
'tag:netbox-upgrade',
},
}
svc_systemd['netbox-worker'] = {
},
'netbox-worker': {
'needs': {
'action:netbox_install',
'action:netbox_upgrade',
'file:/usr/local/lib/systemd/system/netbox-worker.service',
'file:/opt/netbox/src/netbox/netbox/configuration.py',
'tag:netbox-install',
'tag:netbox-upgrade',
},
},
}
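
One side generates the upgrade steps in a loop where each triggered action needs its predecessor via last_action, so the manage.py commands run strictly in order. The chaining pattern, condensed with hypothetical step names:

actions = {}
last_action = 'netbox_install'
for cmd in ('migrate', 'collectstatic --no-input', 'clearsessions'):
    name = f"netbox_upgrade_{cmd.split()[0]}"
    actions[name] = {
        'triggered': True,
        'command': f'/opt/netbox/venv/bin/python /opt/netbox/src/netbox/manage.py {cmd}',
        'needs': {f'action:{last_action}'},  # serialize the steps
    }
    last_action = name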

View file

@ -23,8 +23,9 @@ table inet filter {
icmp type timestamp-request drop
icmp type timestamp-reply drop
meta l4proto {icmp, ipv6-icmp} accept
ip protocol icmp accept
ip6 nexthdr ipv6-icmp accept
% for ruleset, rules in sorted(input.items()):
# ${ruleset}

View file

@ -29,7 +29,7 @@ defaults = {
},
}
if not node.has_bundle('vmhost') and not node.has_bundle('docker-engine'):
if not node.has_bundle('vmhost'):
# see comment in bundles/vmhost/items.py
defaults['apt']['packages']['iptables'] = {
'installed': False,

View file

@ -10,9 +10,6 @@ events {
http {
include /etc/nginx/mime.types;
types {
application/javascript mjs;
}
default_type application/octet-stream;
charset UTF-8;
override_charset on;

View file

@ -29,9 +29,8 @@ server {
root ${webroot if webroot else '/var/www/{}/'.format(vhost)};
index ${' '.join(index)};
listen 443 ssl;
listen [::]:443 ssl;
http2 on;
listen 443 ssl http2;
listen [::]:443 ssl http2;
% if ssl == 'letsencrypt':
ssl_certificate /var/lib/dehydrated/certs/${domain}/fullchain.pem;
@ -71,9 +70,8 @@ server {
root ${webroot if webroot else '/var/www/{}/'.format(vhost)};
index ${' '.join(index)};
listen 443 ssl;
listen [::]:443 ssl;
http2 on;
listen 443 ssl http2;
listen [::]:443 ssl http2;
% if ssl == 'letsencrypt':
ssl_certificate /var/lib/dehydrated/certs/${domain}/fullchain.pem;
@ -82,13 +80,12 @@ server {
ssl_certificate /etc/nginx/ssl/${vhost}.crt;
ssl_certificate_key /etc/nginx/ssl/${vhost}.key;
% endif
ssl_ciphers ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:DHE-RSA-CHACHA20-POLY1305;
ssl_dhparam /etc/ssl/certs/dhparam.pem;
ssl_prefer_server_ciphers off;
ssl_protocols TLSv1.2 TLSv1.3;
ssl_ciphers ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384;
ssl_prefer_server_ciphers on;
ssl_session_cache shared:SSL:10m;
ssl_session_tickets off;
ssl_session_timeout 1d;
add_header Strict-Transport-Security "max-age=63072000; includeSubDomains";
% endif
@ -149,18 +146,18 @@ server {
% if 'target' in options:
proxy_pass ${options['target']};
proxy_http_version ${options.get('http_version', '1.1')};
proxy_set_header Host ${options.get('proxy_pass_host', domain)};
proxy_set_header Host ${domain};
% if options.get('websockets', False):
proxy_set_header Connection "upgrade";
proxy_set_header Upgrade $http_upgrade;
% endif
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header X-Forwarded-Host ${options.get('x_forwarded_host', options.get('proxy_pass_host', domain))};
proxy_set_header X-Forwarded-Host ${options.get('x_forwarded_host', domain)};
% for option, value in options.get('proxy_set_header', {}).items():
proxy_set_header ${option} ${value};
% endfor
% if location != '/' and location != '= /':
% if location != '/':
proxy_set_header X-Script-Name ${location};
% endif
proxy_buffering off;
@ -201,8 +198,6 @@ server {
fastcgi_hide_header X-XSS-Protection;
% endif
fastcgi_hide_header Permissions-Policy;
fastcgi_request_buffering off;
proxy_buffering off;
}
% if not max_body_size:
client_max_body_size 5M;

View file

@ -78,10 +78,17 @@ if node.has_bundle('pacman'):
},
}
actions = {
'nginx-generate-dhparam': {
'command': 'openssl dhparam -out /etc/ssl/certs/dhparam.pem 2048',
'unless': 'test -f /etc/ssl/certs/dhparam.pem',
},
}
svc_systemd = {
'nginx': {
'needs': {
'action:generate-dhparam',
'action:nginx-generate-dhparam',
'directory:/var/log/nginx-timing',
package,
},

View file

@ -200,8 +200,8 @@ def telegraf_anon_timing(metadata):
result[f'nginx-{vname}'] = {
'files': [f'/var/log/nginx-timing/{vname}.log'],
'from_beginning': False,
'grok_patterns': [r'%{LOGPATTERN}'],
'grok_custom_patterns': r'LOGPATTERN \[%{HTTPDATE:ts:ts-httpd}\] %{NUMBER:request_time:float} (?:%{NUMBER:upstream_response_time:float}|-) "%{WORD:verb:tag} %{NOTSPACE:request} HTTP/%{NUMBER:http_version:float}" %{NUMBER:resp_code:tag}',
'grok_patterns': ['%{LOGPATTERN}'],
'grok_custom_patterns': 'LOGPATTERN \[%{HTTPDATE:ts:ts-httpd}\] %{NUMBER:request_time:float} (?:%{NUMBER:upstream_response_time:float}|-) "%{WORD:verb:tag} %{NOTSPACE:request} HTTP/%{NUMBER:http_version:float}" %{NUMBER:resp_code:tag}',
'data_format': 'grok',
'name_override': 'nginx_timing',
}

View file

@ -0,0 +1,9 @@
actions = {
'nodejs_install_yarn': {
'command': 'npm install -g yarn@latest',
'unless': 'test -e /usr/lib/node_modules/yarn',
'after': {
'pkg_apt:',
},
},
}

View file

@ -1,40 +1,53 @@
defaults = {
'apt': {
'additional_update_commands': {
# update npm and yarn to latest version
'npm install -g npm@latest',
# update npm to latest version
'npm install -g yarn@latest',
},
'packages': {
'nodejs': {},
},
},
'nodejs': {
'triggers': {
'action:apt_execute_update_commands',
},
},
'npm': {
'installed': False,
'triggers': {
'action:apt_execute_update_commands',
},
},
},
'version': 18,
},
}
VERSIONS_SHIPPED_BY_DEBIAN = {
10: 10,
11: 12,
12: 18,
13: 18,
}
@metadata_reactor.provides(
'apt/repos/nodejs/items',
)
def nodejs_from_version(metadata):
version = metadata.get('nodejs/version')
if version != VERSIONS_SHIPPED_BY_DEBIAN[node.os_version[0]]:
return {
'apt': {
'additional_update_commands': {
# update npm to latest version
'npm install -g npm@latest',
},
'repos': {
'nodejs': {
'items': {
f'deb https://deb.nodesource.com/node_{version}.x nodistro main',
f'deb https://deb.nodesource.com/node_{version}.x {{os_release}} main',
f'deb-src https://deb.nodesource.com/node_{version}.x {{os_release}} main',
},
},
},
},
}
else:
return {
'apt': {
'packages': {
'npm': {},
},
},
}
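
The reactor adds the NodeSource repo only when the requested Node.js major version differs from what the Debian release ships; otherwise distro packages (including npm) suffice. The decision, condensed and assuming the same version map:

VERSIONS_SHIPPED_BY_DEBIAN = {10: 10, 11: 12, 12: 18, 13: 18}

def needs_nodesource(debian_major, wanted_major):
    return wanted_major != VERSIONS_SHIPPED_BY_DEBIAN[debian_major]

assert needs_nodesource(12, 20)      # bookworm wants node 20 -> add repo
assert not needs_nodesource(12, 18)  # shipped version -> distro nodejs + npm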

View file

@ -0,0 +1,25 @@
from os.path import join
directories = {
'/etc/openvpn/client': {
'mode': '0750',
'owner': 'openvpn',
'group': None,
'purge': True,
},
}
for fname, config in node.metadata.get('openvpn-client/configs', {}).items():
files[f'/etc/openvpn/client/{fname}.conf'] = {
'content': repo.vault.decrypt_file(join('openvpn-client', f'{fname}.conf.vault')),
'triggers': {
f'svc_systemd:openvpn-client@{fname}:restart',
} if config.get('running', True) else set(),
}
svc_systemd[f'openvpn-client@{fname}'] = {
'needs': {
f'file:/etc/openvpn/client/{fname}.conf',
},
**config,
}

View file

@ -0,0 +1,20 @@
defaults = {
'apt': {
'packages': {
'openvpn': {
'needed_by': {
'directory:/etc/openvpn/client',
},
},
},
},
'pacman': {
'packages': {
'openvpn': {
'needed_by': {
'directory:/etc/openvpn/client',
},
},
},
},
}

View file

@ -36,13 +36,13 @@ pkg_pacman = {
'at': {},
'autoconf': {},
'automake': {},
'bind': {},
'binutils': {},
'bison': {},
'bzip2': {},
'curl': {},
'dialog': {},
'diffutils': {},
'dnsutils': {},
'fakeroot': {},
'file': {},
'findutils': {},

View file

@ -8,9 +8,6 @@ Group=paperless
Environment=PAPERLESS_CONFIGURATION_PATH=/opt/paperless/paperless.conf
WorkingDirectory=/opt/paperless/src/paperless-ngx/src
ExecStart=/opt/paperless/venv/bin/python manage.py document_consumer
Restart=always
RestartSec=10
SyslogIdentifier=paperless-consumer
[Install]
WantedBy=multi-user.target

View file

@ -8,9 +8,6 @@ Group=paperless
Environment=PAPERLESS_CONFIGURATION_PATH=/opt/paperless/paperless.conf
WorkingDirectory=/opt/paperless/src/paperless-ngx/src
ExecStart=/opt/paperless/venv/bin/celery --app paperless beat --loglevel INFO
Restart=always
RestartSec=10
SyslogIdentifier=paperless-scheduler
[Install]
WantedBy=multi-user.target

View file

@ -8,9 +8,6 @@ Group=paperless
Environment=PAPERLESS_CONFIGURATION_PATH=/opt/paperless/paperless.conf
WorkingDirectory=/opt/paperless/src/paperless-ngx/src
ExecStart=/opt/paperless/venv/bin/celery --app paperless worker --loglevel INFO
Restart=always
RestartSec=10
SyslogIdentifier=paperless-taskqueue
[Install]
WantedBy=multi-user.target

View file

@ -10,9 +10,6 @@ Group=paperless
Environment=PAPERLESS_CONFIGURATION_PATH=/opt/paperless/paperless.conf
WorkingDirectory=/opt/paperless/src/paperless-ngx/src
ExecStart=/opt/paperless/venv/bin/gunicorn -c /opt/paperless/src/paperless-ngx/gunicorn.conf.py -b 127.0.0.1:22070 paperless.asgi:application
Restart=always
RestartSec=10
SyslogIdentifier=paperless-webserver
[Install]
WantedBy=multi-user.target

View file

@ -30,8 +30,6 @@ PAPERLESS_CORS_ALLOWED_HOSTS=http://${node.metadata.get('paperless/domain')},htt
PAPERLESS_OCR_LANGUAGE=${'+'.join(sorted(node.metadata.get('paperless/ocr_languages', {'deu', 'eng'})))}
PAPERLESS_OCR_MODE=skip
PAPERLESS_OCR_SKIP_ARCHIVE_FILE=never
PAPERLESS_OCR_USER_ARGS='{"invalidate_digital_signatures": true}'
PAPERLESS_PRE_CONSUME_SCRIPT=/opt/paperless/pre-consume.sh
#PAPERLESS_OCR_OUTPUT_TYPE=pdfa
#PAPERLESS_OCR_PAGES=1
#PAPERLESS_OCR_IMAGE_DPI=300

View file

@ -1,11 +0,0 @@
#!/bin/bash
[[ -n "$DEBUG" ]] && set -x
set -euo pipefail
pdfinfo "${DOCUMENT_WORKING_PATH}" | grep -q "Encrypted:"
if pdfinfo "${DOCUMENT_WORKING_PATH}" | grep -q "Encrypted: yes"
then
qpdf --replace-input --decrypt "${DOCUMENT_WORKING_PATH}"
fi

View file

@ -15,10 +15,6 @@ files['/opt/paperless/paperless.conf'] = {
},
}
files['/opt/paperless/pre-consume.sh'] = {
'mode': '0755',
}
actions['paperless_create_virtualenv'] = {
'command': '/usr/bin/python3 -m virtualenv -p python3 /opt/paperless/venv/',
'unless': 'test -d /opt/paperless/venv/',

View file

@ -11,7 +11,6 @@ defaults = {
'mariadb-client': {},
'mime-support': {},
'optipng': {},
'poppler-utils': {},
'python3-wheel': {},
# for OCRmyPDF
@ -33,9 +32,6 @@ defaults = {
'/mnt/paperless',
},
},
'nodejs': {
'version': 18,
},
'postgresql': {
'roles': {
'paperless': {

View file

@ -1,5 +1,3 @@
devnull@${node.metadata.get('postfix/myhostname')} DISCARD DEV-NULL
% for address in sorted(blocked):
${address} REJECT
% endfor

View file

@ -3,7 +3,7 @@ biff = no
append_dot_mydomain = no
readme_directory = no
compatibility_level = 2
myhostname = ${node.metadata.get('postfix/myhostname')}
myhostname = ${node.metadata.get('postfix/myhostname', node.metadata['hostname'])}
myorigin = /etc/mailname
mydestination = $myhostname, localhost
mynetworks = ${' '.join(sorted(mynetworks))}
@ -25,6 +25,7 @@ inet_interfaces = 127.0.0.1
% endif
<%text>
smtp_use_tls = yes
smtp_tls_loglevel = 1
smtp_tls_note_starttls_offer = yes
smtp_tls_session_cache_database = btree:${data_directory}/smtp_scache
@ -37,8 +38,8 @@ smtp_tls_CAfile = /etc/ssl/certs/ca-certificates.crt
</%text>
% if node.has_bundle('postfixadmin'):
smtpd_tls_cert_file = /var/lib/dehydrated/certs/${node.metadata.get('postfix/myhostname')}/fullchain.pem
smtpd_tls_key_file = /var/lib/dehydrated/certs/${node.metadata.get('postfix/myhostname')}/privkey.pem
smtpd_tls_cert_file = /var/lib/dehydrated/certs/${node.metadata.get('postfix/myhostname', node.metadata['hostname'])}/fullchain.pem
smtpd_tls_key_file = /var/lib/dehydrated/certs/${node.metadata.get('postfix/myhostname', node.metadata['hostname'])}/privkey.pem
<%text>
smtpd_use_tls=yes
smtpd_tls_session_cache_database = btree:${data_directory}/smtpd_scache
@ -47,18 +48,19 @@ smtpd_client_restrictions = permit_mynetworks permit_sasl_authenticated
smtpd_helo_required = yes
smtpd_helo_restrictions = permit_mynetworks reject_invalid_helo_hostname
smtpd_data_restrictions = reject_unauth_pipelining
smtpd_recipient_restrictions = check_recipient_access hash:/etc/postfix/blocked_recipients, permit_mynetworks
smtpd_recipient_restrictions = permit_mynetworks, check_recipient_access hash:/etc/postfix/blocked_recipients
smtpd_relay_before_recipient_restrictions = yes
# https://ssl-config.mozilla.org/#server=postfix&version=3.7.10&config=intermediate&openssl=3.0.11&guideline=5.7
# generated using mozilla ssl generator, using "old" configuration.
# we need this to support CentOS 7 systems, sadly ...
# https://ssl-config.mozilla.org/#server=postfix&version=3.5.13&config=old&openssl=1.1.1k&guideline=5.6
smtpd_tls_security_level = may
smtpd_tls_auth_only = yes
smtpd_tls_mandatory_protocols = !SSLv2, !SSLv3, !TLSv1, !TLSv1.1
smtpd_tls_protocols = !SSLv2, !SSLv3, !TLSv1, !TLSv1.1
smtpd_tls_mandatory_protocols = !SSLv2, !SSLv3
smtpd_tls_protocols = !SSLv2, !SSLv3
smtpd_tls_mandatory_ciphers = medium
smtpd_tls_dh1024_param_file = /etc/ssl/certs/dhparam.pem
tls_medium_cipherlist = ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:DHE-RSA-CHACHA20-POLY1305
tls_preempt_cipherlist = no
tls_medium_cipherlist = ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:DHE-RSA-CHACHA20-POLY1305:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA256:ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES128-SHA:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES256-SHA:ECDHE-RSA-AES256-SHA:DHE-RSA-AES128-SHA256:DHE-RSA-AES256-SHA256:AES128-GCM-SHA256:AES256-GCM-SHA384:AES128-SHA256:AES256-SHA256:AES128-SHA:AES256-SHA:DES-CBC3-SHA
tls_preempt_cipherlist = yes
</%text>
relay_domains = $mydestination, pgsql:/etc/postfix/pgsql/relay_domains.cf

View file

@ -25,7 +25,7 @@ my_package = 'pkg_pacman:postfix' if node.os == 'arch' else 'pkg_apt:postfix'
files = {
'/etc/mailname': {
'content': node.metadata.get('postfix/myhostname'),
'content': node.metadata.get('postfix/myhostname', node.metadata['hostname']),
'before': {
my_package,
},

View file

@ -87,7 +87,7 @@ def letsencrypt(metadata):
}
result['domains'] = {
metadata.get('postfix/myhostname'): set(),
metadata.get('postfix/myhostname', metadata.get('hostname')): set(),
}
return {
@ -148,14 +148,3 @@ def icinga2(metadata):
},
},
}
@metadata_reactor.provides(
'postfix/myhostname',
)
def myhostname(metadata):
return {
'postfix': {
'myhostname': metadata.get('hostname'),
},
}

View file

@ -57,7 +57,7 @@ files = {
},
}
if node.has_bundle('backup-client'):
if node.has_bundle('backup-client') and not node.has_bundle('zfs'):
files['/etc/backup-pre-hooks.d/90-postgresql-dump-all'] = {
'source': 'backup-pre-hook',
'content_type': 'mako',
@ -67,6 +67,10 @@ if node.has_bundle('backup-client'):
'mode': '0700',
}
directories['/var/tmp/postgresdumps'] = {}
else:
files['/var/tmp/postgresdumps'] = {
'delete': True,
}
postgres_roles = {
'root': {

View file

@ -11,7 +11,6 @@ defaults = {
'backups': {
'paths': {
'/var/lib/postgresql',
'/var/tmp/postgresdumps',
},
},
'bash_functions': {
@ -75,6 +74,8 @@ if node.has_bundle('zfs'):
},
},
}
else:
defaults['backups']['paths'].add('/var/tmp/postgresdumps')
@metadata_reactor.provides(
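Net effect of the two hunks above, as a sketch (the zfs behaviour is an assumption based on the visible context):

# without zfs: dump hook installed, backup paths include the dumps
#   {'/var/lib/postgresql', '/var/tmp/postgresdumps'}
# with zfs: no dump hook, only the data directory (presumably covered by snapshots)
#   {'/var/lib/postgresql'}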

View file

@ -3,8 +3,6 @@ from os import listdir
from os.path import isfile, join
from subprocess import check_output
from bundlewrap.utils.ui import io
zone_path = join(repo.path, 'data', 'powerdns', 'files', 'bind-zones')
nameservers = set()
@ -67,7 +65,7 @@ svc_systemd = {
actions = {
'powerdns_reload_zones': {
'triggered': True,
'command': r'pdns_control rediscover; pdns_control reload; pdns_control notify \*',
'command': 'pdns_control rediscover; pdns_control reload; pdns_control notify \*',
'after': {
'svc_systemd:pdns',
},
@ -81,10 +79,9 @@ if node.metadata.get('powerdns/features/bind', False):
continue
try:
output = check_output(['git', 'log', '-1', '--pretty=%ci']).decode('utf-8').strip()
output = check_output(['git', 'log', '-1', '--pretty=%ci', join(zone_path, zone)]).decode('utf-8').strip()
serial = datetime.strptime(output, '%Y-%m-%d %H:%M:%S %z').strftime('%y%m%d%H%M')
except Exception as e:
io.stderr(f"Error while parsing commit time for {zone} serial: {e!r}")
except:
serial = datetime.now().strftime('%y%m%d0000')
primary_zones.add(zone)
@ -163,7 +160,7 @@ if node.metadata.get('powerdns/features/pgsql', node.has_bundle('postgresql')):
actions['powerdns_load_pgsql_schema'] = {
'command': node.metadata.get('postgresql/roles/powerdns/password').format_into('PGPASSWORD={} psql -h 127.0.0.1 -d powerdns -U powerdns -w < /usr/share/pdns-backend-pgsql/schema/schema.pgsql.sql'),
'unless': r'sudo -u postgres psql -d powerdns -c "\dt" | grep domains 2>&1 >/dev/null',
'unless': 'sudo -u postgres psql -d powerdns -c "\dt" | grep domains 2>&1 >/dev/null',
'needs': {
'bundle:postgresql',
'pkg_apt:pdns-backend-pgsql',
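The zone serial above is derived from the last commit touching the zone file; a minimal runnable sketch of the scheme (the zone file name is a placeholder, run inside the checkout):

from datetime import datetime
from subprocess import check_output

# commit date of the zone file, e.g. '2024-03-01 14:30:05 +0100'
out = check_output(['git', 'log', '-1', '--pretty=%ci', 'example.com']).decode('utf-8').strip()
# ten digits, YYMMDDhhmm -- newer commits always yield larger serials
serial = datetime.strptime(out, '%Y-%m-%d %H:%M:%S %z').strftime('%y%m%d%H%M')  # e.g. '2403011430'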

View file

@ -143,14 +143,11 @@ def generate_dns_entries_for_nodes(metadata):
if not ip6 and not ip.is_private:
ip6 = ip
if not (ip4 or ip6) and (found_ips['ipv4'] or found_ips['ipv6']):
if not (ip4 or ip6) and found_ips['ipv4']:
# do it again, but do not filter out private addresses
for ip in sorted(found_ips['ipv4']):
if not ip4:
ip4 = ip
for ip in sorted(found_ips['ipv6']):
if not ip6:
ip6 = ip
if ip4:
results.add('{} IN A {}'.format(dns_name, ip4))

View file

@ -71,8 +71,8 @@ actions = {
'chown -R powerdnsadmin:powerdnsadmin /opt/powerdnsadmin/src/powerdnsadmin/static/',
]),
'needs': {
'action:nodejs_install_yarn',
'action:powerdnsadmin_install_deps',
'bundle:nodejs',
'pkg_apt:',
},
},

View file

@ -13,9 +13,6 @@ defaults = {
'python3-wheel': {},
},
},
'nodejs': {
'version': 18,
},
'users': {
'powerdnsadmin': {
'home': '/opt/powerdnsadmin',

View file

@ -7,6 +7,7 @@ from subprocess import check_output
from requests import get
UPDATE_URL = '${url}'
USERNAME = '${username}'
PASSWORD = '${password}'

View file

@ -5,6 +5,7 @@ from ipaddress import ip_address
from json import loads
from subprocess import check_output, run
DOMAIN = '${domain}'
# <%text>

View file

@ -1,5 +1,5 @@
assert node.has_bundle('redis'), f'{node.name}: pretalx needs redis'
assert node.has_bundle('nodejs'), f'{node.name}: pretalx needs nodejs for rebuild step'
assert node.has_bundle('nodejs'), f'{node.name}: pretalx needs nodejs for rebuild and regenerate_css step'
actions = {
'pretalx_create_virtualenv': {
@ -53,6 +53,17 @@ actions = {
},
'triggered': True,
},
'pretalx_regenerate-css': {
'command': 'sudo -u pretalx PRETALX_CONFIG_FILE=/opt/pretalx/pretalx.cfg /opt/pretalx/venv/bin/python -m pretalx regenerate_css',
'needs': {
'action:pretalx_migrate',
'directory:/opt/pretalx/data',
'directory:/opt/pretalx/static',
'file:/opt/pretalx/pretalx.cfg',
'bundle:nodejs',
},
'triggered': True,
},
}
users = {
@ -79,6 +90,7 @@ git_deploy = {
'action:pretalx_install',
'action:pretalx_migrate',
'action:pretalx_rebuild',
'action:pretalx_regenerate-css',
'svc_systemd:pretalx-web:restart',
'svc_systemd:pretalx-worker:restart',
},
@ -109,6 +121,7 @@ svc_systemd = {
'action:pretalx_install',
'action:pretalx_migrate',
'action:pretalx_rebuild',
'action:pretalx_regenerate-css',
'file:/etc/systemd/system/pretalx-web.service',
'file:/opt/pretalx/pretalx.cfg',
},
@ -116,8 +129,7 @@ svc_systemd = {
'pretalx-worker': {
'needs': {
'action:pretalx_install',
'action:pretalx_migrate',
'action:pretalx_rebuild',
'action:pretalx_migrate',
'file:/etc/systemd/system/pretalx-worker.service',
'file:/opt/pretalx/pretalx.cfg',
},
@ -192,6 +204,7 @@ for plugin_name, plugin_config in node.metadata.get('pretalx/plugins', {}).items
'triggers': {
'action:pretalx_migrate',
'action:pretalx_rebuild',
'action:pretalx_regenerate-css',
'svc_systemd:pretalx-web:restart',
'svc_systemd:pretalx-worker:restart',
},
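For debugging a live deployment, the regenerate step can be run by hand with the exact command from the action above:

sudo -u pretalx PRETALX_CONFIG_FILE=/opt/pretalx/pretalx.cfg /opt/pretalx/venv/bin/python -m pretalx regenerate_css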

View file

@ -26,9 +26,6 @@ defaults = {
},
},
},
'nodejs': {
'version': 18,
},
'pretalx': {
'database': {
'user': 'pretalx',

View file

@ -1,13 +0,0 @@
files['/etc/proftpd/proftpd.conf'] = {
'source': f'{node.name}.conf',
'triggers': {
'svc_systemd:proftpd:restart',
},
}
svc_systemd['proftpd'] = {
'needs': {
'file:/etc/proftpd/proftpd.conf',
'pkg_apt:proftpd-core',
},
}

View file

@ -1,26 +0,0 @@
from bundlewrap.metadata import atomic
defaults = {
'apt': {
'packages': {
'proftpd-core': {},
},
},
}
@metadata_reactor.provides(
'firewall/port_rules',
)
def firewall(metadata):
sources = atomic(metadata.get('proftpd/restrict-to', set()))
return {
'firewall': {
'port_rules': {
'20/tcp': sources,
'21/tcp': sources,
'49152-50192/tcp': sources,
},
},
}

View file

@ -1,28 +0,0 @@
from shlex import quote
directories = {
'/opt/pyenv': {},
'/opt/pyenv/install': {},
}
git_deploy = {
'/opt/pyenv/install': {
'repo': 'https://github.com/pyenv/pyenv.git',
'rev': node.metadata.get('pyenv/version'),
'needs': {
'directory:/opt/pyenv/install',
},
},
}
for version in node.metadata.get('pyenv/python_versions', set()):
actions[f'pyenv_install_{version}'] = {
'command': f'PYENV_ROOT=/opt/pyenv /opt/pyenv/install/bin/pyenv install {quote(version)}',
'unless': f'PYENV_ROOT=/opt/pyenv /opt/pyenv/install/bin/pyenv versions --bare | grep -E "^{quote(version)}$"',
'needs': {
'git_deploy:/opt/pyenv/install',
},
'after': {
'pkg_apt:',
},
}
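A hypothetical node metadata entry driving this bundle (key names from the code above, values purely illustrative):

'metadata': {
    'pyenv': {
        'version': 'v2.4.0',  # git rev of pyenv itself
        'python_versions': {
            '3.11.9',
        },
    },
},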

View file

@ -1,20 +0,0 @@
defaults = {
'apt': {
'packages': {
'build-essential': {},
'curl': {},
'libbz2-dev': {},
'libffi-dev': {},
'liblzma-dev': {},
'libncurses-dev': {},
'libreadline-dev': {},
'libsqlite3-dev': {},
'libssl-dev': {},
'libxml2-dev': {},
'libxmlsec1-dev': {},
'tk-dev': {},
'xz-utils': {},
'zlib1g-dev': {},
},
},
}

View file

@ -2,8 +2,8 @@
interface ${interface}
{
AdvSendAdvert on;
MinRtrAdvInterval 60;
MaxRtrAdvInterval 300;
MinRtrAdvInterval 10;
MaxRtrAdvInterval 30;
MinDelayBetweenRAs 10;
prefix ${config.get('prefix', '::/64')}
{
@ -11,7 +11,7 @@ interface ${interface}
AdvAutonomous on;
AdvRouterAddr on;
};
% if config.get('rdnss'):
% if 'rdnss' in config:
RDNSS ${' '.join(sorted(config['rdnss']))}
{
AdvRDNSSLifetime 900;
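With the 10/30 intervals and a hypothetical config of {'prefix': '2001:db8::/64', 'rdnss': {'2001:db8::1'}} for eth0, the template renders roughly to (template lines elided by the hunk are omitted):

interface eth0
{
    AdvSendAdvert on;
    MinRtrAdvInterval 10;
    MaxRtrAdvInterval 30;
    MinDelayBetweenRAs 10;
    prefix 2001:db8::/64
    {
        AdvAutonomous on;
        AdvRouterAddr on;
    };
    RDNSS 2001:db8::1
    {
        AdvRDNSSLifetime 900;
    };
};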

View file

@ -1,30 +1,22 @@
disable_overscan=1
hdmi_force_hotplug=1
dtparam=spi=on
dtparam=audio=on
dtoverlay=vc4-kms-v3d
dtoverlay=vc4-fkms-v3d
max_framebuffers=2
hdmi_drive=2
force_turbo=1
gpu_mem=${node.metadata.get('raspberrypi/gpu_mem', 128)}
% if node.metadata.get('raspberrypi/enable_display'):
display_auto_detect=1
% else:
dtparam=i2c_arm=on
dtparam=i2s=on
dtparam=spi=on
hdmi_drive=2
hdmi_force_hotplug=1
% endif
gpu_mem=${node.metadata['raspberrypi'].get('gpu_mem', 128)}
% if node.os == 'debian':
arm_64bit=1
% endif
arm_boost=1
% for item in sorted(node.metadata.get('raspberrypi/config.txt', set())):
% for item in sorted(node.metadata['raspberrypi'].get('config.txt', set())):
${item}
% endfor
% if node.metadata.get('raspberrypi/enable_camera', False):
camera_auto_detect=1
% if node.metadata['raspberrypi'].get('camera', False):
start_x=1
% endif

View file

@ -15,11 +15,11 @@ actions = {
}
files = {
'/boot/firmware/cmdline.txt': {
'/boot/cmdline.txt': {
'content': ' '.join(sorted(node.metadata['raspberrypi']['cmdline'])),
**file_perms,
},
'/boot/firmware/config.txt': {
'/boot/config.txt': {
'content_type': 'mako',
'context': node.metadata['raspberrypi'],
**file_perms,

Some files were not shown because too many files have changed in this diff