Compare commits

1 commit

Author: Franzi
Commit: 35cc90d678
Message: bundles/voc-loudness-monitor: first try with gstreamer.sh
Date: 2020-08-20 11:21:32 +02:00
CI status: bundlewrap/pipeline/head (all checks were successful, this commit looks good)
781 changed files with 1501 additions and 39098 deletions

View file

@ -1,27 +0,0 @@
root = true
[*]
indent_style = space
indent_size = 4
end_of_line = lf
charset = utf-8
trim_trailing_whitespace = true
insert_final_newline = true
[*.yaml]
indent_size = 2
[*.exs]
indent_size = 2
# possibly sql dumps
[*.sql]
indent_size = unset
# bundlewrap encrypted files
[*.vault]
end_of_line = unset
insert_final_newline = unset
[*.json]
insert_final_newline = unset

.gitignore (vendored), 4 changed lines
View file

@ -1,3 +1 @@
.secrets.cfg*
__pycache__
*.swp
.secrets.cfg

Jenkinsfile (vendored), 55 changed lines
View file

@ -5,59 +5,50 @@ pipeline {
steps {
sh """
[ -d venv ] && rm -rf venv
virtualenv -p python3 venv
. venv/bin/activate
pip install --upgrade pip isort
pip install --upgrade pip
pip install -r requirements.txt
"""
}
}
stage('tests') {
stage('bw test') {
parallel {
stage('syntax checking using editorconfig-checker') {
steps {
sh """
wget -Oec-linux-amd64.tar.gz https://github.com/editorconfig-checker/editorconfig-checker/releases/latest/download/ec-linux-amd64.tar.gz
tar -xzf ec-linux-amd64.tar.gz && rm ec-linux-amd64.tar.gz
bin/ec-linux-amd64 -no-color -exclude '^bin/'
"""
}
}
stage('config and metadata determinism') {
stage('ignore missing vaults') {
steps {
sh """
. venv/bin/activate
export BW_VAULT_DUMMY_MODE=1
export BW_PASS_DUMMY_MODE=1
bw test --metadata-determinism 3 --config-determinism 3
"""
}
}
stage('bw test -i') {
steps {
sh """
. venv/bin/activate
bw test --ignore-missing-faults
"""
}
}
stage('dummy mode') {
when {
branch 'master'
}
steps {
sh """
. venv/bin/activate
export BW_VAULT_DUMMY_MODE=1
export BW_PASS_DUMMY_MODE=1
bw test
"""
}
}
stage('determinism') {
steps {
sh """
. venv/bin/activate
export BW_VAULT_DUMMY_MODE=1
bw test --metadata-determinism 3 --config-determinism 3
"""
}
}
}
}
}
post {
always {
sh """
rm -rf venv
rm -rf bin
"""
sh 'rm -rf venv'
}
}
}

View file

@ -1,58 +0,0 @@
# Port Mapping Table
All ports used by bundles, collected here to make it easy to find available ports for other bundles.
## TCP
Rule of thumb: keep ports below 10000 free for stuff that reserves ports.
| Port | bundle | usage |
| ----------- | -------------------- | ----- |
| 22 | openssh | sshd |
| 25 | postfix | postfix postscreen |
| 53 | powerdns | dns server |
| 80 | nginx | http |
| 113 | oidentd | oidentd |
| 143 | dovecot | dovecot imap |
| 443 | nginx | https |
| 587 | postfix | postfix submission |
| 993 | dovecot | dovecot imap |
| 2525 | postfix | postfix postscreen |
| 4190 | dovecot | dovecot managesieve |
| 5232 | radicale | radicale |
| 5432 | postgresql | postgres |
| 5900 | vmhost | qemu-system-x86 |
| 6379 | redis | redis |
| 6667 | | bitlbee |
| 8086 | influxdb2 | influx |
| 11332-11334 | rspamd | rspamd |
| 20000 | mx-puppet-discord | Bridge |
| 20010 | mautrix-telegram | Bridge |
| 20020 | mautrix-whatsapp | Bridge |
| 20030 | matrix-dimension | Matrix Integrations Manager|
| 20070 | matrix-synapse | sliding-sync |
| 20080 | matrix-synapse | client, federation |
| 20081 | matrix-synapse | prometheus metrics |
| 20090 | matrix-media-repo | media_repo |
| 20090 | matrix-media-repo | prometheus metrics |
| 21010 | grafana | grafana |
| 22000 | forgejo | forgejo |
| 22010 | jenkins-ci | Jenkins CI |
| 22020 | travelynx | Travelynx Web |
| 22030 | octoprint | OctoPrint Web Interface |
| 22040 | miniflux | Miniflux Web Interface |
| 22050 | radicale | radicale carddav and caldav server |
| 22060 | pretalx | gunicorn |
| 22070 | paperless-ng | gunicorn |
| 22080 | netbox | gunicorn |
| 22090 | jugendhackt_tools | gunicorn |
| 22100 | powerdnsadmin | gunicorn |
| 22110 | icinga2-statuspage | gunicorn |
| 22999 | nginx | stub_status |
| 22100 | ntfy | http |
## UDP
| Port | bundle | usage |
| ----------- | -------------------- | ----- |
| 53 | powerdns | dns server |
| 15000-15100 | voc-loudness-monitor | ffmpeg processes outputting rtp streams |

View file

@ -6,17 +6,4 @@ May also include some dummy nodes, for example for deploying websites
onto shared webhosting.
After every commit, CI runs `bw test` according to the Jenkinsfile.
[![Build Status](https://jenkins.franzi.business/buildStatus/icon?job=kunsi%2Fbundlewrap%2Fmain)](https://jenkins.franzi.business/job/kunsi/job/bundlewrap/job/main/)
## automatix
Ensure you set `bundlewrap: true` in your `~/.automatix.cfg.yaml`.
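For reference, a minimal `~/.automatix.cfg.yaml` satisfying this requirement is sketched below; the `bundlewrap` key is the only part this repo asks for, and any other automatix settings you may have are independent of it.

```yaml
# Minimal sketch of ~/.automatix.cfg.yaml for use with this repo.
# Only the bundlewrap switch below is required here; keep your other
# automatix settings as they are.
bundlewrap: true
```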
## system naming
All systems should be named after their location and use.
For example, an influxdb instance hosted at Hetzner Cloud will be `htz-cloud.influxdb`.
The only exceptions are the name servers; they are named after [demons
in fiction](https://en.wikipedia.org/wiki/List_of_demons_in_fiction).
[![Build Status](https://jenkins.kunsmann.eu/buildStatus/icon?job=bundlewrap%2Fmaster)](https://jenkins.kunsmann.eu/job/bundlewrap/job/master/)

View file

@ -1,45 +0,0 @@
name: Upgrade to debian bullseye
systems:
node: foonode
always:
- has_zfs=python: NODES.node.has_bundle('zfs')
pipeline:
- manual: "set icinga2 downtime: https://icinga.franzi.business/monitoring/host/schedule-downtime?host={SYSTEMS.node}"
# apply first so we only see the upgrade changes later
- local: bw apply {SYSTEMS.node}
- manual: update debian version in node groups
- local: "bw apply -o bundle:apt -s symlink:/usr/bin/python pkg_apt: -- {SYSTEMS.node}"
# double time!
- remote@node: DEBIAN_FRONTEND=noninteractive apt-get -y -q -o Dpkg::Options::=--force-confold dist-upgrade
- remote@node: DEBIAN_FRONTEND=noninteractive apt-get -y -q -o Dpkg::Options::=--force-confold dist-upgrade
# reboot into bullseye
- remote@node: systemctl reboot
- local: |
exit=1
while [[ $exit -ne 0 ]];
do
sleep 1
ssh {SYSTEMS.node} true
exit=$?
done
# fix zfs and reboot again
- has_zfs?remote@node: zpool import tank -f
- has_zfs?remote@node: zpool upgrade -a
- has_zfs?remote@node: systemctl reboot
- has_zfs?local: |
exit=1
while [[ $exit -ne 0 ]];
do
sleep 1
ssh {SYSTEMS.node} true
exit=$?
done
# final apply
- local: bw apply {SYSTEMS.node}

View file

@ -0,0 +1,3 @@
APT::Periodic::Update-Package-Lists "1";
APT::Periodic::Unattended-Upgrade "1";
APT::Periodic::AutocleanInterval "7";

View file

@ -0,0 +1,27 @@
Unattended-Upgrade::Origins-Pattern {
"origin=Debian,codename=${node.metadata['os_release']},label=Debian";
"origin=Debian,codename=${node.metadata['os_release']},label=Debian-Security";
// External packages
% for item in sorted(data.get('origins', set())):
"${item}";
% endfor
};
Unattended-Upgrade::AutoFixInterruptedDpkg "true";
Unattended-Upgrade::MinimalSteps "false";
% if data.get('mail', None):
Unattended-Upgrade::Mail "${data['mail']}";
Unattended-Upgrade::MailOnlyOnError "false";
% endif
Unattended-Upgrade::Remove-Unused-Kernel-Packages "true";
Unattended-Upgrade::Remove-New-Unused-Dependencies "true";
Unattended-Upgrade::Remove-Unused-Dependencies "true";
% if data.get('reboot', True):
Unattended-Upgrade::Automatic-Reboot "true";
% else:
Unattended-Upgrade::Automatic-Reboot "false";
% endif

View file

@ -1,38 +0,0 @@
#!/bin/bash
statusfile="/var/tmp/unattended_upgrades.status"
if ! [[ -f "$statusfile" ]]
then
echo "Status file not found"
exit 3
fi
mtime=$(stat -c %Y $statusfile)
now=$(date +%s)
if (( $now - $mtime > 60*60*24*8 ))
then
echo "Status file is older than 8 days!"
exit 3
fi
exitcode=$(cat $statusfile)
case "$exitcode" in
abort_ssh)
echo "Upgrades skipped due to active SSH login"
exit 1
;;
0)
if [[ -f /var/run/reboot-required ]]
then
echo "OK, but updates require a reboot"
exit 1
else
echo "OK"
exit 0
fi
;;
*)
echo "Last exitcode was $exitcode"
exit 2
;;
esac

View file

@ -1,9 +0,0 @@
% for uri in sorted(uris):
Types: ${' '.join(sorted(data.get('types', {'deb'})))}
URIs: ${uri}
Suites: ${os_release}
Components: ${' '.join(sorted(data.get('components', {'main'})))}
Architectures: ${' '.join(sorted(data.get('architectures', {'amd64'})))}
Signed-By: /etc/apt/trusted.gpg.d/${name}.list.asc
% endfor

View file

@ -1,47 +0,0 @@
#!/bin/bash
set -xeuo pipefail
apt-get update
DEBIAN_FRONTEND=noninteractive apt-get -y -q -o Dpkg::Options::=--force-confold dist-upgrade
DEBIAN_FRONTEND=noninteractive apt-get -y -q autoclean
DEBIAN_FRONTEND=noninteractive apt-get -y -q autoremove
% if clean_old_kernels:
existing=$(dpkg --get-selections | grep -E '^linux-(image|headers)-[0-9]' || true)
if [[ -z "$existing" ]]
then
echo "ERROR: No installed kernels found! Aborting!" >&2
exit 1
fi
current=$(uname -r | sed -r 's/-[a-zA-Z]+$//')
latest=$(echo "$existing" | sort --version-sort -t- -k 3,4 | tail -n 1 | sed -r 's/[^0-9]+([0-9]\.[^-]+-[0-9]+).*/\1/')
todelete=$(echo "$existing" | grep -v -E "($current|$latest)" | awk '{ print $1 }' || true)
if [[ -n "$todelete" ]]
then
DEBIAN_FRONTEND=noninteractive apt-get -qy purge $todelete
fi
% endif
% for command in sorted(additional_update_commands):
${command}
% endfor
% for affected, restarts in sorted(restart_triggers.items()):
up_since=$(systemctl show "${affected}" | sed -n 's/^ActiveEnterTimestamp=//p' || echo 0)
up_since_ts=$(date -d "$up_since" +%s || echo 0)
now=$(date +%s)
if [ $((now - up_since_ts)) -lt 3600 ]
then
% for restart in sorted(restarts):
systemctl restart "${restart}" || true
% endfor
fi
% endfor

View file

@ -1,15 +0,0 @@
#!/bin/sh
# /etc/kernel/postinst.d/unattended-upgrades
case "$DPKG_MAINTSCRIPT_PACKAGE::$DPKG_MAINTSCRIPT_NAME" in
linux-image-extra*::postrm)
exit 0;;
esac
if [ -d /var/run ]; then
touch /var/run/reboot-required
if ! grep -q "^$DPKG_MAINTSCRIPT_PACKAGE$" /var/run/reboot-required.pkgs 2> /dev/null ; then
echo "$DPKG_MAINTSCRIPT_PACKAGE" >> /var/run/reboot-required.pkgs
fi
fi

View file

@ -1,3 +0,0 @@
deb http://deb.debian.org/debian/ bookworm main non-free contrib non-free-firmware
deb http://security.debian.org/debian-security bookworm-security main contrib non-free
deb http://deb.debian.org/debian/ bookworm-updates main contrib non-free

View file

@ -1,3 +0,0 @@
deb http://deb.debian.org/debian/ bullseye main non-free contrib
deb http://security.debian.org/debian-security bullseye-security main contrib non-free
deb http://deb.debian.org/debian/ bullseye-updates main contrib non-free

View file

@ -1,3 +0,0 @@
deb http://deb.debian.org/debian/ buster main non-free contrib
deb http://security.debian.org/debian-security buster/updates main contrib non-free
deb http://deb.debian.org/debian/ buster-updates main contrib non-free

View file

@ -1 +0,0 @@
deb http://deb.debian.org/debian/ unstable main non-free contrib

View file

@ -1 +0,0 @@
deb http://raspbian.raspberrypi.org/raspbian/ buster main contrib non-free rpi

View file

@ -1,52 +0,0 @@
#!/bin/bash
# With systemd, we can force logging to the journal. This is better than
# spamming the world with cron mails. You can then view these logs using
# "journalctl -rat upgrade-and-reboot".
if which logger >/dev/null 2>&1
then
# Dump stdout and stderr to logger, which will then put everything
# into the journal.
exec 1> >(logger -t upgrade-and-reboot -p user.info)
exec 2> >(logger -t upgrade-and-reboot -p user.error)
fi
. /etc/upgrade-and-reboot.conf
echo "Starting upgrade-and-reboot for node $nodename ..."
statusfile="/var/tmp/unattended_upgrades.status"
# Workaround, because /var/tmp is usually 1777
[[ "$UID" == 0 ]] && chown root:root "$statusfile"
logins=$(ps h -C sshd -o euser | awk '$1 != "root" && $1 != "sshd" && $1 != "sshmon" && $1 != "nobody"')
if [[ -n "$logins" ]]
then
echo "Will abort now, there are active SSH logins: $logins"
echo "abort_ssh" > "$statusfile"
exit 1
fi
softlockdir=/var/lib/bundlewrap/soft-$nodename
mkdir -p "$softlockdir"
printf '{"comment": "UPDATE", "date": %s, "expiry": %s, "id": "UNATTENDED", "items": ["*"], "user": "root@localhost"}\n' \
$(date +%s) \
$(date -d 'now + 30 mins' +%s) \
>"$softlockdir"/UNATTENDED
trap 'rm -f "$softlockdir"/UNATTENDED' EXIT
do-unattended-upgrades
ret=$?
echo "$ret" > "$statusfile"
if (( $ret != 0 ))
then
exit 1
fi
if [[ -f /var/run/reboot-required ]] && [[ "$auto_reboot_enabled" == "True" ]]
then
systemctl reboot
fi
echo "upgrade-and-reboot for node $nodename is DONE"

View file

@ -1,2 +0,0 @@
nodename="${node.name}"
auto_reboot_enabled="${node.metadata.get('apt/unattended-upgrades/reboot_enabled', True)}"

View file

@ -1,23 +1,3 @@
from bundlewrap.exceptions import BundleError
supported_os = {
'debian': {
10: 'buster',
11: 'bullseye',
12: 'bookworm',
99: 'unstable',
},
'raspbian': {
10: 'buster',
},
}
try:
supported_os[node.os][node.os_version[0]]
except (KeyError, IndexError):
raise BundleError(f'{node.name}: OS {node.os} {node.os_version} is not supported by bundle:apt')
actions = {
'apt_update': {
'command': 'apt-get update',
@ -30,76 +10,39 @@ actions = {
}
files = {
'/etc/apt/sources.list': {
'source': 'sources.list-{}-{}'.format(node.os, supported_os[node.os][node.os_version[0]]),
'triggers': {
'action:apt_update',
},
'/etc/apt/apt.conf.d/50unattended-upgrades': {
'content_type': 'mako',
'source': 'apt.conf-unattended-upgrades',
'context': {'data': node.metadata.get('apt', {}).get('unattended-upgrades', {})}
},
'/etc/apt/apt.conf.d/20auto-upgrades': {
'source': 'apt.conf-auto-upgrades',
},
'/etc/cloud': {
'delete': True,
},
'/etc/kernel/postinst.d/unattended-upgrades': {
'source': 'kernel-postinst.d',
'mode': '0755',
},
'/etc/netplan': {
'delete': True,
},
'/etc/upgrade-and-reboot.conf': {
'content_type': 'mako',
},
'/usr/local/sbin/upgrade-and-reboot': {
'mode': '0700',
},
'/usr/local/sbin/do-unattended-upgrades': {
'content_type': 'mako',
'mode': '0700',
'context': {
'additional_update_commands': node.metadata.get('apt/additional_update_commands', set()),
'clean_old_kernels': node.metadata.get('apt/clean_old_kernels', True),
'restart_triggers': node.metadata.get('apt/restart_triggers', {}),
}
},
'/usr/local/share/icinga/plugins/check_unattended_upgrades': {
'mode': '0755',
},
'/var/lib/cloud': {
'delete': True,
},
}
directories = {
'/etc/apt/sources.list.d': {
'purge': True,
'triggers': {
'action:apt_update',
},
},
}
svc_systemd = {
'apt-daily.timer': {
'running': False,
'enabled': False,
},
'apt-daily-upgrade.timer': {
'running': False,
'enabled': False,
},
'/etc/apt/sources.list.d': {},
}
pkg_apt = {
'apt-transport-https': {},
'unattended-upgrades': {},
'arping': {},
'at': {},
'build-essential': {},
'bzip2': {},
'curl': {},
'diffutils': {},
'dnsutils': {},
'git': {},
'grep': {},
'gzip': {},
'htop': {},
@ -109,34 +52,20 @@ pkg_apt = {
'lsof': {},
'mailutils': {},
'manpages': {},
'molly-guard': {},
'moreutils': {},
'mount': {},
'mtr': {},
'ncdu': {},
'ncurses-term': {},
'netcat-openbsd': {},
'netcat': {},
'nmap': {},
'python3': {},
'python3-dev': {},
'python3-setuptools': {
'needed_by': {
'pkg_pip:',
},
},
'python3-pip': {
'needed_by': {
'pkg_pip:',
},
},
'python3-virtualenv': {},
'rsync': {},
'tar': {},
'tcpdump': {},
'telnet': {},
'tmux': {},
'tree': {},
'unzip': {},
'vim': {},
'wget': {},
'whois': {},
'zip': {},
@ -144,72 +73,25 @@ pkg_apt = {
'cloud-init': {
'installed': False,
},
'molly-guard': {
'installed': False,
},
'netplan.io': {
'installed': False,
},
'popularity-contest': {
'installed': False,
},
'python3-packaging': {
'installed': False,
},
'unattended-upgrades': {
'installed': False,
},
}
if node.os_version[0] >= 11:
symlinks = {
'/usr/bin/python': {
'target': '/usr/bin/python3',
'needs': {
'pkg_apt:python3',
},
for name, data in node.metadata.get('apt', {}).get('repos', {}).items():
files['/etc/apt/sources.list.d/{}.list'.format(name)] = {
'content_type': 'mako',
'content': "\n".join(data['items']),
'triggers': {
'action:apt_update',
},
}
for name, data in node.metadata.get('apt/repos', {}).items():
if 'items' in data:
files['/etc/apt/sources.list.d/{}.list'.format(name)] = {
'content_type': 'mako',
'content': ("\n".join(sorted(data['items']))).format(
os=node.os,
os_release=supported_os[node.os][node.os_version[0]],
),
'triggers': {
'action:apt_update',
},
}
elif 'uris' in data:
uris = {
x.format(
os=node.os,
os_release=supported_os[node.os][node.os_version[0]],
) for x in data['uris']
}
files['/etc/apt/sources.list.d/{}.sources'.format(name)] = {
'source': 'deb822-sources',
'content_type': 'mako',
'context': {
'data': data,
'name': name,
'os_release': supported_os[node.os][node.os_version[0]],
'uris': uris,
},
'triggers': {
'action:apt_update',
},
}
if data.get('install_gpg_key', True):
if 'items' in data:
files['/etc/apt/sources.list.d/{}.list'.format(name)]['needs'] = {
'file:/etc/apt/trusted.gpg.d/{}.list.asc'.format(name),
}
files['/etc/apt/sources.list.d/{}.list'.format(name)]['needs'] = {
'file:/etc/apt/trusted.gpg.d/{}.list.asc'.format(name),
}
files['/etc/apt/trusted.gpg.d/{}.list.asc'.format(name)] = {
'source': 'gpg-keys/{}.asc'.format(name),
@ -218,5 +100,6 @@ for name, data in node.metadata.get('apt/repos', {}).items():
},
}
for package, options in node.metadata.get('apt/packages', {}).items():
pkg_apt[package] = options
if node.metadata.get('apt', {}).get('packages', {}):
for package, options in node.metadata['apt']['packages'].items():
pkg_apt[package] = options

View file

@ -1,42 +0,0 @@
defaults = {
'apt': {
'unattended-upgrades': {
'day': 5,
'hour': 21,
},
},
'icinga2_api': {
'apt': {
'services': {
'UNATTENDED UPGRADES': {
'command_on_monitored_host': '/usr/local/share/icinga/plugins/check_unattended_upgrades',
},
},
},
},
}
@metadata_reactor.provides(
'cron/jobs/upgrade-and-reboot'
)
def patchday(metadata):
day = metadata.get('apt/unattended-upgrades/day')
hour = metadata.get('apt/unattended-upgrades/hour')
spread = metadata.get('apt/unattended-upgrades/spread_in_group', None)
if spread is not None:
spread_nodes = sorted(repo.nodes_in_group(spread))
day += spread_nodes.index(node)
return {
'cron': {
'jobs': {
'upgrade-and-reboot': '{minute} {hour} * * {day} root /usr/local/sbin/upgrade-and-reboot'.format(
minute=node.magic_number % 30,
hour=hour,
day=day%7,
),
},
},
}

View file

@ -1,5 +0,0 @@
context.exec = [
{ path = "pactl" args = "load-module module-native-protocol-tcp" }
{ path = "pactl" args = "load-module module-zeroconf-discover" }
{ path = "pactl" args = "load-module module-zeroconf-publish" }
]

View file

@ -1,3 +0,0 @@
[Autologin]
User=${user}
Session=i3.desktop

View file

@ -1,110 +0,0 @@
from os import listdir
from os.path import join
actions = {
'fc-cache_flush': {
'command': 'fc-cache -f',
'triggered': True,
'needs': {
'pkg_pacman:fontconfig',
},
},
'i3pystatus_create_virtualenv': {
'command': '/usr/bin/python3 -m virtualenv -p python3 /opt/i3pystatus/venv/',
'unless': 'test -d /opt/i3pystatus/venv/',
'needs': {
'directory:/opt/i3pystatus/src',
'pkg_pacman:python-virtualenv',
},
},
'i3pystatus_install': {
'command': ' && '.join([
'cd /opt/i3pystatus/src',
'/opt/i3pystatus/venv/bin/pip install --upgrade pip colour netifaces basiciw pytz',
'/opt/i3pystatus/venv/bin/pip install --upgrade -e .',
]),
'needs': {
'action:i3pystatus_create_virtualenv',
},
'triggered': True,
},
}
directories = {
'/etc/sddm.conf.d': {
'purge': True,
},
'/opt/i3pystatus/src': {},
'/usr/share/fonts/bundlewrap': {
'purge': True,
'triggers': {
'action:fc-cache_flush',
},
},
}
svc_systemd = {
'avahi-daemon': {
'needs': {
'pkg_pacman:avahi',
},
},
'sddm': {
'needs': {
'pkg_pacman:sddm',
},
},
}
git_deploy = {
'/opt/i3pystatus/src': {
'repo': 'https://github.com/enkore/i3pystatus.git',
'rev': 'current',
'triggers': {
'action:i3pystatus_install',
},
},
}
files['/etc/pipewire/pipewire-pulse.conf.d/50-network.conf'] = {}
for filename in listdir(join(repo.path, 'data', 'arch-with-gui', 'files', 'fonts')):
if filename.startswith('.'):
continue
if filename.endswith('.vault'):
# XXX remove this once we have a new bundlewrap release
# https://github.com/bundlewrap/bundlewrap/commit/2429b153dd1ca6781cf3812e2dec9c2b646a546b
from os import environ
if environ.get('BW_VAULT_DUMMY_MODE', '0') == '1':
continue
font_name = filename[:-6]
attrs = {
'content': repo.vault.decrypt_file_as_base64(join('arch-with-gui', 'files', 'fonts', filename)),
'content_type': 'base64',
}
else:
font_name = filename
attrs = {
'source': join('fonts', filename),
'content_type': 'binary',
}
files[f'/usr/share/fonts/bundlewrap/{font_name}'] = {
'triggers': {
'action:fc-cache_flush',
},
**attrs,
}
if node.metadata.get('arch-with-gui/autologin_as', None):
files['/etc/sddm.conf.d/autologin.conf'] = {
'context': {
'user': node.metadata.get('arch-with-gui/autologin_as'),
},
'content_type': 'mako',
'before': {
'svc_systemd:sddm',
},
}

View file

@ -1,124 +0,0 @@
assert node.os == 'arch'
defaults = {
'backups': {
'paths': {
'/etc/netctl',
},
},
'icinga_options': {
'exclude_from_monitoring': True,
},
'nftables': {
'input': {
'50-avahi': {
'udp dport 5353 accept',
'udp sport 5353 accept',
},
},
},
'pacman': {
'packages': {
# fonts
'fontconfig': {},
'ttf-dejavu': {
'needed_by': {
'pkg_pacman:sddm',
},
},
# login management
'sddm': {},
# networking
'avahi': {},
'netctl': {},
'rfkill': {},
'wpa_supplicant': {},
'wpa_actiond': {},
# shell and other gui stuff
'dunst': {},
'fish': {},
'kitty': {},
'libnotify': {}, # provides notify-send
'light': {},
'redshift': {},
'rofi': {},
# sound
'calf': {},
'easyeffects': {},
'lsp-plugins': {},
'pavucontrol': {},
'pipewire': {},
'pipewire-jack': {},
'pipewire-pulse': {},
'pipewire-zeroconf': {},
'qpwgraph': {},
# window management
'i3-wm': {},
'i3lock': {},
'xss-lock': {},
# i3pystatus dependencies
'iw': {},
'wireless_tools': {},
# Xorg
'xf86-input-libinput': {},
'xf86-input-wacom': {},
'xorg-server': {},
'xorg-setxkbmap': {},
'xorg-xev': {},
'xorg-xinput': {},
'xorg-xset': {},
# all them apps
'browserpass': {},
'browserpass-firefox': {},
'ffmpeg': {},
'firefox': {},
'gimp': {},
'imagemagick': {},
'inkscape': {},
'kdenlive': {},
'maim': {},
'mosh': {},
'mosquitto': {},
'mpv': {},
'pass': {},
'pass-otp': {},
'pdftk': {},
'pwgen': {},
'qpdfview': {},
'samba': {},
'shotcut': {},
'sipcalc': {},
'the_silver_searcher': {},
'tlp': {},
'virt-manager': {},
'xclip': {},
'xdotool': {}, # needed for maim window selection
},
},
}
@metadata_reactor.provides(
'backups/paths',
)
def backup_every_user_home(metadata):
paths = set()
for user, config in metadata.get('users', {}).items():
if config.get('delete', False):
continue
paths.add(config.get('home', f'/home/{user}'))
return {
'backups': {
'paths': paths,
},
}

View file

@ -1,28 +0,0 @@
#!/bin/bash
statusfile="/var/tmp/backup.monitoring"
if [[ ! -r "$statusfile" ]]
then
echo "cannot read $statusfile"
exit 3
fi
. "$statusfile"
if [[ -z "$msg" ]] || [[ -z "$status" ]] || [[ -z "$timestamp" ]]
then
echo "status file is corrupt, cannot read status"
exit 3
fi
two_days_ago=$(($(date +%s) - 86400*2))
if [[ $timestamp -lt $two_days_ago ]]
then
echo "last saved status is older than two days"
exit 2
fi
echo "$msg"
exit "$status"

View file

@ -1,121 +0,0 @@
#!/bin/bash
statusfile="/var/tmp/backup.monitoring"
logdir="/var/log/backup-client"
lock="/tmp/backup-client-is-running"
ssh_login="${username}@${server}"
ssh_opts="-o IdentityFile=/etc/backup.priv -o StrictHostKeyChecking=accept-new -p ${port}"
nodename="${node.name}"
<%text>
try="${1:-<unknown>}"
[[ -n "$DEBUG" ]] && set -x
do_backup() {
echo "==> starting backup for '$1'"
# Compress level 1 is a good compromise between speed and cpu usage.
rsync --compress-level=1 -aAP --numeric-ids --delete --relative \
--rsync-path="/usr/bin/rsync --fake-super" \
-e "ssh $ssh_opts" \
"$1" "$ssh_login":backups/
# Exit code 24 means some files have vanished during rsync.
# I don't know why, but this is very common, apparently?
exitcode=$?
echo "==> backup for '$1' exited $exitcode"
if [[ $exitcode != 0 ]] && [[ $exitcode != 24 ]]
then
rsync_errors+=" $1 ($exitcode)"
fi
}
on_exit() {
rmdir "$lock"
echo "*** END BACKUP RUN $(date '+%F %T %z') ***"
}
prepare_and_cleanup_logdir() {
# rsync logs tend to get very large. That's why we pipe them through
# gzip when writing. Because we're running multiple tries, we cannot
# rely on logrotate to rotate the logs, we have to do it ourselves.
# Of course that means we have to clean up after ourselves, too.
mkdir -p "$logdir"
find "$logdir" -type f -mtime +14 -name "*.log" -delete
find "$logdir" -type f -mtime +14 -name "*.gz" -delete
}
save_result_for_monitoring() {
code=$1
msg=$2
printf "status=%q\n" "$code" > "$statusfile"
printf "msg=%q\n" "$msg" >> "$statusfile"
printf "timestamp=%q\n" "$(date +%s)" >> "$statusfile"
}
if ! mkdir "$lock" >/dev/null 2>&1
then
save_result_for_monitoring 2 "could not get lock"
exit 1
fi
trap "on_exit" EXIT
# redirect stdout and stderr to logfile
prepare_and_cleanup_logdir
logfile="$logdir/backup--$(date '+%F--%H-%M-%S')--$$.log.gz"
echo "All log output will go to $logfile" | logger -it backup-client
exec > >(gzip >"$logfile")
exec 2>&1
# this is where the real work starts
ts_begin=$(date +%s)
echo "*** BEGIN BACKUP RUN $(date '+%F %T %z') ***"
echo "This is attempt $try"
echo "using ssh options [$ssh_opts]"
echo "using ssh login [$ssh_login]"
if ! [[ -f /etc/backup.priv ]]
then
save_result_for_monitoring 2 "/etc/backup.priv does not exist"
exit 100
fi
for i in /etc/backup-pre-hooks.d/*
do
[[ -x "$i" ]] || continue
echo "Running pre-hook '$i'"
if ! $i
then
save_result_for_monitoring 2 "pre-hook '$i' failed to run"
exit 1
fi
done
rsync_errors=""
</%text>
% for path in sorted(paths):
do_backup "${path}"
% endfor
<%text>
if [[ -n "$rsync_errors" ]]
then
save_result_for_monitoring 2 "rsync failed:$rsync_errors"
exit 1
fi
ssh $ssh_opts $ssh_login "sudo /usr/local/bin/rotate-single-backup-client $nodename" </dev/null
ssh_error=$?
if [[ $ssh_error -ne 0 ]]
then
save_result_for_monitoring 2 "rotating backups failed with status code $ssh_error"
exit 1
fi
ts_end=$(date +%s)
echo "Success"
save_result_for_monitoring 0 "Backup finished at $(date '+%F %T %z') (took $((ts_end - ts_begin)) seconds)"
</%text>

View file

@ -1,22 +0,0 @@
#!/bin/bash
# Try generating a backup multiple times. If one attempt succeeds, we're
# done. If not, there will be logs for every attempt, plus monitoring
# will read the result of the last backup.
for try in {1..3}
do
generate-backup "$try"
exitcode=$?
if [[ $exitcode -eq 100 ]]
then
# fatal error, cannot recover
exit 1
elif [[ $exitcode -eq 0 ]]
then
# successful backup
exit 0
else
sleep 60
fi
done

View file

@ -1,74 +0,0 @@
from os.path import join
if node.has_bundle('zfs'):
wanted_paths = node.metadata.get('backups/paths', set())
snapshot_paths = node.metadata.get('zfs/filesystems_with_backup_snapshots', {})
backup_paths = set()
for path in wanted_paths:
path_found = False
for zfs_paths in snapshot_paths.values():
if path in zfs_paths:
backup_paths.add(f'/mnt/backup-snapshot{path}')
path_found = True
if not path_found:
backup_paths.add(path)
else:
backup_paths = node.metadata.get('backups/paths', set())
if node.metadata.get('backups/exclude_from_backups', False):
# make sure nobody tries to do something funny
for file in {
'/etc/backup.priv',
'/usr/local/bin/generate-backup',
'/usr/local/bin/generate-backup-with-retries',
'/var/tmp/backup.monitoring', # status file
}:
files[file] = {
'delete': True,
}
else:
backup_target = repo.get_node(node.metadata.get('backup-client/target'))
files['/etc/backup.priv'] = {
'content': repo.libs.ssh.generate_ed25519_private_key(
node.metadata.get('backup-client/user-name'),
backup_target,
),
'mode': '0400',
}
files['/usr/local/bin/generate-backup'] = {
'content_type': 'mako',
'context': {
'username': node.metadata.get('backup-client/user-name'),
'server': backup_target.metadata.get('backup-server/my_hostname'),
'port': backup_target.metadata.get('backup-server/my_ssh_port'),
'paths': backup_paths,
},
'mode': '0700',
}
files['/usr/local/bin/generate-backup-with-retries'] = {
'mode': '0700',
}
files['/usr/local/share/icinga/plugins/check_backup_last_run'] = {
'mode': '0755',
}
files['/etc/logrotate.d/backup-client'] = {
'delete': True,
}
directories['/etc/backup-pre-hooks.d'] = {
'purge': True,
}
for hname, hcontent in node.metadata.get('backup-client/pre-hooks', {}).items():
files[f'/etc/backup-pre-hooks.d/50-{hname}'] = {
'content': '#!/bin/sh\n\n' + hcontent,
'mode': '0700',
}

View file

@ -1,39 +0,0 @@
from hashlib import md5
defaults = {
'backup-client': {
# unix user names cannot be longer than 32 characters.
# bundlewrap raises an error if the name is longer than 30 chars.
'user-name': 'c-' + md5(node.name.encode('UTF-8')).hexdigest()[:28],
},
}
@metadata_reactor.provides(
'cron/jobs/backup',
'icinga2_api/backup-client/services',
)
def cron(metadata):
if metadata.get('backups/exclude_from_backups', False):
return {}
return {
'cron': {
'jobs': {
# spread backups between 00:00 and 04:59 UTC
'backup': '{} {} * * * root /usr/local/bin/generate-backup-with-retries'.format(
(node.magic_number % 60),
(node.magic_number % 2),
),
},
},
'icinga2_api': {
'backup-client': {
'services': {
'BACKUP LAST RUN': {
'command_on_monitored_host': '/usr/local/share/icinga/plugins/check_backup_last_run',
},
},
},
},
}

View file

@ -1,57 +0,0 @@
#!/usr/bin/env python3
from datetime import datetime
from json import load
from subprocess import check_output
from sys import argv, exit
from time import time
NODE = argv[1]
ONE_BACKUP_EVERY_HOURS = int(argv[2])
NOW = int(time())
HOUR_SECONDS = 60 * 60
snaps = set()
try:
with open(f'/etc/backup-server/config.json', 'r') as f:
server_settings = load(f)
# get all existing snapshots for NODE
for line in check_output('LC_ALL=C zfs list -H -t snapshot -o name', shell=True).splitlines():
line = line.decode('UTF-8')
if line.startswith('{}/{}@'.format(server_settings['zfs-base'], NODE)):
_, snapname = line.split('@', 1)
if 'zfs-auto-snap' in snapname:
# migration from auto-snapshots, ignore
continue
ts, bucket = snapname.split('-', 1)
snaps.add(int(ts))
if not snaps:
print('No backups found!')
exit(2)
last_snap = sorted(snaps)[-1]
delta = NOW - last_snap
print('Last backup was on {} UTC'.format(
datetime.fromtimestamp(last_snap).strftime('%Y-%m-%d %H:%M:%S'),
))
# One day without backups is still okay. There may be fluctuations
# because of transfer speed, amount of data, changes in backup
# schedule etc.
if delta > ((HOUR_SECONDS * (ONE_BACKUP_EVERY_HOURS + 1)) + (HOUR_SECONDS*24)):
exit(2)
elif delta > (HOUR_SECONDS * (ONE_BACKUP_EVERY_HOURS + 1)):
exit(1)
else:
exit(0)
except Exception as e:
print(repr(e))
exit(3)

View file

@ -1,112 +0,0 @@
#!/usr/bin/env python3
from json import load
from subprocess import check_call, check_output
from sys import argv
from time import time
NODE = argv[1]
NOW = int(time())
DAY_SECONDS = 60 * 60 * 24
INTERVALS = {
'daily': DAY_SECONDS,
'weekly': 7 * DAY_SECONDS,
'monthly': 30 * DAY_SECONDS,
}
buckets = {}
def syslog(msg):
check_output(['logger', '-t', f'backup-{NODE}', msg])
with open(f'/etc/backup-server/config.json', 'r') as f:
server_settings = load(f)
with open(f'/etc/backup-server/clients/{NODE}', 'r') as f:
client_settings = load(f)
# get all existing snapshots for NODE
for line in check_output('LC_ALL=C zfs list -H -t snapshot -o name', shell=True).splitlines():
line = line.decode('UTF-8')
if line.startswith('{}/{}@'.format(server_settings['zfs-base'], NODE)):
_, snapname = line.split('@', 1)
if 'zfs-auto-snap' in snapname:
# migration from auto-snapshots, ignore
continue
ts, bucket = snapname.split('-', 1)
buckets.setdefault(bucket, set()).add(int(ts))
syslog(f'classified {line} as {bucket} from {ts}')
# determine if we need to create a new snapshot
for bucket in INTERVALS.keys():
snapshots = sorted(buckets.get(bucket, set()))
if snapshots:
last_snap = snapshots[-1]
delta = NOW - last_snap
fresh_age = INTERVALS[bucket] - DAY_SECONDS
if delta > fresh_age:
# last snapshot is older than what we want. create a new one.
check_call(
'zfs snapshot {}/{}@{}-{}'.format(
server_settings['zfs-base'],
NODE,
NOW,
bucket,
),
shell=True,
)
buckets.setdefault(bucket, set()).add(NOW)
syslog(f'created new snapshot {NOW}-{bucket}')
else:
syslog(f'existing snapshot {last_snap}-{bucket} is fresh enough')
else:
check_call(
'zfs snapshot {}/{}@{}-{}'.format(
server_settings['zfs-base'],
NODE,
NOW,
bucket,
),
shell=True,
)
buckets.setdefault(bucket, set()).add(NOW)
syslog(f'created initial snapshot {NOW}-{bucket}')
# finally, see if we can delete any snapshots, because they are old enough
for bucket in INTERVALS.keys():
snapshots = sorted(buckets.get(bucket, set()))
if not snapshots:
syslog(f'something is wrong, there are no snapshots for {bucket}')
continue
# see comment in zfs-auto-snapshot about doing +1 here
keep_age = INTERVALS[bucket] * (client_settings[bucket]+1)
# oldest snapshots come first
for ts in snapshots[:-int(client_settings[bucket])]:
delta = NOW - ts
if delta >= keep_age:
check_call(
'zfs destroy {}/{}@{}-{}'.format(
server_settings['zfs-base'],
NODE,
ts,
bucket,
),
shell=True,
)
syslog(f'removing snapshot {ts}-{bucket}, age {delta}, keep_age {keep_age}')
else:
syslog(f'keeping snapshot {ts}-{bucket}, age not reached')
for ts in snapshots[int(client_settings[bucket]):]:
syslog(f'keeping snapshot {ts}-{bucket}, count')

View file

@ -1,3 +0,0 @@
% for username, nodename in sorted(clients.items()):
${username} ALL=NOPASSWD:/usr/local/bin/rotate-single-backup-client ${nodename}
% endfor

View file

@ -1,66 +0,0 @@
repo.libs.tools.require_bundle(node, 'zfs')
from os.path import join
from bundlewrap.metadata import metadata_to_json
dataset = node.metadata.get('backup-server/zfs-base')
files = {
'/etc/backup-server/config.json': {
'content': metadata_to_json({
'zfs-base': dataset,
}),
},
'/usr/local/bin/rotate-single-backup-client': {
'mode': '0755',
},
'/usr/local/share/icinga/plugins/check_backup_for_node': {
'mode': '0755',
},
}
directories['/etc/backup-server/clients'] = {
'purge': True,
}
sudoers = {}
for nodename, config in node.metadata.get('backup-server/clients', {}).items():
sudoers[config['user']] = nodename
users[config['user']] = {
'home': f'/srv/backups/{nodename}',
}
files[f'/etc/backup-server/clients/{nodename}'] = {
'content': metadata_to_json(config['retain']),
}
files[f'/srv/backups/{nodename}/.ssh/authorized_keys'] = {
'content': repo.libs.ssh.generate_ed25519_public_key(
config['user'],
node,
),
'owner': config['user'],
'mode': '0400',
'needs': {
f'zfs_dataset:{dataset}/{nodename}',
},
}
directories[f'/srv/backups/{nodename}/backups'] = {
'owner': config['user'],
'mode': '0700',
'needs': {
f'zfs_dataset:{dataset}/{nodename}',
},
}
files['/etc/sudoers.d/backup-server'] = {
'source': 'sudoers',
'content_type': 'mako',
'context': {
'clients': sudoers,
},
}

View file

@ -1,172 +0,0 @@
defaults = {
'backup-server': {
'my_ssh_port': 22,
},
'openssh': {
'allowed_users': {
# Usernames for backup clients always start with 'c-'
'c-*',
},
},
'zfs': {
# The whole point of doing backups is to keep them for a long
# time, which eliminates the need for this check.
'enable_old_snapshots_check': False,
},
}
@metadata_reactor.provides(
'backup-server/clients',
'backup-server/my_hostname',
)
def get_my_clients(metadata):
my_clients = {}
retain_defaults = {
'daily': 14,
'weekly': 4,
'monthly': 6,
}
for rnode in repo.nodes:
if not rnode.has_bundle('backup-client') or rnode.metadata.get('backups/exclude_from_backups', False):
continue
if node.name != rnode.metadata.get('backup-client/target'):
continue
my_clients[rnode.name] = {
'exclude_from_monitoring': rnode.metadata.get(
'backup-client/exclude_from_monitoring',
rnode.metadata.get(
'icinga_options/exclude_from_monitoring',
False,
),
),
'one_backup_every_hours': rnode.metadata.get('backup-client/one_backup_every_hours', 24),
'user': rnode.metadata.get('backup-client/user-name'),
'retain': {
'daily': rnode.metadata.get('backups/retain/daily', retain_defaults['daily']),
'weekly': rnode.metadata.get('backups/retain/weekly', retain_defaults['weekly']),
'monthly': rnode.metadata.get('backups/retain/monthly', retain_defaults['monthly']),
},
}
return {
'backup-server': {
'clients': my_clients,
'my_hostname': metadata.get('hostname'),
},
}
@metadata_reactor.provides(
'backup-server/zfs-base',
'dm-crypt/encrypted-devices',
'zfs/pools',
)
def zfs_pool(metadata):
if not metadata.get('backup-server/encrypted-devices', {}):
return {}
crypt_devices = {}
pool_devices = set()
unlock_actions = set()
for number, (device, passphrase) in enumerate(sorted(metadata.get('backup-server/encrypted-devices', {}).items())):
crypt_devices[device] = {
'dm-name': f'backup{number}',
'passphrase': passphrase,
}
pool_devices.add(f'/dev/mapper/backup{number}')
unlock_actions.add(f'action:dm-crypt_open_backup{number}')
pool_opts = {
'devices': pool_devices,
}
if len(pool_devices) > 2:
pool_opts['type'] = 'raidz'
elif len(pool_devices) > 1:
pool_opts['type'] = 'mirror'
return {
'backup-server': {
'zfs-base': 'backups',
},
'dm-crypt': {
'encrypted-devices': crypt_devices,
},
'zfs': {
'pools': {
'backups': {
'when_creating': {
'config': [
pool_opts,
],
},
'needs': unlock_actions,
# That's a bit hacky. We do it this way to auto-import
# the pool after decrypting the devices. Otherwise
# the pool wouldn't exist, which leads to bundlewrap
# trying to re-create the pool.
# Also, -N to not auto-mount anything.
'unless': 'zpool import -N backups',
},
},
}
}
@metadata_reactor.provides(
'zfs/datasets',
'zfs/snapshots/snapshot_never',
)
def zfs_datasets_and_snapshots(metadata):
zfs_datasets = {}
for client in metadata.get('backup-server/clients', {}).keys():
dataset = '{}/{}'.format(metadata.get('backup-server/zfs-base'), client)
zfs_datasets[dataset] = {
'mountpoint': '/srv/backups/{}'.format(client),
'compression': 'on',
}
return {
'zfs': {
'datasets': zfs_datasets,
'snapshots': {
'snapshot_never': {
metadata.get('backup-server/zfs-base'),
},
},
},
}
@metadata_reactor.provides(
'icinga2_api/backup-server/services',
)
def monitoring(metadata):
services = {}
for client, config in metadata.get('backup-server/clients', {}).items():
if config.get('exclude_from_monitoring', False):
continue
services[f'BACKUPS FOR NODE {client}'] = {
'command_on_monitored_host': 'sudo /usr/local/share/icinga/plugins/check_backup_for_node {} {}'.format(
client,
config['one_backup_every_hours'],
),
'vars.sshmon_timeout': 20,
}
return {
'icinga2_api': {
'backup-server': {
'services': services,
},
},
}

View file

@ -1,3 +0,0 @@
% for k, v in sorted(node.metadata.get('environment', {}).items()):
${k}=${v}
% endfor

View file

@ -1,39 +0,0 @@
# Beware! This file is rewritten by htop when settings are changed in the interface.
# The parser is also very primitive, and not human-friendly.
fields=0 48 17 18 38 39 40 2 46 47 49 1
sort_key=46
sort_direction=-1
tree_sort_key=0
tree_sort_direction=1
hide_kernel_threads=1
hide_userland_threads=0
shadow_other_users=0
show_thread_names=0
show_program_path=1
highlight_base_name=1
highlight_megabytes=0
highlight_threads=1
highlight_changes=0
highlight_changes_delay_secs=5
find_comm_in_cmdline=1
strip_exe_from_cmdline=1
show_merged_command=0
tree_view=0
tree_view_always_by_pid=0
header_margin=1
detailed_cpu_time=1
cpu_count_from_one=1
show_cpu_usage=1
show_cpu_frequency=0
show_cpu_temperature=0
degree_fahrenheit=0
update_process_names=0
account_guest_in_cpu_meter=0
color_scheme=0
enable_mouse=0
delay=10
left_meters=Tasks LoadAverage Uptime Memory CPU LeftCPUs2 CPU
left_meter_modes=2 2 2 1 1 1 2
right_meters=Hostname CPU RightCPUs2
right_meter_modes=2 3 1
hide_function_bar=0

View file

@ -1 +0,0 @@
LANG=${node.metadata['locale']['default']}

View file

@ -1,3 +0,0 @@
% for locale in sorted(node.metadata['locale']['installed']):
${locale} ${locale.split('.')[-1]}
% endfor

View file

@ -1,96 +0,0 @@
from inspect import cleandoc
from uuid import UUID
from bundlewrap.utils.text import italic
files = {
'/etc/default/locale': {
'content_type': 'mako',
'needs': {
'action:locale-gen',
},
},
'/etc/hosts': {
'content_type': 'mako',
},
'/etc/htoprc.global': {
'source': 'htoprc',
},
'/etc/motd': {
'content': '',
},
'/etc/environment': {
'content_type': 'mako',
'before': {
'action:',
'pkg_apt:',
'pkg_pacman:',
},
},
}
locale_needs = set()
for locale in sorted(node.metadata.get('locale/installed')):
actions[f'ensure_locale_{locale}_is_enabled'] = {
'command': f"sed -i '/{locale}/s/^# *//g' /etc/locale.gen",
'unless': f"grep -e '^{locale}' /etc/locale.gen",
'triggers': {
'action:locale-gen',
},
'needs': locale_needs,
}
locale_needs = {f'action:ensure_locale_{locale}_is_enabled'}
actions = {
'locale-gen': {
'triggered': True,
'command': 'locale-gen',
},
}
description = []
if not node.metadata.get('icinga_options/exclude_from_monitoring', False):
description.append('icingaweb2: https://icinga.franzi.business/monitoring/host/show?host={}'.format(node.name))
if node.has_bundle('telegraf'):
description.append('Grafana: https://grafana.kunsmann.eu/d/{}'.format(UUID(int=node.magic_number).hex[:10]))
if (
not node.metadata.get('icinga_options/exclude_from_monitoring', False) or
node.has_bundle('telegraf')
):
description.append('') # divider line
if node.metadata.get('nginx/vhosts', {}):
description.append('nginx vhosts:')
for vname, vconfig in sorted(node.metadata.get('nginx/vhosts', {}).items()):
if vconfig.get('ssl', 'letsencrypt') is not None:
proto = 'https'
else:
proto = 'http'
domain = vconfig.get('domain', vname)
description.append(' {}: {}://{}{}'.format(
vname,
proto,
domain,
vconfig.get('website_check_path', '/'),
))
if node.metadata.get('description', []):
description.append('') # divider line
for line in node.metadata.get('description', []):
description.append('# {}'.format(italic(line)))
if description:
files['/etc/node.description'] = {
'content': '\n'.join(description) + '\n',
}
else:
files['/etc/node.description'] = {
'delete': True,
}

View file

@ -1,25 +0,0 @@
defaults = {
'bash_functions': {
'h': 'cp /etc/htoprc.global ~/.htoprc; mkdir -p ~/.config/htop; cp /etc/htoprc.global ~/.config/htop/htoprc; htop',
},
'locale': {
'default': 'en_US.UTF-8',
'installed': {
'de_DE.UTF-8',
'en_US.UTF-8',
},
},
}
@metadata_reactor.provides(
'locale/installed',
)
def ensure_default_is_installed(metadata):
return {
'locale': {
'installed': {
metadata.get('locale/default'),
},
},
}

View file

@ -0,0 +1,6 @@
% for key in keys:
key ${key['name']} {
algorithm ${key['algorithm']};
secret "${key['secret']}";
};
% endfor

View file

@ -0,0 +1,30 @@
include "/etc/bind/keys.conf";
% for zone in sorted(primary_zones):
zone "${zone}" IN {
type master;
file "/var/lib/bind/primary/${zone}";
};
% endfor
zone "10.in-addr.arpa" { type master; file "/etc/bind/db.empty"; };
zone "16.172.in-addr.arpa" { type master; file "/etc/bind/db.empty"; };
zone "17.172.in-addr.arpa" { type master; file "/etc/bind/db.empty"; };
zone "18.172.in-addr.arpa" { type master; file "/etc/bind/db.empty"; };
zone "19.172.in-addr.arpa" { type master; file "/etc/bind/db.empty"; };
zone "20.172.in-addr.arpa" { type master; file "/etc/bind/db.empty"; };
zone "21.172.in-addr.arpa" { type master; file "/etc/bind/db.empty"; };
zone "22.172.in-addr.arpa" { type master; file "/etc/bind/db.empty"; };
zone "23.172.in-addr.arpa" { type master; file "/etc/bind/db.empty"; };
zone "24.172.in-addr.arpa" { type master; file "/etc/bind/db.empty"; };
zone "25.172.in-addr.arpa" { type master; file "/etc/bind/db.empty"; };
zone "26.172.in-addr.arpa" { type master; file "/etc/bind/db.empty"; };
zone "27.172.in-addr.arpa" { type master; file "/etc/bind/db.empty"; };
zone "28.172.in-addr.arpa" { type master; file "/etc/bind/db.empty"; };
zone "29.172.in-addr.arpa" { type master; file "/etc/bind/db.empty"; };
zone "30.172.in-addr.arpa" { type master; file "/etc/bind/db.empty"; };
zone "31.172.in-addr.arpa" { type master; file "/etc/bind/db.empty"; };
zone "168.192.in-addr.arpa" { type master; file "/etc/bind/db.empty"; };

View file

@ -0,0 +1,3 @@
% for o in node.metadata.get('bind', {}).get('options', []):
<%include file="options/${o}"/>
% endfor

bundles/bind/items.py (new file), 144 lines added
View file

@ -0,0 +1,144 @@
from os import listdir
from os.path import isfile, join
from datetime import datetime
from subprocess import check_output
ZONE_HEADER = """
; _ ____ _ _ _____ _ _ _ _ ____
; / \\ / ___| | | |_ _| | | | \\ | |/ ___|
; / _ \\| | | |_| | | | | | | | \\| | | _
; / ___ \\ |___| _ | | | | |_| | |\\ | |_| |
; /_/ \\_\\____|_| |_| |_| \\___/|_| \\_|\\____|
;
; --> This file is managed by BundleWrap! <--
$TTL 60
@ IN SOA ns-1.kunbox.net. hostmaster.kunbox.net. (
{serial}
3600
3600
86400
300
)
@ IN NS ns-1.kunbox.net.
IN NS ns-2.kunbox.net.
"""
svc_systemd = {
'bind9': {
'needs': {
'pkg_apt:bind9',
},
},
}
pkg_apt = {
'bind9': {},
}
directories = {
"/var/lib/bind/primary": {
'group': 'bind',
'needs': {
'pkg_apt:bind9',
},
'owner': 'bind',
'purge': True,
},
"/var/log/named": {
'group': 'bind',
'needs': {
'pkg_apt:bind9',
},
'owner': 'bind',
},
}
files = {
"/etc/bind/keys.conf": {
'content_type': 'mako',
'group': 'bind',
'mode': '0440',
'context': {
'keys': node.metadata.get('bind', {}).get('keys', []),
},
'triggers': {
'svc_systemd:bind9:reload',
},
'needs': {
'pkg_apt:bind9',
},
},
"/etc/bind/named.conf.options": {
'content_type': 'mako',
'group': 'bind',
'mode': '0440',
'triggers': {
'svc_systemd:bind9:reload',
},
'needs': {
'pkg_apt:bind9',
},
},
}
if node.metadata.get('bind', {}).get('rndc', ''):
files['/etc/bind/rndc.conf'] = {
'mode': '0440',
'source': 'rndc/{}'.format(node.metadata['bind']['rndc']),
'content_type': 'mako',
'triggers': {
'svc_systemd:bind9:reload',
},
}
# this looks for zones either directly at data/bind/zones/ or in a subdirectory if so configured
zone_path = join(
repo.path,
'data', 'bind', 'files', 'zones',
node.metadata.get('bind', {}).get('zone_path', ""),
)
primary_zones = set()
for zone in listdir(zone_path):
if not isfile(join(zone_path, zone)) or zone.startswith(".") or zone.startswith("_"):
continue
output = check_output(['git', 'log', '-1', '--pretty=%ci', join(zone_path, zone)]).decode('utf-8').strip()
serial = datetime.strptime(output, '%Y-%m-%d %H:%M:%S %z').strftime('%y%m%d%H%M')
primary_zones.add(zone)
files["/var/lib/bind/primary/{}".format(zone)] = {
'content_type': 'mako',
'context': {
'header': ZONE_HEADER.format(serial=serial),
'metadata_records': node.metadata.get('bind', {}).get('zones_primary', {}).get(zone, {}).get('records', []),
},
'mode': '0444',
'owner': 'bind',
'source': 'zones/{}'.format(join(node.metadata.get('bind', {}).get('zone_path', ""), zone)),
'triggers': {
'svc_systemd:bind9:reload',
},
'needs': {
'pkg_apt:bind9'
},
}
primary_zones.union(set(node.metadata.get('bind', {}).get('zones_primary', {}).keys()))
files['/etc/bind/named.conf.local'] = {
'content_type': 'mako',
'context': {
'primary_zones': list(primary_zones),
},
'group': 'bind',
'triggers': {
'svc_systemd:bind9:reload',
},
'needs': {
'pkg_apt:bind9',
},
}

bundles/bind/metadata.py (new file), 33 lines added
View file

@ -0,0 +1,33 @@
from bundlewrap.metadata import atomic
defaults = {
'icinga2_api': {
'bind': {
'services': {
'BIND PROCESS': {
'command_on_monitored_host': '/usr/lib/nagios/plugins/check_procs -C named -c 1:1',
},
},
},
},
}
@metadata_reactor
def port_checks(metadata):
services = {}
for interface in metadata.get('bind/listen', set()):
services[f'BIND PORT {interface}'] = {
'check_command': 'tcp',
'vars.tcp_address': metadata.get(f'interfaces/{interface}/ip_addresses')[0],
'vars.tcp_port': 53,
}
return {
'icinga2_api': {
'bind': {
'services': services,
},
},
}

View file

@ -1,43 +0,0 @@
log syslog all;
router id ${node.metadata.get('bird/my_ip')};
debug protocols all;
ipv4 table master4;
protocol device {
}
protocol kernel {
scan time 30;
ipv4 {
export where source != RTS_STATIC;
};
}
% if node.metadata.get('bird/static_routes', set()):
protocol static {
ipv4;
% for route in sorted(node.metadata.get('bird/static_routes', set())):
% for name, config in sorted(node.metadata.get('bird/bgp_neighbors', {}).items()):
route ${route} via ${config['local_ip']};
% endfor
% endfor
}
% endif
% for name, config in sorted(node.metadata.get('bird/bgp_neighbors', {}).items()):
protocol bgp '${name}' {
local ${config['local_ip']} as ${config['local_as']};
neighbor ${config['neighbor_ip']} as ${config['neighbor_as']};
hold time ${config.get('hold_time', 15)};
error wait time 5, 10;
direct;
ipv4 {
next hop self;
import all;
export all;
};
}
% endfor

View file

@ -1,21 +0,0 @@
if node.os == 'arch':
filename = '/etc/bird.conf'
else:
filename = '/etc/bird/bird.conf'
files = {
filename: {
'content_type': 'mako',
'triggers': {
'svc_systemd:bird:reload',
},
},
}
svc_systemd = {
'bird': {
'needs': {
f'file:{filename}',
},
},
}

View file

@ -1,96 +0,0 @@
from ipaddress import ip_network
from bundlewrap.exceptions import NoSuchNode
from bundlewrap.metadata import atomic
defaults = {
'apt': {
'packages': {
'bird2': {
'needed_by': {
'svc_systemd:bird',
},
},
},
},
'pacman': {
'packages': {
'bird': {
'needed_by': {
'svc_systemd:bird',
},
},
},
},
'sysctl': {
'options': {
'net.ipv4.conf.all.forwarding': '1',
'net.ipv6.conf.all.forwarding': '1',
},
},
}
@metadata_reactor.provides(
'bird/bgp_neighbors',
)
def neighbor_info_from_wireguard(metadata):
neighbors = {}
my_as = repo.libs.s2s.AS_NUMBERS[metadata.get('location')]
for name, config in metadata.get('wireguard/peers', {}).items():
try:
rnode = repo.get_node(name)
except NoSuchNode:
continue
if not rnode.has_bundle('bird'):
continue
neighbors[name] = {
'local_ip': config['my_ip'],
'local_as': my_as,
'neighbor_ip': config['their_ip'],
'neighbor_as': repo.libs.s2s.AS_NUMBERS[rnode.metadata.get('location')],
}
return {
'bird': {
'bgp_neighbors': neighbors,
},
}
@metadata_reactor.provides(
'bird/my_ip',
)
def my_ip(metadata):
if node.has_bundle('wireguard'):
wg_ifaces = sorted({iface for iface in metadata.get('interfaces').keys() if iface.startswith('wg_')})
if not wg_ifaces:
return {}
my_ip = sorted(metadata.get(f'interfaces/{wg_ifaces[0]}/ips'))[0].split('/')[0]
else:
my_ip = str(sorted(repo.libs.tools.resolve_identifier(repo, node.name))[0])
return {
'bird': {
'my_ip': my_ip,
},
}
@metadata_reactor.provides(
'firewall/port_rules',
)
def firewall(metadata):
sources = set()
for config in metadata.get('bird/bgp_neighbors', {}).values():
sources.add(config['neighbor_ip'])
return {
'firewall': {
'port_rules': {
'179/tcp': atomic(sources),
},
},
}

View file

@ -1 +0,0 @@
../../apt/files/check_unattended_upgrades

View file

@ -1 +0,0 @@
../../cron/files/cron_template

View file

@ -1 +0,0 @@
../../apt/files/do-unattended-upgrades

View file

@ -1 +0,0 @@
../../apt/files/kernel-postinst.d

View file

@ -1,62 +0,0 @@
server {
server_name ${domain};
root ${webroot if webroot else '/var/www/{}/'.format(vhost)};
index index.html index.htm;
listen 443 ssl http2;
listen [::]:443 ssl http2;
ssl_trusted_certificate /etc/letsencrypt/live/${domain}/chain.pem;
ssl_certificate /etc/letsencrypt/live/${domain}/fullchain.pem;
ssl_certificate_key /etc/letsencrypt/live/${domain}/privkey.pem;
ssl_dhparam /etc/ssl/dhparam4096.pem;
ssl_protocols TLSv1.2 TLSv1.3;
ssl_ciphers ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384;
ssl_prefer_server_ciphers on;
ssl_session_cache shared:SSL:10m;
add_header Strict-Transport-Security "max-age=63072000; includeSubDomains";
resolver 8.8.8.8 8.8.4.4 valid=300s;
resolver_timeout 5s;
% if max_body_size:
client_max_body_size ${max_body_size};
% elif proxy:
client_max_body_size 5M;
% endif
add_header Permissions-Policy interest-cohort=();
location /.well-known/acme-challenge/ {
alias /var/www/dehydrated;
}
% if locations:
% for location, options in locations.items():
location ${location} {
proxy_pass ${options['target']};
proxy_http_version ${options.get('http_version', '1.1')};
proxy_set_header Host ${domain};
% if options.get('websockets', False):
proxy_set_header Connection "upgrade";
proxy_set_header Upgrade $http_upgrade;
% endif
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto HTTPS;
proxy_set_header X-Forwarded-Host ${domain};
% for option, value in options.get('proxy_set_header', {}).items():
proxy_set_header ${option} ${value};
% endfor
% if location != '/':
proxy_set_header X-Script-Name ${location};
% endif
proxy_buffering off;
}
% endfor
% endif
% if extras:
<%include file="extras/${node.name}/${vhost}" />
% endif
}

View file

@ -1 +0,0 @@
../../apt/files/upgrade-and-reboot

View file

@ -1 +0,0 @@
../../apt/files/upgrade-and-reboot.conf

View file

@ -1,188 +0,0 @@
from bundlewrap.exceptions import BundleError
supported_os = {
'debian': {
10: 'buster',
11: 'bullseye',
12: 'bookworm',
99: 'unstable',
},
'raspbian': {
10: 'buster',
},
}
try:
supported_os[node.os][node.os_version[0]]
except (KeyError, IndexError):
raise BundleError(f'{node.name}: OS {node.os} {node.os_version} is not supported by bundle:apt')
CONFLICTING_BUNDLES = {
'apt',
'nginx',
'telegraf',
'users',
}
if any(node.has_bundle(i) for i in CONFLICTING_BUNDLES):
raise BundleError(f'{node.name}: bundle:c3voc-addons conflicts with bundles: {", ".join(sorted(CONFLICTING_BUNDLES))}')
pkg_apt = {
'apt-transport-https': {},
'build-essential': {},
'curl': {},
'git': {},
'grep': {},
'gzip': {},
'htop': {},
'jq': {},
'less': {},
'mtr': {},
'ncdu': {},
'netcat': {},
'python3': {},
'python3-dev': {},
'python3-setuptools': {
'needed_by': {
'pkg_pip:',
},
},
'python3-pip': {
'needed_by': {
'pkg_pip:',
},
},
'python3-virtualenv': {},
'rsync': {},
'tar': {},
'tmux': {},
'tree': {},
'wget': {},
}
if node.metadata.get('apt/packages', {}):
for package, options in node.metadata['apt']['packages'].items():
pkg_apt[package] = options
actions = {
'systemd-reload': {
'command': 'systemctl daemon-reload',
'cascade_skip': False,
'triggered': True,
'needed_by': {
'svc_systemd:',
},
},
'apt_update': {
'command': 'apt-get update',
'needed_by': {
'pkg_apt:',
},
'triggered': True,
'cascade_skip': False,
},
}
directories = {
'/etc/nginx/sites-enabled': {
'purge': True,
'triggers': {
'svc_systemd:nginx:restart',
},
},
}
files = {
'/etc/kernel/postinst.d/unattended-upgrades': {
'source': 'kernel-postinst.d',
},
'/etc/upgrade-and-reboot.conf': {
'content_type': 'mako',
},
'/usr/local/share/icinga/plugins/check_unattended_upgrades': {
'mode': '0755',
},
'/usr/local/sbin/upgrade-and-reboot': {
'mode': '0700',
},
'/usr/local/sbin/do-unattended-upgrades': {
'content_type': 'mako',
'mode': '0700',
'context': {
'additional_update_commands': node.metadata.get('apt/additional_update_commands', set()),
'clean_old_kernels': node.metadata.get('apt/clean_old_kernels', True),
'restart_triggers': node.metadata.get('apt/restart_triggers', {}),
}
},
}
for name, data in node.metadata.get('apt/repos', {}).items():
files['/etc/apt/sources.list.d/{}.list'.format(name)] = {
'content_type': 'mako',
'content': ("\n".join(sorted(data['items']))).format(
os=node.os,
os_release=supported_os[node.os][node.os_version[0]],
),
'triggers': {
'action:apt_update',
},
}
if data.get('install_gpg_key', True):
files['/etc/apt/sources.list.d/{}.list'.format(name)]['needs'] = {
'file:/etc/apt/trusted.gpg.d/{}.list.asc'.format(name),
}
files['/etc/apt/trusted.gpg.d/{}.list.asc'.format(name)] = {
'source': 'gpg-keys/{}.asc'.format(name),
'triggers': {
'action:apt_update',
},
}
for crontab, content in node.metadata.get('cron/jobs', {}).items():
files['/etc/cron.d/{}'.format(crontab)] = {
'source': 'cron_template',
'content_type': 'mako',
'context': {
'cron': content,
}
}
for vhost, config in node.metadata.get('nginx/vhosts', {}).items():
if not 'domain' in config:
config['domain'] = vhost
files['/etc/nginx/sites-available/{}'.format(vhost)] = {
'source': 'site_template',
'content_type': 'mako',
'context': {
'vhost': vhost,
**config,
},
'triggers': {
'svc_systemd:nginx:restart',
},
}
symlinks['/etc/nginx/sites-enabled/{}'.format(vhost)] = {
'target': '/etc/nginx/sites-available/{}'.format(vhost),
'triggers': {
'svc_systemd:nginx:restart',
},
}
if not 'webroot' in config:
directories['/var/www/{}'.format(vhost)] = config.get('webroot_config', {})
svc_systemd = {
'nginx': {},
'apt-daily.timer': {
'running': False,
'enabled': False,
},
'apt-daily-upgrade.timer': {
'running': False,
'enabled': False,
},
}

View file

@ -1,77 +0,0 @@
defaults = {
'apt': {
'unattended-upgrades': {
'day': 5,
'hour': 21,
},
},
'icinga2_api': {
'apt': {
'services': {
'UNATTENDED UPGRADES': {
'command_on_monitored_host': '/usr/local/share/icinga/plugins/check_unattended_upgrades',
},
},
},
'nginx': {
'services': {
'NGINX PROCESS': {
'command_on_monitored_host': '/usr/local/share/icinga/plugins/check_systemd_unit nginx',
},
},
},
},
}
@metadata_reactor.provides(
'cron/jobs/upgrade-and-reboot'
)
def patchday(metadata):
day = metadata.get('apt/unattended-upgrades/day')
hour = metadata.get('apt/unattended-upgrades/hour')
return {
'cron': {
'jobs': {
'upgrade-and-reboot': '{minute} {hour} * * {day} root /usr/local/sbin/upgrade-and-reboot'.format(
minute=node.magic_number % 30,
hour=hour,
day=day,
),
},
},
}
@metadata_reactor.provides(
'icinga2_api/nginx/services',
)
def monitoring(metadata):
services = {}
for vname, vconfig in metadata.get('nginx/vhosts', {}).items():
domain = vconfig.get('domain', vname)
if 'website_check_path' in vconfig and 'website_check_string' in vconfig:
services['NGINX VHOST {} CONTENT'.format(vname)] = {
'check_command': 'check_http_wget',
'vars.http_wget_contains': vconfig['website_check_string'],
'vars.http_wget_url': 'https://{}{}'.format(domain, vconfig['website_check_path']),
'vars.notification.sms': True,
}
if vconfig.get('check_ssl', True):
services['NGINX VHOST {} CERTIFICATE'.format(vname)] = {
'check_command': 'check_https_cert_at_url',
'vars.domain': domain,
'vars.notification.mail': True,
}
return {
'icinga2_api': {
'nginx': {
'services': services,
},
},
}

View file

@@ -1,42 +0,0 @@
@metadata_reactor.provides(
'cron/jobs/check-mail-received',
'icinga2_api/check-mail-received/services',
)
def process_metadata(metadata):
cron = set()
services = {}
my_mail_address = 'root@{}'.format(metadata.get('hostname'))
for name, config in metadata.get('check-mail-received', {}).items():
cron.add('{minute} {hour} * * * root date | mail -s "daily test mail from {node}" -r {source} {target}'.format(
minute=node.magic_number%60,
hour=node.magic_number%24,
node=node.name,
source=my_mail_address,
target=config['email'],
))
services[f'MAIL RECEIVED ON {name}'] = {
'check_command': 'check_imap_for_mail_from',
'check_interval': '15m',
'retry_interval': '5m',
'vars.sshmon_timeout': 30,
'vars.imap_host': config['imap_host'],
'vars.imap_user': config.get('imap_user', config['email']),
'vars.imap_pass': config['imap_pass'],
'vars.imap_from': my_mail_address,
}
return {
'cron': {
'jobs': {
'check-mail-received': '\n'.join(sorted(cron)),
},
},
'icinga2_api': {
'check-mail-received': {
'services': services,
},
},
}

View file

@@ -1,8 +0,0 @@
# CAUTION! This file is managed with bundlewrap.
# Any manual edits will be lost!
SHELL=/bin/sh
PATH=/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin
MAILTO=${node.metadata.get('cron/mail_to', repo.libs.defaults.hostmaster_email)}
${cron}

View file

@@ -1,11 +0,0 @@
# CAUTION! This file is managed with bundlewrap.
# Any manual edits will be lost!
SHELL=/bin/sh
PATH=/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin
MAILTO=${node.metadata.get('cron/mail_to', repo.libs.defaults.hostmaster_email)}
${min} * * * * root cd / && run-parts --report /etc/cron.hourly
${min} 6 * * * root test -x /usr/sbin/anacron || ( cd / && run-parts --report /etc/cron.daily )
${min} 6 * * 7 root test -x /usr/sbin/anacron || ( cd / && run-parts --report /etc/cron.weekly )
${min} 6 1 * * root test -x /usr/sbin/anacron || ( cd / && run-parts --report /etc/cron.monthly )

View file

@@ -1,41 +1,6 @@
if node.os == 'arch':
service_name = 'cronie'
package_name = 'pkg_pacman:cronie'
else:
service_name = 'cron'
package_name = 'pkg_apt:cron'
files = {}
files = {
'/etc/crontab': {
'content_type': 'mako',
'context': {
'min': (node.magic_number%60),
},
},
}
directories = {
'/etc/cron.d': {
'purge': True,
'after': {
'pkg_apt:',
},
},
}
svc_systemd = {
service_name: {
'needs': {
package_name,
},
},
}
for crontab, content in node.metadata.get('cron/jobs', {}).items():
for crontab, content in node.metadata.get('cron', {}).items():
files['/etc/cron.d/{}'.format(crontab)] = {
'source': 'cron_template',
'content_type': 'mako',
'context': {
'cron': content,
}
'content': content + "\n",
}

View file

@@ -1,12 +0,0 @@
defaults = {
'apt': {
'packages': {
'cron': {},
},
},
'pacman': {
'packages': {
'cronie': {},
},
},
}

View file

@@ -1,26 +0,0 @@
for dev, attrs in node.metadata.get('dm-crypt/encrypted-devices', {}).items():
actions['dm-crypt_format_' + dev] = {
'cascade_skip': False,
'command': f'cryptsetup --batch-mode luksFormat --cipher aes-xts-plain64 --key-size 512 {dev}',
'comment': 'Careful: This destroys the current contents of that device. Afterwards, it will be encrypted using dm-crypt.',
'data_stdin': attrs['passphrase'],
'unless': f'blkid -t TYPE=crypto_LUKS {dev}',
'needs': {
'pkg_apt:cryptsetup',
},
}
actions['dm-crypt_open_' + attrs['dm-name']] = {
'cascade_skip': False,
'command': 'cryptsetup --batch-mode luksOpen {dev} {dm_name}'.format(
dev=dev,
dm_name=attrs['dm-name'],
),
'comment': 'Unlocks the device and makes it available as /dev/mapper/{}'.format(attrs['dm-name']),
'data_stdin': attrs['passphrase'],
'needs': {
f'action:dm-crypt_format_{dev}',
'pkg_apt:cryptsetup',
},
'unless': 'test -e /dev/mapper/{}'.format(attrs['dm-name']),
}

View file

@@ -1,7 +0,0 @@
defaults = {
'apt': {
'packages': {
'cryptsetup': {},
},
},
}

View file

@@ -1,6 +0,0 @@
connect = host=localhost dbname=${dbname} user=${dbuser} password=${dbpass}
driver = pgsql
default_pass_scheme = MD5-CRYPT
password_query = SELECT username as user, password FROM mailbox WHERE username = '%u' AND active = true
user_query = SELECT '/var/mail/vmail/' || maildir as home, 65534 as uid, 65534 as gid FROM mailbox WHERE username = '%u' AND active = true
iterate_query = SELECT username as user FROM mailbox WHERE active = true

View file

@@ -1,185 +0,0 @@
!include conf.d/*.conf
namespace inbox {
type = private
inbox = yes
location =
mailbox Drafts {
auto = subscribe
special_use = \Drafts
}
mailbox Junk {
auto = create
special_use = \Junk
autoexpunge = 30d
}
mailbox Sent {
auto = subscribe
special_use = \Sent
}
mailbox Trash {
auto = subscribe
special_use = \Trash
autoexpunge = 360d
}
prefix =
}
mail_location = maildir:/var/mail/vmail/%d/%n
protocols = imap lmtp sieve
ssl = yes
ssl_cert = </var/lib/dehydrated/certs/${node.metadata.get('postfix/myhostname', node.metadata['hostname'])}/fullchain.pem
ssl_key = </var/lib/dehydrated/certs/${node.metadata.get('postfix/myhostname', node.metadata['hostname'])}/privkey.pem
ssl_dh = </etc/dovecot/ssl/dhparam.pem
ssl_min_protocol = TLSv1.2
ssl_cipher_list = EECDH+AESGCM:EDH+AESGCM
ssl_prefer_server_ciphers = yes
login_greeting = IMAPd ready
auth_mechanisms = plain login
first_valid_uid = 65534
disable_plaintext_auth = yes
mail_plugins = $mail_plugins zlib old_stats fts fts_xapian
plugin {
zlib_save_level = 6
zlib_save = gz
sieve = /var/mail/vmail/sieve/%d/%n.sieve
sieve_dir = /var/mail/vmail/sieve/%d/%n/
sieve_extensions = +vnd.dovecot.pipe
sieve_pipe_bin_dir = /var/mail/vmail/sieve/bin
sieve_plugins = sieve_imapsieve sieve_extprograms
sieve_user_log = /var/mail/vmail/sieve/%d/%n.log
old_stats_refresh = 30 secs
old_stats_track_cmds = yes
fts = xapian
fts_xapian = partial=3 full=20
fts_autoindex = yes
fts_enforced = yes
# Index attachments
fts_decoder = decode2text
% if node.has_bundle('rspamd'):
sieve_before = /var/mail/vmail/sieve/global/spam-global.sieve
# From elsewhere to Spam folder
imapsieve_mailbox1_name = Junk
imapsieve_mailbox1_causes = COPY
imapsieve_mailbox1_before = file:/var/mail/vmail/sieve/global/learn-spam.sieve
# From Spam folder to elsewhere
imapsieve_mailbox2_name = *
imapsieve_mailbox2_from = Junk
imapsieve_mailbox2_causes = COPY
imapsieve_mailbox2_before = file:/var/mail/vmail/sieve/global/learn-ham.sieve
% endif
}
service auth {
unix_listener /var/spool/postfix/private/auth {
mode = 0660
user = postfix
group = postfix
}
unix_listener auth-userdb {
mode = 0660
user = nobody
group = nogroup
}
}
service decode2text {
executable = script /usr/lib/dovecot/decode2text.sh
user = dovecot
unix_listener decode2text {
mode = 0666
}
}
service indexer-worker {
vsz_limit = 0
process_limit = 0
}
service imap {
executable = imap
}
service imap-login {
service_count = 1
process_min_avail = 8
vsz_limit = 64M
}
service lmtp {
unix_listener /var/spool/postfix/private/dovecot-lmtp {
group = postfix
mode = 0600
user = postfix
}
}
service managesieve-login {
inet_listener sieve {
port = 4190
}
}
userdb {
driver = sql
args = /etc/dovecot/dovecot-sql.conf
}
passdb {
driver = sql
args = /etc/dovecot/dovecot-sql.conf
}
protocol lmtp {
mail_plugins = $mail_plugins sieve
postmaster_address = ${admin_email}
}
protocol imap {
mail_plugins = $mail_plugins imap_zlib imap_sieve imap_old_stats
mail_max_userip_connections = 50
imap_idle_notify_interval = 29 mins
}
protocol sieve {
plugin {
sieve = /var/mail/vmail/sieve/%d/%n.sieve
sieve_storage = /var/mail/vmail/sieve/%d/%n/
}
}
service old-stats {
% if node.has_bundle('telegraf'):
inet_listener {
address = 127.0.0.1
port = 24242
}
% endif
unix_listener old-stats {
mode = 0660
user = nobody
group = nogroup
}
fifo_listener old-stats-mail {
mode = 0660
user = nobody
group = nogroup
}
fifo_listener old-stats-user {
mode = 0660
user = nobody
group = nogroup
}
}

View file

@@ -1,15 +0,0 @@
require ["vnd.dovecot.pipe", "copy", "imapsieve", "environment", "variables"];
if environment :matches "imap.mailbox" "*" {
set "mailbox" "${1}";
}
if string "${mailbox}" "Trash" {
stop;
}
if environment :matches "imap.user" "*" {
set "username" "${1}";
}
pipe :copy "sa-learn-ham.sh" [ "${username}" ];

View file

@@ -1,7 +0,0 @@
require ["vnd.dovecot.pipe", "copy", "imapsieve", "environment", "variables"];
if environment :matches "imap.user" "*" {
set "username" "${1}";
}
pipe :copy "sa-learn-spam.sh" [ "${username}" ];

View file

@@ -1,11 +0,0 @@
require ["fileinto", "imap4flags"];
if header :contains "X-Spam-Status" "Yes" {
setflag "\\seen";
fileinto "Junk";
}
if header :contains "X-Spam" "Yes" {
setflag "\\seen";
fileinto "Junk";
}

View file

@@ -1,96 +0,0 @@
# Postfix bundle creates metadata and directories which are also used
# by this bundle
repo.libs.tools.require_bundle(node, 'postfix')
directories = {
'/etc/dovecot/ssl': {},
}
files = {
'/etc/dovecot/dovecot.conf': {
'content_type': 'mako',
'context': {
'admin_email': node.metadata['dovecot']['admin_email'],
},
'needs': {
'pkg_apt:'
},
'triggers': {
'svc_systemd:dovecot:restart',
},
},
'/etc/dovecot/dovecot-sql.conf': {
'content_type': 'mako',
'context': node.metadata['dovecot']['database'],
'needs': {
'pkg_apt:'
},
'triggers': {
'svc_systemd:dovecot:restart',
},
},
'/etc/dovecot/conf.d/auth-system.conf.ext': {
'delete': True,
'needs': {
'pkg_apt:'
},
'triggers': {
'svc_systemd:dovecot:restart',
},
},
'/etc/dovecot/conf.d/10-auth.conf': {
'delete': True,
'needs': {
'pkg_apt:'
},
'triggers': {
'svc_systemd:dovecot:restart',
},
},
}
symlinks['/usr/lib/dovecot/decode2text.sh'] = {
'target': '/usr/share/doc/dovecot-core/examples/decode2text.sh',
'before': {
'svc_systemd:dovecot',
},
}
actions = {
'dovecot_generate_dhparam': {
'command': 'openssl dhparam -out /etc/dovecot/ssl/dhparam.pem 2048',
'unless': 'test -f /etc/dovecot/ssl/dhparam.pem',
'cascade_skip': False,
'needs': {
'directory:/etc/dovecot/ssl',
'pkg_apt:'
},
'triggers': {
'svc_systemd:dovecot:restart',
},
},
}
svc_systemd = {
'dovecot': {
'needs': {
'action:dovecot_generate_dhparam',
'file:/etc/dovecot/dovecot.conf',
'file:/etc/dovecot/dovecot-sql.conf',
},
},
}
if node.has_bundle('rspamd'):
files['/var/mail/vmail/sieve/global/learn-ham.sieve'] = {
'owner': 'nobody',
'group': 'nogroup',
}
files['/var/mail/vmail/sieve/global/learn-spam.sieve'] = {
'owner': 'nobody',
'group': 'nogroup',
}
files['/var/mail/vmail/sieve/global/spam-global.sieve'] = {
'owner': 'nobody',
'group': 'nogroup',
}

View file

@@ -1,105 +0,0 @@
from bundlewrap.metadata import atomic
defaults = {
'apt': {
'packages': {
'dovecot-fts-xapian': {},
'dovecot-imapd': {},
'dovecot-lmtpd': {},
'dovecot-managesieved': {},
'dovecot-pgsql': {},
'dovecot-sieve': {},
},
},
'icinga2_api': {
'dovecot': {
'services': {
'DOVECOT PROCESS': {
'command_on_monitored_host': '/usr/local/share/icinga/plugins/check_systemd_unit dovecot',
},
'IMAP CONNECT': {
'check_command': 'check_imap',
'vars.imap_port': 143,
'vars.notification.sms': True,
},
'IMAPS CONNECT': {
'check_command': 'check_imap',
'vars.imap_port': 993,
'vars.imap_ssl': True,
'vars.notification.sms': True,
},
},
},
},
'letsencrypt': {
'reload_after': {
'dovecot',
},
},
'systemd-timers': {
'timers': {
'dovecot_fts_optimize': {
'command': [
'/usr/bin/doveadm fts optimize -A',
],
'when': '02:{}:00'.format(node.magic_number % 60),
},
},
},
}
if node.has_bundle('postfixadmin'):
defaults['dovecot'] = {
'database': {
'dbname': 'postfixadmin',
'dbuser': 'postfixadmin',
},
}
if node.has_bundle('telegraf'):
defaults['telegraf'] = {
'input_plugins': {
'builtin': {
'dovecot': [{
'type': 'global',
}],
},
},
}
@metadata_reactor.provides(
'dovecot/admin_email',
'dovecot/database/dbpass',
)
def import_database_settings_from_postfixadmin(metadata):
if not node.has_bundle('postfixadmin'):
raise DoNotRunAgain
return {
'dovecot': {
'admin_email': metadata.get('postfixadmin/admin_email'),
'database': {
'dbpass': metadata.get('postgresql/roles/postfixadmin/password'),
},
},
}
@metadata_reactor.provides(
'firewall/port_rules',
'firewall/port_rules',
'firewall/port_rules',
)
def firewall(metadata):
return {
'firewall': {
'port_rules': {
# imap(s)
'143/tcp': atomic(metadata.get('dovecot/restrict-to', {'*'})),
'993/tcp': atomic(metadata.get('dovecot/restrict-to', {'*'})),
# managesieve
'4190/tcp': atomic(metadata.get('dovecot/restrict-to', {'*'})),
},
},
}

View file

@@ -1,41 +0,0 @@
from bundlewrap.metadata import metadata_to_json
repo.libs.tools.require_bundle(node, 'nodejs')
directories = {
'/opt/element-web': {}
}
git_deploy = {
'/opt/element-web': {
'rev': node.metadata.get('element-web/version'),
'repo': 'https://github.com/vector-im/element-web.git',
'triggers': {
'action:element-web_yarn',
},
},
}
files = {
'/opt/element-web/webapp/config.json': {
'content': metadata_to_json(node.metadata.get('element-web/config')),
'needs': {
'action:element-web_yarn',
},
},
}
actions = {
'element-web_yarn': {
'command': ' && '.join([
'cd /opt/element-web',
'yarn install --pure-lockfile --ignore-scripts',
'yarn build',
]),
'needs': {
'action:nodejs_install_yarn',
'pkg_apt:nodejs',
},
'triggered': True,
},
}

View file

@@ -1,46 +0,0 @@
defaults = {
'zfs': {
'datasets': {
'tank/element-web': {
'mountpoint': '/opt/element-web',
'needed_by': {
'directory:/opt/element-web',
},
},
},
},
}
@metadata_reactor.provides(
'nginx/vhosts/element-web',
)
def nginx_config(metadata):
return {
'nginx': {
'vhosts': {
'element-web': {
'domain': metadata.get('element-web/url'),
'webroot': '/opt/element-web/webapp/',
},
},
},
}
@metadata_reactor.provides(
'icinga2_api/element-web/services',
)
def icinga_check_for_new_release(metadata):
return {
'icinga2_api': {
'element-web': {
'services': {
'ELEMENT-WEB UPDATE': {
'command_on_monitored_host': '/usr/local/share/icinga/plugins/check_github_for_new_release vector-im/element-web {}'.format(metadata.get('element-web/version')),
'vars.notification.mail': True,
'check_interval': '60m',
},
},
},
},
}

View file

@@ -1,89 +0,0 @@
APP_NAME = ${app_name}
RUN_USER = git
RUN_MODE = prod
WORK_PATH = /var/lib/forgejo
[repository]
ROOT = /var/lib/forgejo/repositories
MAX_CREATION_LIMIT = 0
DEFAULT_BRANCH = main
[ui]
ISSUE_PAGING_NUM = 50
MEMBERS_PAGING_NUM = 100
[server]
PROTOCOL = http
SSH_DOMAIN = ${domain}
DOMAIN = ${domain}
HTTP_ADDR = 127.0.0.1
HTTP_PORT = 22000
ROOT_URL = https://${domain}/
DISABLE_SSH = false
SSH_PORT = 22
LFS_START_SERVER = true
LFS_JWT_SECRET = ${lfs_secret_key}
OFFLINE_MODE = true
START_SSH_SERVER = false
DISABLE_ROUTER_LOG = true
LANDING_PAGE = explore
[database]
DB_TYPE = postgres
HOST = ${database.get('host', 'localhost')}:5432
NAME = ${database['database']}
USER = ${database['username']}
PASSWD = ${database['password']}
SSL_MODE = disable
LOG_SQL = false
[admin]
DEFAULT_EMAIL_NOTIFICATIONS = onmention
DISABLE_REGULAR_ORG_CREATION = true
[security]
INTERNAL_TOKEN = ${internal_token}
INSTALL_LOCK = true
SECRET_KEY = ${security_secret_key}
LOGIN_REMEMBER_DAYS = 30
DISABLE_GIT_HOOKS = ${str(not enable_git_hooks).lower()}
[openid]
ENABLE_OPENID_SIGNIN = false
ENABLE_OPENID_SIGNUP = false
[service]
REGISTER_EMAIL_CONFIRM = true
ENABLE_NOTIFY_MAIL = true
DISABLE_REGISTRATION = ${str(disable_registration).lower()}
ALLOW_ONLY_EXTERNAL_REGISTRATION = false
ENABLE_CAPTCHA = false
REQUIRE_SIGNIN_VIEW = false
DEFAULT_KEEP_EMAIL_PRIVATE = true
DEFAULT_ALLOW_CREATE_ORGANIZATION = false
DEFAULT_ENABLE_TIMETRACKING = true
NO_REPLY_ADDRESS = noreply.${domain}
EMAIL_DOMAIN_BLOCKLIST = ${','.join(sorted(email_domain_blocklist))}
[mailer]
ENABLED = true
PROTOCOL = sendmail
FROM = "${app_name}" <noreply@${domain}>
[session]
PROVIDER = file
[picture]
DISABLE_GRAVATAR = true
ENABLE_FEDERATED_AVATAR = false
[log]
MODE = console
LEVEL = warn
[oauth2]
JWT_SECRET = ${oauth_secret_key}
[other]
SHOW_FOOTER_BRANDING = true
SHOW_FOOTER_TEMPLATE_LOAD_TIME = false

View file

@@ -1,17 +0,0 @@
[Unit]
Description=${app_name} at ${domain}
After=syslog.target
After=network.target
Requires=postgresql.service
[Service]
RestartSec=10
Type=simple
User=git
Group=git
WorkingDirectory=/var/lib/forgejo
ExecStart=/usr/local/bin/forgejo web -c /etc/forgejo/app.ini
Restart=always
[Install]
WantedBy=multi-user.target

View file

@@ -1,65 +0,0 @@
users = {
'git': {
'home': '/var/lib/forgejo',
},
}
directories = {
'/var/lib/forgejo/.ssh': {
'mode': '0700',
'owner': 'git',
'group': 'git',
},
'/var/lib/forgejo': {
'owner': 'git',
'mode': '0700',
'triggers': {
'svc_systemd:forgejo:restart',
},
},
}
files = {
'/usr/local/lib/systemd/system/forgejo.service': {
'content_type': 'mako',
'context': node.metadata.get('forgejo'),
'triggers': {
'action:systemd-reload',
'svc_systemd:forgejo:restart',
},
},
'/etc/forgejo/app.ini': {
'content_type': 'mako',
'context': node.metadata.get('forgejo'),
'triggers': {
'svc_systemd:forgejo:restart',
},
},
'/usr/local/bin/forgejo': {
'content_type': 'download',
'source': 'https://codeberg.org/forgejo/forgejo/releases/download/v{0}/forgejo-{0}-linux-amd64'.format(node.metadata.get('forgejo/version')),
'content_hash': node.metadata.get('forgejo/sha1', None),
'mode': '0755',
'triggers': {
'svc_systemd:forgejo:restart',
},
},
}
if node.metadata.get('forgejo/install_ssh_key', False):
files['/var/lib/forgejo/.ssh/id_ed25519'] = {
'content': repo.vault.decrypt_file(f'forgejo/files/ssh-keys/{node.name}.key.vault'),
'mode': '0600',
'owner': 'git',
'group': 'git',
}
svc_systemd = {
'forgejo': {
'needs': {
'file:/etc/forgejo/app.ini',
'file:/usr/local/bin/forgejo',
'file:/usr/local/lib/systemd/system/forgejo.service',
},
},
}

View file

@@ -1,107 +0,0 @@
defaults = {
'backups': {
'paths': {
'/var/lib/forgejo',
},
},
'forgejo': {
'app_name': 'Forgejo',
'database': {
'username': 'forgejo',
'password': repo.vault.password_for('{} postgresql forgejo'.format(node.name)),
'database': 'forgejo',
},
'disable_registration': True,
'email_domain_blocklist': set(),
'enable_git_hooks': False,
'internal_token': repo.vault.password_for('{} forgejo internal_token'.format(node.name)),
'lfs_secret_key': repo.vault.password_for('{} forgejo lfs_secret_key'.format(node.name)),
'oauth_secret_key': repo.vault.password_for('{} forgejo oauth_secret_key'.format(node.name)),
'security_secret_key': repo.vault.password_for('{} forgejo security_secret_key'.format(node.name)),
},
'icinga2_api': {
'forgejo': {
'services': {
'FORGEJO PROCESS': {
'command_on_monitored_host': '/usr/local/share/icinga/plugins/check_systemd_unit forgejo',
},
'FORGEJO UPDATE': {
'vars.notification.mail': True,
'check_interval': '60m',
},
},
},
},
'openssh': {
'allowed_users': {
'git',
},
},
'postgresql': {
'roles': {
'forgejo': {
'password': repo.vault.password_for('{} postgresql forgejo'.format(node.name)),
},
},
'databases': {
'forgejo': {
'owner': 'forgejo',
},
},
},
'zfs': {
'datasets': {
'tank/forgejo': {
'mountpoint': '/var/lib/forgejo',
'needed_by': {
'directory:/var/lib/forgejo',
},
},
},
},
}
@metadata_reactor.provides(
'icinga2_api/forgejo',
)
def update_monitoring(metadata):
return {
'icinga2_api': {
'forgejo': {
'services': {
'FORGEJO UPDATE': {
'command_on_monitored_host': '/usr/local/share/icinga/plugins/check_forgejo_for_new_release codeberg.org forgejo/forgejo v{}'.format(metadata.get('forgejo/version')),
},
},
},
},
}
@metadata_reactor.provides(
'nginx/vhosts/forgejo',
)
def nginx(metadata):
if not node.has_bundle('nginx'):
raise DoNotRunAgain
return {
'nginx': {
'vhosts': {
'forgejo': {
'domain': metadata.get('forgejo/domain'),
'locations': {
'/': {
'target': 'http://127.0.0.1:22000',
},
'/debug': {
'return': 403,
},
},
'website_check_path': '/user/login',
'website_check_string': 'Sign In',
},
},
},
}

View file

@@ -0,0 +1,13 @@
svc_systemd = {}
for i in (
'google-accounts-daemon.service',
'google-accounts-manager.service',
'google-clock-skew-daemon.service',
'google-clock-sync-manager.service',
'sshguard.service'
):
svc_systemd[i] = {
'enabled': False,
'running': False,
}

View file

@@ -1,242 +0,0 @@
def dashboard_row_battery(panel_id, node):
return {
'title': 'battery',
'collapse': False,
'editable': False,
'height': '250px',
'panels': [
{
'aliasColors': {},
'bars': False,
'dashLength': 10,
'dashes': False,
'datasource': None,
'fieldConfig': {
'defaults': {
'displayName': '${__field.labels.name}'
},
'overrides': []
},
'fill': 1,
'fillGradient': 0,
'hiddenSeries': False,
'id': next(panel_id),
'legend': {
'alignAsTable': False,
'avg': False,
'current': False,
'hideEmpty': True,
'hideZero': True,
'max': False,
'min': False,
'rightSide': False,
'show': True,
'total': False,
'values': False
},
'lines': True,
'linewidth': 1,
'nullPointMode': 'null',
'options': {
'alertThreshold': True
},
'percentage': False,
'pluginVersion': '7.5.5',
'pointradius': 2,
'points': False,
'renderer': 'flot',
'seriesOverrides': [],
'spaceLength': 10,
'span': 6,
'stack': False,
'steppedLine': False,
'targets': [
{
'groupBy': [
{'type': 'time', 'params': ['$__interval']},
{'type': 'fill', 'params': ['linear']},
],
'orderByTime': "ASC",
'policy': "default",
'query': f"""from(bucket: "telegraf")
|> range(start: v.timeRangeStart, stop: v.timeRangeStop)
|> filter(fn: (r) =>
r["_measurement"] == "battery" and
r["host"] == "{node.name}" and
(
r["_field"] == "energy_full" or
r["_field"] == "energy_now"
)
)
|> pivot(rowKey:["_time"], columnKey: ["_field"], valueColumn: "_value")
|> map(fn: (r) => ({{
r with
_value: float(v: r.energy_now) / float(v: r.energy_full) * 100.0
}})
)
|> drop(columns: ["energy_now", "energy_full"])""",
'resultFormat': 'time_series',
'select': [[
{'type': 'field', 'params': ['value']},
{'type': 'mean', 'params': []},
]],
"tags": []
},
],
'thresholds': [],
'timeRegions': [],
'title': 'battery charge',
'tooltip': {
'shared': True,
'sort': 0,
'value_type': 'individual'
},
'type': 'graph',
'xaxis': {
'buckets': None,
'mode': 'time',
'name': None,
'show': True,
'values': []
},
'yaxes': [
{
'format': 'percent',
'label': None,
'logBase': 1,
'max': 100,
'min': 0,
'show': True,
'decimals': 2,
},
{
'format': 'short',
'label': None,
'logBase': 1,
'max': None,
'min': None,
'show': False,
}
],
'yaxis': {
'align': False,
'alignLevel': None
}
},
{
'aliasColors': {},
'bars': False,
'dashLength': 10,
'dashes': False,
'datasource': None,
'fieldConfig': {
'defaults': {
'displayName': '${__field.labels.name}'
},
'overrides': []
},
'fill': 1,
'fillGradient': 0,
'hiddenSeries': False,
'id': next(panel_id),
'legend': {
'alignAsTable': False,
'avg': False,
'current': False,
'hideEmpty': True,
'hideZero': True,
'max': False,
'min': False,
'rightSide': False,
'show': True,
'total': False,
'values': False
},
'lines': True,
'linewidth': 1,
'nullPointMode': 'null',
'options': {
'alertThreshold': True
},
'percentage': False,
'pluginVersion': '7.5.5',
'pointradius': 2,
'points': False,
'renderer': 'flot',
'seriesOverrides': [],
'spaceLength': 10,
'span': 6,
'stack': False,
'steppedLine': False,
'targets': [
{
'groupBy': [
{'type': 'time', 'params': ['$__interval']},
{'type': 'fill', 'params': ['linear']},
],
'orderByTime': "ASC",
'policy': "default",
'query': f"""from(bucket: "telegraf")
|> range(start: v.timeRangeStart, stop: v.timeRangeStop)
|> filter(fn: (r) =>
r["_measurement"] == "battery" and
r["_field"] == "power_now" and
r["host"] == "{node.name}"
)
|> map(fn: (r) => ({{
r with
_value: float(v: r._value) / 1000000.0
}})
)
|> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)
|> yield(name: "fan")""",
'resultFormat': 'time_series',
'select': [[
{'type': 'field', 'params': ['value']},
{'type': 'mean', 'params': []},
]],
"tags": []
},
],
'thresholds': [],
'timeRegions': [],
'title': 'power draw from battery',
'tooltip': {
'shared': True,
'sort': 0,
'value_type': 'individual'
},
'type': 'graph',
'xaxis': {
'buckets': None,
'mode': 'time',
'name': None,
'show': True,
'values': []
},
'yaxes': [
{
'format': 'watts',
'label': None,
'logBase': 1,
'max': None,
'min': 0,
'show': True,
'decimals': 1,
},
{
'format': 'short',
'label': None,
'logBase': 1,
'max': None,
'min': None,
'show': False,
}
],
'yaxis': {
'align': False,
'alignLevel': None
}
},
],
}

View file

@@ -1,247 +0,0 @@
def dashboard_row_cpu(panel_id, node):
queries_cpu = []
queries_load = []
for measurement in [
'user',
'system',
'steal',
'iowait',
'nice',
'softirq',
'guest',
'guest_nice',
]:
queries_cpu.append({
'groupBy': [
{'type': 'time', 'params': ['$__interval']},
{'type': 'fill', 'params': ['linear']},
],
'orderByTime': "ASC",
'policy': "default",
'query': f"""from(bucket: "telegraf")
|> range(start: v.timeRangeStart, stop: v.timeRangeStop)
|> filter(fn: (r) =>
r["_measurement"] == "cpu" and
r["_field"] == "usage_{measurement}" and
r["host"] == "{node.name}"
)
|> map(fn: (r) => ({{
r with
_field: "{measurement}"
}})
)
|> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)
|> yield(name: "{measurement}")""",
'resultFormat': 'time_series',
'select': [[
{'type': 'field', 'params': ['value']},
{'type': 'mean', 'params': []},
]],
"tags": []
})
for measurement in [
'load1',
'load5',
'load15',
]:
queries_load.append({
'groupBy': [
{'type': 'time', 'params': ['$__interval']},
{'type': 'fill', 'params': ['linear']},
],
'orderByTime': "ASC",
'policy': "default",
'query': f"""from(bucket: "telegraf")
|> range(start: v.timeRangeStart, stop: v.timeRangeStop)
|> filter(fn: (r) =>
r["_measurement"] == "system" and
r["_field"] == "{measurement}" and
r["host"] == "{node.name}"
)
|> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)
|> yield(name: "{measurement}")""",
'resultFormat': 'time_series',
'select': [[
{'type': 'field', 'params': ['value']},
{'type': 'mean', 'params': []},
]],
"tags": []
})
return {
'title': 'cpu/load',
'collapse': False,
'editable': False,
'height': '250px',
'panels': [
{
'aliasColors': {},
'bars': False,
'dashLength': 10,
'dashes': False,
'datasource': None,
'fieldConfig': {
'defaults': {
'displayName': '${__field.name}'
},
'overrides': []
},
'fill': 10,
'fillGradient': 0,
'hiddenSeries': False,
'id': next(panel_id),
'legend': {
'alignAsTable': False,
'avg': False,
'current': False,
'max': False,
'min': False,
'rightSide': False,
'show': True,
'total': False,
'values': False
},
'lines': True,
'linewidth': 0,
'nullPointMode': 'null',
'options': {
'alertThreshold': True
},
'percentage': False,
'pluginVersion': '7.5.5',
'pointradius': 2,
'points': False,
'renderer': 'flot',
'seriesOverrides': [],
'spaceLength': 10,
'span': 6,
'stack': True,
'steppedLine': False,
'targets': queries_cpu,
'thresholds': [],
'timeRegions': [],
'title': 'cpu',
'tooltip': {
'shared': True,
'sort': 0,
'value_type': 'individual'
},
'type': 'graph',
'xaxis': {
'buckets': None,
'mode': 'time',
'name': None,
'show': True,
'values': []
},
'yaxes': [
{
'format': 'percent',
'label': None,
'logBase': 1,
'max': 100,
'min': 0,
'show': True,
},
{
'format': 'short',
'label': None,
'logBase': 1,
'max': None,
'min': None,
'show': False,
}
],
'yaxis': {
'align': False,
'alignLevel': None
}
},
{
'aliasColors': {},
'bars': False,
'dashLength': 10,
'dashes': False,
'datasource': None,
'fieldConfig': {
'defaults': {
'displayName': '${__field.name}'
},
'overrides': []
},
'fill': 1,
'fillGradient': 0,
'hiddenSeries': False,
'id': next(panel_id),
'legend': {
'alignAsTable': False,
'avg': False,
'current': False,
'max': False,
'min': False,
'rightSide': False,
'show': True,
'total': False,
'values': False
},
'lines': True,
'linewidth': 1,
'nullPointMode': 'null',
'options': {
'alertThreshold': True
},
'percentage': False,
'pluginVersion': '7.5.5',
'pointradius': 2,
'points': False,
'renderer': 'flot',
'seriesOverrides': [],
'spaceLength': 10,
'span': 6,
'stack': False,
'steppedLine': False,
'targets': queries_load,
'thresholds': [],
'timeRegions': [],
'title': 'load',
'tooltip': {
'shared': True,
'sort': 0,
'value_type': 'individual'
},
'type': 'graph',
'xaxis': {
'buckets': None,
'mode': 'time',
'name': None,
'show': True,
'values': []
},
'yaxes': [
{
'format': 'short',
'label': None,
'logBase': 1,
'max': None,
'min': None,
'show': True,
'decimals': 0,
},
{
'format': 'short',
'label': None,
'logBase': 1,
'max': None,
'min': None,
'show': False,
}
],
'yaxis': {
'align': False,
'alignLevel': None
}
},
],
}

View file

@@ -1,441 +0,0 @@
def dashboard_row_disk_iops(panel_id, node):
return {
'title': 'disk iops',
'collapse': False,
'editable': False,
'height': '200px',
'panels': [
{
'aliasColors': {},
'bars': False,
'dashLength': 10,
'dashes': False,
'datasource': None,
'fieldConfig': {
'defaults': {
'displayName': '${__field.labels.name}'
},
'overrides': []
},
'fill': 1,
'fillGradient': 0,
'hiddenSeries': False,
'id': next(panel_id),
'legend': {
'alignAsTable': False,
'avg': False,
'current': False,
'max': False,
'min': False,
'rightSide': False,
'show': True,
'total': False,
'values': False
},
'lines': True,
'linewidth': 1,
'nullPointMode': 'null',
'options': {
'alertThreshold': True
},
'percentage': False,
'pluginVersion': '7.5.5',
'pointradius': 2,
'points': False,
'renderer': 'flot',
'seriesOverrides': [],
'spaceLength': 10,
'span': 6,
'stack': True,
'steppedLine': False,
'targets': [
{
'groupBy': [
{'type': 'time', 'params': ['$__interval']},
{'type': 'fill', 'params': ['linear']},
],
'orderByTime': "ASC",
'policy': "default",
'query': f"""from(bucket: "telegraf")
|> range(start: v.timeRangeStart, stop: v.timeRangeStop)
|> filter(fn: (r) =>
r["_measurement"] == "diskio" and
r["_field"] == "reads" and
r["host"] == "{node.name}"
)
|> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)
|> derivative(unit: 1s)
|> yield(name: "read")""",
'resultFormat': 'time_series',
'select': [[
{'type': 'field', 'params': ['value']},
{'type': 'mean', 'params': []},
]],
"tags": []
},
],
'thresholds': [],
'timeRegions': [],
'title': 'read IOPS',
'tooltip': {
'shared': True,
'sort': 0,
'value_type': 'individual'
},
'type': 'graph',
'xaxis': {
'buckets': None,
'mode': 'time',
'name': None,
'show': True,
'values': []
},
'yaxes': [
{
'format': 'short',
'label': None,
'logBase': 1,
'max': None,
'min': 0,
'show': True,
},
{
'format': 'short',
'label': None,
'logBase': 1,
'max': None,
'min': None,
'show': False,
}
],
'yaxis': {
'align': False,
'alignLevel': None
}
},
{
'aliasColors': {},
'bars': False,
'dashLength': 10,
'dashes': False,
'datasource': None,
'fieldConfig': {
'defaults': {
'displayName': '${__field.labels.name}'
},
'overrides': []
},
'fill': 1,
'fillGradient': 0,
'hiddenSeries': False,
'id': next(panel_id),
'legend': {
'alignAsTable': False,
'avg': False,
'current': False,
'max': False,
'min': False,
'rightSide': False,
'show': True,
'total': False,
'values': False
},
'lines': True,
'linewidth': 1,
'nullPointMode': 'null',
'options': {
'alertThreshold': True
},
'percentage': False,
'pluginVersion': '7.5.5',
'pointradius': 2,
'points': False,
'renderer': 'flot',
'seriesOverrides': [],
'spaceLength': 10,
'span': 6,
'stack': True,
'steppedLine': False,
'targets': [
{
'groupBy': [
{'type': 'time', 'params': ['$__interval']},
{'type': 'fill', 'params': ['linear']},
],
'orderByTime': "ASC",
'policy': "default",
'query': f"""from(bucket: "telegraf")
|> range(start: v.timeRangeStart, stop: v.timeRangeStop)
|> filter(fn: (r) =>
r["_measurement"] == "diskio" and
r["_field"] == "writes" and
r["host"] == "{node.name}"
)
|> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)
|> derivative(unit: 1s)
|> yield(name: "write")""",
'resultFormat': 'time_series',
'select': [[
{'type': 'field', 'params': ['value']},
{'type': 'mean', 'params': []},
]],
"tags": []
},
],
'thresholds': [],
'timeRegions': [],
'title': 'write IOPS',
'tooltip': {
'shared': True,
'sort': 0,
'value_type': 'individual'
},
'type': 'graph',
'xaxis': {
'buckets': None,
'mode': 'time',
'name': None,
'show': True,
'values': []
},
'yaxes': [
{
'format': 'short',
'label': None,
'logBase': 1,
'max': None,
'min': 0,
'show': True,
},
{
'format': 'short',
'label': None,
'logBase': 1,
'max': None,
'min': None,
'show': False,
}
],
'yaxis': {
'align': False,
'alignLevel': None
}
},
{
'aliasColors': {},
'bars': False,
'dashLength': 10,
'dashes': False,
'datasource': None,
'fieldConfig': {
'defaults': {
'displayName': '${__field.labels.name}'
},
'overrides': []
},
'fill': 1,
'fillGradient': 0,
'hiddenSeries': False,
'id': next(panel_id),
'legend': {
'alignAsTable': False,
'avg': False,
'current': False,
'max': False,
'min': False,
'rightSide': False,
'show': True,
'total': False,
'values': False
},
'lines': True,
'linewidth': 1,
'nullPointMode': 'null',
'options': {
'alertThreshold': True
},
'percentage': False,
'pluginVersion': '7.5.5',
'pointradius': 2,
'points': False,
'renderer': 'flot',
'seriesOverrides': [],
'spaceLength': 10,
'span': 6,
'stack': True,
'steppedLine': False,
'targets': [
{
'groupBy': [
{'type': 'time', 'params': ['$__interval']},
{'type': 'fill', 'params': ['linear']},
],
'orderByTime': "ASC",
'policy': "default",
'query': f"""from(bucket: "telegraf")
|> range(start: v.timeRangeStart, stop: v.timeRangeStop)
|> filter(fn: (r) =>
r["_measurement"] == "diskio" and
r["_field"] == "read_bytes" and
r["host"] == "{node.name}"
)
|> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)
|> derivative(unit: 1s)
|> yield(name: "read")""",
'resultFormat': 'time_series',
'select': [[
{'type': 'field', 'params': ['value']},
{'type': 'mean', 'params': []},
]],
"tags": []
},
],
'thresholds': [],
'timeRegions': [],
'title': 'read bytes',
'tooltip': {
'shared': True,
'sort': 0,
'value_type': 'individual'
},
'type': 'graph',
'xaxis': {
'buckets': None,
'mode': 'time',
'name': None,
'show': True,
'values': []
},
'yaxes': [
{
'format': 'binBps',
'label': None,
'logBase': 1,
'max': None,
'min': 0,
'show': True,
},
{
'format': 'short',
'label': None,
'logBase': 1,
'max': None,
'min': None,
'show': False,
}
],
'yaxis': {
'align': False,
'alignLevel': None
}
},
{
'aliasColors': {},
'bars': False,
'dashLength': 10,
'dashes': False,
'datasource': None,
'fieldConfig': {
'defaults': {
'displayName': '${__field.labels.name}'
},
'overrides': []
},
'fill': 1,
'fillGradient': 0,
'hiddenSeries': False,
'id': next(panel_id),
'legend': {
'alignAsTable': False,
'avg': False,
'current': False,
'max': False,
'min': False,
'rightSide': False,
'show': True,
'total': False,
'values': False
},
'lines': True,
'linewidth': 1,
'nullPointMode': 'null',
'options': {
'alertThreshold': True
},
'percentage': False,
'pluginVersion': '7.5.5',
'pointradius': 2,
'points': False,
'renderer': 'flot',
'seriesOverrides': [],
'spaceLength': 10,
'span': 6,
'stack': True,
'steppedLine': False,
'targets': [
{
'groupBy': [
{'type': 'time', 'params': ['$__interval']},
{'type': 'fill', 'params': ['linear']},
],
'orderByTime': "ASC",
'policy': "default",
'query': f"""from(bucket: "telegraf")
|> range(start: v.timeRangeStart, stop: v.timeRangeStop)
|> filter(fn: (r) =>
r["_measurement"] == "diskio" and
r["_field"] == "write_bytes" and
r["host"] == "{node.name}"
)
|> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)
|> derivative(unit: 1s)
|> yield(name: "write")""",
'resultFormat': 'time_series',
'select': [[
{'type': 'field', 'params': ['value']},
{'type': 'mean', 'params': []},
]],
"tags": []
},
],
'thresholds': [],
'timeRegions': [],
'title': 'write bytes',
'tooltip': {
'shared': True,
'sort': 0,
'value_type': 'individual'
},
'type': 'graph',
'xaxis': {
'buckets': None,
'mode': 'time',
'name': None,
'show': True,
'values': []
},
'yaxes': [
{
'format': 'binBps',
'label': None,
'logBase': 1,
'max': None,
'min': None,
'show': True,
},
{
'format': 'short',
'label': None,
'logBase': 1,
'max': None,
'min': None,
'show': False,
}
],
'yaxis': {
'align': False,
'alignLevel': None
}
},
],
}

View file

@@ -1,236 +0,0 @@
def dashboard_row_disk_space(panel_id, node):
queries_bytes = []
queries_inodes = []
for measurement in [
'used',
'free',
]:
queries_bytes.append({
'groupBy': [
{'type': 'time', 'params': ['$__interval']},
{'type': 'fill', 'params': ['linear']},
],
'orderByTime': "ASC",
'policy': "default",
'query': f"""from(bucket: "telegraf")
|> range(start: v.timeRangeStart, stop: v.timeRangeStop)
|> filter(fn: (r) =>
r["_measurement"] == "disk" and
r["_field"] == "{measurement}" and
r["fstype"] == "ext4" and
r["host"] == "{node.name}"
)
|> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)
|> yield(name: "{measurement}")""",
'resultFormat': 'time_series',
'select': [[
{'type': 'field', 'params': ['value']},
{'type': 'mean', 'params': []},
]],
"tags": []
})
for measurement in [
'inodes_used',
'inodes_free',
]:
queries_inodes.append({
'groupBy': [
{'type': 'time', 'params': ['$__interval']},
{'type': 'fill', 'params': ['linear']},
],
'orderByTime': "ASC",
'policy': "default",
'query': f"""from(bucket: "telegraf")
|> range(start: v.timeRangeStart, stop: v.timeRangeStop)
|> filter(fn: (r) =>
r["_measurement"] == "disk" and
r["_field"] == "{measurement}" and
r["fstype"] == "ext4" and
r["host"] == "{node.name}"
)
|> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)
|> yield(name: "{measurement}")""",
'resultFormat': 'time_series',
'select': [[
{'type': 'field', 'params': ['value']},
{'type': 'mean', 'params': []},
]],
"tags": []
})
return {
'title': 'disk space',
'collapse': False,
'editable': False,
'height': '200px',
'panels': [
{
'aliasColors': {},
'bars': False,
'dashLength': 10,
'dashes': False,
'datasource': None,
'fieldConfig': {
'defaults': {
'displayName': '${__field.name} ${__field.labels.path}'
},
'overrides': []
},
'fill': 1,
'fillGradient': 0,
'hiddenSeries': False,
'id': next(panel_id),
'legend': {
'alignAsTable': False,
'avg': False,
'current': False,
'max': False,
'min': False,
'rightSide': False,
'show': True,
'total': False,
'values': False
},
'lines': True,
'linewidth': 1,
'NonePointMode': 'None',
'options': {
'alertThreshold': True
},
'percentage': False,
'pluginVersion': '7.5.5',
'pointradius': 2,
'points': False,
'renderer': 'flot',
'seriesOverrides': [],
'spaceLength': 10,
'span': 6,
'stack': True,
'steppedLine': False,
'targets': queries_bytes,
'thresholds': [],
'timeRegions': [],
'title': 'disk space',
'tooltip': {
'shared': True,
'sort': 0,
'value_type': 'individual'
},
'type': 'graph',
'xaxis': {
'buckets': None,
'mode': 'time',
'name': None,
'show': True,
'values': []
},
'yaxes': [
{
'format': 'bytes',
'label': None,
'logBase': 1,
'max': None,
'min': 0,
'show': True,
},
{
'format': 'short',
'label': None,
'logBase': 1,
'max': None,
'min': None,
'show': False,
}
],
'yaxis': {
'align': False,
'alignLevel': None
}
},
{
'aliasColors': {},
'bars': False,
'dashLength': 10,
'dashes': False,
'datasource': None,
'fieldConfig': {
'defaults': {
'displayName': '${__field.name} ${__field.labels.path}'
},
'overrides': []
},
'fill': 1,
'fillGradient': 0,
'hiddenSeries': False,
'id': next(panel_id),
'legend': {
'alignAsTable': False,
'avg': False,
'current': False,
'max': False,
'min': False,
'rightSide': False,
'show': True,
'total': False,
'values': False
},
'lines': True,
'linewidth': 1,
'NonePointMode': 'None',
'options': {
'alertThreshold': True
},
'percentage': False,
'pluginVersion': '7.5.5',
'pointradius': 2,
'points': False,
'renderer': 'flot',
'seriesOverrides': [],
'spaceLength': 10,
'span': 6,
'stack': True,
'steppedLine': False,
'targets': queries_inodes,
'thresholds': [],
'timeRegions': [],
'title': 'disk inodes',
'tooltip': {
'shared': True,
'sort': 0,
'value_type': 'individual'
},
'type': 'graph',
'xaxis': {
'buckets': None,
'mode': 'time',
'name': None,
'show': True,
'values': []
},
'yaxes': [
{
'format': 'short',
'label': None,
'logBase': 1,
'max': None,
'min': 0,
'show': True,
},
{
'format': 'short',
'label': None,
'logBase': 1,
'max': None,
'min': None,
'show': False,
}
],
'yaxis': {
'align': False,
'alignLevel': None
}
},
],
}

View file

@@ -1,252 +0,0 @@
def dashboard_row_dovecot(panel_id, node):
return {
'title': 'dovecot',
'collapse': False,
'editable': False,
'height': '200px',
'panels': [
{
'aliasColors': {},
'bars': False,
'dashLength': 10,
'dashes': False,
'datasource': None,
'fieldConfig': {
'defaults': {
'displayName': '${__field.name}'
},
'overrides': []
},
'fill': 1,
'fillGradient': 0,
'hiddenSeries': False,
'id': next(panel_id),
'legend': {
'alignAsTable': False,
'avg': False,
'current': False,
'max': False,
'min': False,
'rightSide': False,
'show': True,
'total': False,
'values': False
},
'lines': True,
'linewidth': 1,
'nullPointMode': 'null',
'options': {
'alertThreshold': True
},
'percentage': False,
'pluginVersion': '7.5.5',
'pointradius': 2,
'points': False,
'renderer': 'flot',
'seriesOverrides': [],
'spaceLength': 10,
'span': 6,
'stack': False,
'steppedLine': False,
'targets': [{
'groupBy': [
{'type': 'time', 'params': ['$__interval']},
{'type': 'fill', 'params': ['linear']},
],
'orderByTime': "ASC",
'policy': "default",
'query': f"""from(bucket: "telegraf")
|> range(start: v.timeRangeStart, stop: v.timeRangeStop)
|> filter(fn: (r) =>
r["_measurement"] == "dovecot" and
r["_field"] == "num_connected_sessions" and
r["host"] == "{node.name}"
)
|> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)
|> yield(name: "num_connected_sessions")""",
'resultFormat': 'time_series',
'select': [[
{'type': 'field', 'params': ['value']},
{'type': 'mean', 'params': []},
]],
"tags": []
}],
'thresholds': [],
'timeRegions': [],
'title': 'dovecot connected sessions',
'tooltip': {
'shared': True,
'sort': 0,
'value_type': 'individual'
},
'type': 'graph',
'xaxis': {
'buckets': None,
'mode': 'time',
'name': None,
'show': True,
'values': []
},
'yaxes': [
{
'format': 'short',
'label': None,
'logBase': 1,
'max': None,
'min': 0,
'show': True,
},
{
'format': 'short',
'label': None,
'logBase': 1,
'max': None,
'min': None,
'show': False,
}
],
'yaxis': {
'align': False,
'alignLevel': None
}
},
{
'aliasColors': {},
'bars': False,
'dashLength': 10,
'dashes': False,
'datasource': None,
'fieldConfig': {
'defaults': {
'displayName': '${__field.name}'
},
'overrides': []
},
'fill': 1,
'fillGradient': 0,
'hiddenSeries': False,
'id': next(panel_id),
'legend': {
'alignAsTable': False,
'avg': False,
'current': False,
'max': False,
'min': False,
'rightSide': False,
'show': True,
'total': False,
'values': False
},
'lines': True,
'linewidth': 1,
'nullPointMode': 'null',
'options': {
'alertThreshold': True
},
'percentage': False,
'pluginVersion': '7.5.5',
'pointradius': 2,
'points': False,
'renderer': 'flot',
'seriesOverrides': [],
'spaceLength': 10,
'span': 6,
'stack': False,
'steppedLine': False,
'targets': [
{
'groupBy': [
{'type': 'time', 'params': ['$__interval']},
{'type': 'fill', 'params': ['linear']},
],
'orderByTime': "ASC",
'policy': "default",
'query': f"""from(bucket: "telegraf")
|> range(start: v.timeRangeStart, stop: v.timeRangeStop)
|> filter(fn: (r) =>
r["_measurement"] == "dovecot" and
r["_field"] == "read_bytes" and
r["host"] == "{node.name}"
)
|> derivative(unit: 1s)
|> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)
|> yield(name: "read")""",
'resultFormat': 'time_series',
'select': [[
{'type': 'field', 'params': ['value']},
{'type': 'mean', 'params': []},
]],
"tags": []
},
{
'groupBy': [
{'type': 'time', 'params': ['$__interval']},
{'type': 'fill', 'params': ['linear']},
],
'orderByTime': "ASC",
'policy': "default",
'query': f"""from(bucket: "telegraf")
|> range(start: v.timeRangeStart, stop: v.timeRangeStop)
|> filter(fn: (r) =>
r["_measurement"] == "dovecot" and
r["_field"] == "write_bytes" and
r["host"] == "{node.name}"
)
|> map(fn: (r) => ({{
r with
_value: r._value * -1
}})
)
|> derivative(unit: 1s)
|> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)
|> yield(name: "write")""",
'resultFormat': 'time_series',
'select': [[
{'type': 'field', 'params': ['value']},
{'type': 'mean', 'params': []},
]],
"tags": []
},
],
'thresholds': [],
'timeRegions': [],
'title': 'dovecot traffic',
'tooltip': {
'shared': True,
'sort': 0,
'value_type': 'individual'
},
'type': 'graph',
'xaxis': {
'buckets': None,
'mode': 'time',
'name': None,
'show': True,
'values': []
},
'yaxes': [
{
'format': 'binBps',
'label': None,
'logBase': 1,
'max': None,
'min': None,
'show': True,
'decimals': 0,
},
{
'format': 'short',
'label': None,
'logBase': 1,
'max': None,
'min': None,
'show': False,
}
],
'yaxis': {
'align': False,
'alignLevel': None
}
},
],
}

View file

@@ -1,583 +0,0 @@
def dashboard_row_ip_traffic(panel_id, node):
return {
'title': 'ip traffic',
'collapse': False,
'editable': False,
'height': '250px',
'panels': [
{
'aliasColors': {},
'bars': False,
'dashLength': 10,
'dashes': False,
'datasource': None,
'fieldConfig': {
'defaults': {
'displayName': '${__field.name} ${__field.labels.interface}'
},
'overrides': []
},
'fill': 1,
'fillGradient': 0,
'hiddenSeries': False,
'id': next(panel_id),
'legend': {
'alignAsTable': False,
'avg': False,
'current': False,
'max': False,
'min': False,
'rightSide': False,
'show': True,
'total': False,
'values': False
},
'lines': True,
'linewidth': 1,
'nullPointMode': 'null',
'options': {
'alertThreshold': True
},
'percentage': False,
'pluginVersion': '7.5.5',
'pointradius': 2,
'points': False,
'renderer': 'flot',
'seriesOverrides': [],
'spaceLength': 10,
'span': 6,
'stack': False,
'steppedLine': False,
'targets': [
{
'groupBy': [
{'type': 'time', 'params': ['$__interval']},
{'type': 'fill', 'params': ['linear']},
],
'orderByTime': "ASC",
'policy': "default",
'query': f"""from(bucket: "telegraf")
|> range(start: v.timeRangeStart, stop: v.timeRangeStop)
|> filter(fn: (r) =>
r["_measurement"] == "net" and
r["_field"] == "bytes_recv" and
r["host"] == "{node.name}"
)
|> map(fn: (r) => ({{
r with
_field: "in"
}})
)
|> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)
|> derivative(unit: 1s)
|> yield(name: "in")""",
'resultFormat': 'time_series',
'select': [[
{'type': 'field', 'params': ['value']},
{'type': 'mean', 'params': []},
]],
"tags": []
},
{
'groupBy': [
{'type': 'time', 'params': ['$__interval']},
{'type': 'fill', 'params': ['linear']},
],
'orderByTime': "ASC",
'policy': "default",
'query': f"""from(bucket: "telegraf")
|> range(start: v.timeRangeStart, stop: v.timeRangeStop)
|> filter(fn: (r) =>
r["_measurement"] == "net" and
r["_field"] == "bytes_sent" and
r["host"] == "{node.name}"
)
|> map(fn: (r) => ({{
r with
_value: r._value * -1,
_field: "out"
}})
)
|> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)
|> derivative(unit: 1s)
|> yield(name: "out")""",
'resultFormat': 'time_series',
'select': [[
{'type': 'field', 'params': ['value']},
{'type': 'mean', 'params': []},
]],
"tags": []
},
],
'thresholds': [],
'timeRegions': [],
'title': 'bytes per interface',
'tooltip': {
'shared': True,
'sort': 0,
'value_type': 'individual'
},
'type': 'graph',
'xaxis': {
'buckets': None,
'mode': 'time',
'name': None,
'show': True,
'values': []
},
'yaxes': [
{
'format': 'binBps',
'label': None,
'logBase': 1,
'max': None,
'min': None,
'show': True,
},
{
'format': 'short',
'label': None,
'logBase': 1,
'max': None,
'min': None,
'show': False,
}
],
'yaxis': {
'align': False,
'alignLevel': None
}
},
{
'aliasColors': {},
'bars': False,
'dashLength': 10,
'dashes': False,
'datasource': None,
'fieldConfig': {
'defaults': {
'displayName': '${__field.name} ${__field.labels.interface}'
},
'overrides': []
},
'fill': 1,
'fillGradient': 0,
'hiddenSeries': False,
'id': next(panel_id),
'legend': {
'alignAsTable': False,
'avg': False,
'current': False,
'max': False,
'min': False,
'rightSide': False,
'show': True,
'total': False,
'values': False
},
'lines': True,
'linewidth': 1,
'nullPointMode': 'null',
'options': {
'alertThreshold': True
},
'percentage': False,
'pluginVersion': '7.5.5',
'pointradius': 2,
'points': False,
'renderer': 'flot',
'seriesOverrides': [],
'spaceLength': 10,
'span': 6,
'stack': False,
'steppedLine': False,
'targets': [
{
'groupBy': [
{'type': 'time', 'params': ['$__interval']},
{'type': 'fill', 'params': ['linear']},
],
'orderByTime': "ASC",
'policy': "default",
'query': f"""from(bucket: "telegraf")
|> range(start: v.timeRangeStart, stop: v.timeRangeStop)
|> filter(fn: (r) =>
r["_measurement"] == "net" and
r["_field"] == "packets_recv" and
r["host"] == "{node.name}"
)
|> map(fn: (r) => ({{
r with
_field: "in"
}})
)
|> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)
|> derivative(unit: 1s)
|> yield(name: "in")""",
'resultFormat': 'time_series',
'select': [[
{'type': 'field', 'params': ['value']},
{'type': 'mean', 'params': []},
]],
"tags": []
},
{
'groupBy': [
{'type': 'time', 'params': ['$__interval']},
{'type': 'fill', 'params': ['linear']},
],
'orderByTime': "ASC",
'policy': "default",
'query': f"""from(bucket: "telegraf")
|> range(start: v.timeRangeStart, stop: v.timeRangeStop)
|> filter(fn: (r) =>
r["_measurement"] == "net" and
r["_field"] == "packets_sent" and
r["host"] == "{node.name}"
)
|> map(fn: (r) => ({{
r with
_value: r._value * -1,
_field: "out"
}})
)
|> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)
|> derivative(unit: 1s)
|> yield(name: "out")""",
'resultFormat': 'time_series',
'select': [[
{'type': 'field', 'params': ['value']},
{'type': 'mean', 'params': []},
]],
"tags": []
},
],
'thresholds': [],
'timeRegions': [],
'title': 'packets per interface',
'tooltip': {
'shared': True,
'sort': 0,
'value_type': 'individual'
},
'type': 'graph',
'xaxis': {
'buckets': None,
'mode': 'time',
'name': None,
'show': True,
'values': []
},
'yaxes': [
{
'format': 'short',
'label': None,
'logBase': 1,
'max': None,
'min': None,
'show': True,
'decimals': 0,
},
{
'format': 'short',
'label': None,
'logBase': 1,
'max': None,
'min': None,
'show': False,
}
],
'yaxis': {
'align': False,
'alignLevel': None
}
},
{
'aliasColors': {},
'bars': False,
'dashLength': 10,
'dashes': False,
'datasource': None,
'fieldConfig': {
'defaults': {
'displayName': '${__field.name}'
},
'overrides': []
},
'fill': 1,
'fillGradient': 0,
'hiddenSeries': False,
'id': next(panel_id),
'legend': {
'alignAsTable': False,
'avg': False,
'current': False,
'max': False,
'min': False,
'rightSide': False,
'show': True,
'total': False,
'values': False
},
'lines': True,
'linewidth': 1,
'nullPointMode': 'null',
'options': {
'alertThreshold': True
},
'percentage': False,
'pluginVersion': '7.5.5',
'pointradius': 2,
'points': False,
'renderer': 'flot',
'seriesOverrides': [],
'spaceLength': 10,
'span': 6,
'stack': False,
'steppedLine': False,
'targets': [
{
'groupBy': [
{'type': 'time', 'params': ['$__interval']},
{'type': 'fill', 'params': ['linear']},
],
'orderByTime': "ASC",
'policy': "default",
'query': f"""from(bucket: "telegraf")
|> range(start: v.timeRangeStart, stop: v.timeRangeStop)
|> filter(fn: (r) =>
r["_measurement"] == "nstat" and
r["_field"] == "IpExtInOctets" and
r["host"] == "{node.name}"
)
|> map(fn: (r) => ({{
r with
_field: "in"
}})
)
|> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)
|> derivative(unit: 1s)
|> yield(name: "in")""",
'resultFormat': 'time_series',
'select': [[
{'type': 'field', 'params': ['value']},
{'type': 'mean', 'params': []},
]],
"tags": []
},
{
'groupBy': [
{'type': 'time', 'params': ['$__interval']},
{'type': 'fill', 'params': ['linear']},
],
'orderByTime': "ASC",
'policy': "default",
'query': f"""from(bucket: "telegraf")
|> range(start: v.timeRangeStart, stop: v.timeRangeStop)
|> filter(fn: (r) =>
r["_measurement"] == "nstat" and
r["_field"] == "IpExtOutOctets" and
r["host"] == "{node.name}"
)
|> map(fn: (r) => ({{
r with
_value: r._value * -1,
_field: "out"
}})
)
|> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)
|> derivative(unit: 1s)
|> yield(name: "out")""",
'resultFormat': 'time_series',
'select': [[
{'type': 'field', 'params': ['value']},
{'type': 'mean', 'params': []},
]],
"tags": []
},
],
'thresholds': [],
'timeRegions': [],
'title': 'IPv4',
'tooltip': {
'shared': True,
'sort': 0,
'value_type': 'individual'
},
'type': 'graph',
'xaxis': {
'buckets': None,
'mode': 'time',
'name': None,
'show': True,
'values': []
},
'yaxes': [
{
'format': 'binBps',
'label': None,
'logBase': 1,
'max': None,
'min': None,
'show': True,
},
{
'format': 'short',
'label': None,
'logBase': 1,
'max': None,
'min': None,
'show': False,
}
],
'yaxis': {
'align': False,
'alignLevel': None
}
},
{
'aliasColors': {},
'bars': False,
'dashLength': 10,
'dashes': False,
'datasource': None,
'fieldConfig': {
'defaults': {
'displayName': '${__field.name}'
},
'overrides': []
},
'fill': 1,
'fillGradient': 0,
'hiddenSeries': False,
'id': next(panel_id),
'legend': {
'alignAsTable': False,
'avg': False,
'current': False,
'max': False,
'min': False,
'rightSide': False,
'show': True,
'total': False,
'values': False
},
'lines': True,
'linewidth': 1,
'nullPointMode': 'null',
'options': {
'alertThreshold': True
},
'percentage': False,
'pluginVersion': '7.5.5',
'pointradius': 2,
'points': False,
'renderer': 'flot',
'seriesOverrides': [],
'spaceLength': 10,
'span': 6,
'stack': False,
'steppedLine': False,
'targets': [
{
'groupBy': [
{'type': 'time', 'params': ['$__interval']},
{'type': 'fill', 'params': ['linear']},
],
'orderByTime': "ASC",
'policy': "default",
'query': f"""from(bucket: "telegraf")
|> range(start: v.timeRangeStart, stop: v.timeRangeStop)
|> filter(fn: (r) =>
r["_measurement"] == "nstat" and
r["_field"] == "Ip6InOctets" and
r["host"] == "{node.name}"
)
|> map(fn: (r) => ({{
r with
_field: "in"
}})
)
|> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)
|> derivative(unit: 1s)
|> yield(name: "in")""",
'resultFormat': 'time_series',
'select': [[
{'type': 'field', 'params': ['value']},
{'type': 'mean', 'params': []},
]],
"tags": []
},
{
'groupBy': [
{'type': 'time', 'params': ['$__interval']},
{'type': 'fill', 'params': ['linear']},
],
'orderByTime': "ASC",
'policy': "default",
'query': f"""from(bucket: "telegraf")
|> range(start: v.timeRangeStart, stop: v.timeRangeStop)
|> filter(fn: (r) =>
r["_measurement"] == "nstat" and
r["_field"] == "Ip6OutOctets" and
r["host"] == "{node.name}"
)
|> map(fn: (r) => ({{
r with
_value: r._value * -1,
_field: "out"
}})
)
|> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)
|> derivative(unit: 1s)
|> yield(name: "out")""",
'resultFormat': 'time_series',
'select': [[
{'type': 'field', 'params': ['value']},
{'type': 'mean', 'params': []},
]],
"tags": []
},
],
'thresholds': [],
'timeRegions': [],
'title': 'IPv6',
'tooltip': {
'shared': True,
'sort': 0,
'value_type': 'individual'
},
'type': 'graph',
'xaxis': {
'buckets': None,
'mode': 'time',
'name': None,
'show': True,
'values': []
},
'yaxes': [
{
'format': 'binBps',
'label': None,
'logBase': 1,
'max': None,
'min': None,
'show': True,
'decimals': 0,
},
{
'format': 'short',
'label': None,
'logBase': 1,
'max': None,
'min': None,
'show': False,
}
],
'yaxis': {
'align': False,
'alignLevel': None
}
},
],
}

View file

@@ -1,447 +0,0 @@
def dashboard_row_ipmitool(panel_id, node):
return {
'title': 'ipmitool',
'collapse': False,
'editable': False,
'height': '250px',
'panels': [
{
'aliasColors': {},
'bars': False,
'dashLength': 10,
'dashes': False,
'datasource': None,
'fieldConfig': {
'defaults': {
'displayName': '${__field.labels.name}'
},
'overrides': []
},
'fill': 0,
'fillGradient': 0,
'hiddenSeries': False,
'id': next(panel_id),
'legend': {
'alignAsTable': False,
'avg': False,
'current': False,
'hideEmpty': True,
'hideZero': True,
'max': False,
'min': False,
'rightSide': False,
'show': True,
'total': False,
'values': False
},
'lines': True,
'linewidth': 1,
'nullPointMode': 'null',
'options': {
'alertThreshold': True
},
'percentage': False,
'pluginVersion': '7.5.5',
'pointradius': 2,
'points': False,
'renderer': 'flot',
'seriesOverrides': [],
'spaceLength': 10,
'span': 8,
'stack': False,
'steppedLine': False,
'targets': [
{
'groupBy': [
{'type': 'time', 'params': ['$__interval']},
{'type': 'fill', 'params': ['linear']},
],
'orderByTime': "ASC",
'policy': "default",
'query': f"""from(bucket: "telegraf")
|> range(start: v.timeRangeStart, stop: v.timeRangeStop)
|> filter(fn: (r) =>
r["_measurement"] == "ipmi_sensor" and
r["unit"] == "degrees_c" and
r["host"] == "{node.name}"
)
|> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)
|> yield(name: "cpu")""",
'resultFormat': 'time_series',
'select': [[
{'type': 'field', 'params': ['value']},
{'type': 'mean', 'params': []},
]],
"tags": []
},
],
'thresholds': [],
'timeRegions': [],
'title': 'temperatures',
'tooltip': {
'shared': True,
'sort': 0,
'value_type': 'individual'
},
'type': 'graph',
'xaxis': {
'buckets': None,
'mode': 'time',
'name': None,
'show': True,
'values': []
},
'yaxes': [
{
'format': 'celsius',
'label': None,
'logBase': 1,
'max': None,
'min': 0,
'show': True,
},
{
'format': 'short',
'label': None,
'logBase': 1,
'max': None,
'min': None,
'show': False,
}
],
'yaxis': {
'align': False,
'alignLevel': None
}
},
{
'aliasColors': {},
'bars': False,
'dashLength': 10,
'dashes': False,
'datasource': None,
'fieldConfig': {
'defaults': {
'displayName': '${__field.labels.name}'
},
'overrides': []
},
'fill': 0,
'fillGradient': 0,
'hiddenSeries': False,
'id': next(panel_id),
'legend': {
'alignAsTable': False,
'avg': False,
'current': False,
'hideEmpty': True,
'hideZero': True,
'max': False,
'min': False,
'rightSide': False,
'show': True,
'total': False,
'values': False
},
'lines': True,
'linewidth': 1,
                'nullPointMode': 'null',
'options': {
'alertThreshold': True
},
'percentage': False,
'pluginVersion': '7.5.5',
'pointradius': 2,
'points': False,
'renderer': 'flot',
'seriesOverrides': [],
'spaceLength': 10,
'span': 4,
'stack': False,
'steppedLine': False,
'targets': [
{
'groupBy': [
{'type': 'time', 'params': ['$__interval']},
{'type': 'fill', 'params': ['linear']},
],
'orderByTime': "ASC",
'policy': "default",
'query': f"""from(bucket: "telegraf")
|> range(start: v.timeRangeStart, stop: v.timeRangeStop)
|> filter(fn: (r) =>
r["_measurement"] == "ipmi_sensor" and
r["unit"] == "rpm" and
r["host"] == "{node.name}"
)
|> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)
|> yield(name: "fan")""",
'resultFormat': 'time_series',
'select': [[
{'type': 'field', 'params': ['value']},
{'type': 'mean', 'params': []},
]],
"tags": []
},
],
'thresholds': [],
'timeRegions': [],
'title': 'fans',
'tooltip': {
'shared': True,
'sort': 0,
'value_type': 'individual'
},
'type': 'graph',
'xaxis': {
'buckets': None,
'mode': 'time',
'name': None,
'show': True,
'values': []
},
'yaxes': [
{
'format': 'rotrpm',
'label': None,
'logBase': 1,
'max': None,
'min': 0,
'show': True,
'decimals': 0,
},
{
'format': 'short',
'label': None,
'logBase': 1,
'max': None,
'min': None,
'show': False,
}
],
'yaxis': {
'align': False,
'alignLevel': None
}
},
{
'aliasColors': {},
'bars': False,
'dashLength': 10,
'dashes': False,
'datasource': None,
'fieldConfig': {
'defaults': {
'displayName': '${__field.labels.name}'
},
'overrides': []
},
'fill': 0,
'fillGradient': 0,
'hiddenSeries': False,
'id': next(panel_id),
'legend': {
'alignAsTable': False,
'avg': False,
'current': False,
'hideEmpty': True,
'hideZero': True,
'max': False,
'min': False,
'rightSide': False,
'show': True,
'total': False,
'values': False
},
'lines': True,
'linewidth': 1,
                'nullPointMode': 'null',
'options': {
'alertThreshold': True
},
'percentage': False,
'pluginVersion': '7.5.5',
'pointradius': 2,
'points': False,
'renderer': 'flot',
'seriesOverrides': [],
'spaceLength': 10,
'span': 5,
'stack': False,
'steppedLine': False,
'targets': [
{
'groupBy': [
{'type': 'time', 'params': ['$__interval']},
{'type': 'fill', 'params': ['linear']},
],
'orderByTime': "ASC",
'policy': "default",
'query': f"""from(bucket: "telegraf")
|> range(start: v.timeRangeStart, stop: v.timeRangeStop)
|> filter(fn: (r) =>
r["_measurement"] == "ipmi_sensor" and
r["unit"] == "volts" and
r["host"] == "{node.name}"
)
|> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)
|> yield(name: "cpu")""",
'resultFormat': 'time_series',
'select': [[
{'type': 'field', 'params': ['value']},
{'type': 'mean', 'params': []},
]],
"tags": []
},
],
'thresholds': [],
'timeRegions': [],
'title': 'voltages',
'tooltip': {
'shared': True,
'sort': 0,
'value_type': 'individual'
},
'type': 'graph',
'xaxis': {
'buckets': None,
'mode': 'time',
'name': None,
'show': True,
'values': []
},
'yaxes': [
{
'format': 'volts',
'label': None,
'logBase': 1,
'max': None,
'min': 0,
'show': True,
},
{
'format': 'short',
'label': None,
'logBase': 1,
'max': None,
'min': None,
'show': False,
}
],
'yaxis': {
'align': False,
'alignLevel': None
}
},
{
'aliasColors': {},
'bars': False,
'dashLength': 10,
'dashes': False,
'datasource': None,
'fieldConfig': {
'defaults': {
'displayName': '${__field.labels.name}'
},
'overrides': []
},
'fill': 0,
'fillGradient': 0,
'hiddenSeries': False,
'id': next(panel_id),
'legend': {
'alignAsTable': False,
'avg': False,
'current': False,
'hideEmpty': True,
'hideZero': True,
'max': False,
'min': False,
'rightSide': False,
'show': True,
'total': False,
'values': False
},
'lines': True,
'linewidth': 1,
                'nullPointMode': 'null',
'options': {
'alertThreshold': True
},
'percentage': False,
'pluginVersion': '7.5.5',
'pointradius': 2,
'points': False,
'renderer': 'flot',
'seriesOverrides': [],
'spaceLength': 10,
'span': 7,
'stack': False,
'steppedLine': False,
'targets': [
{
'groupBy': [
{'type': 'time', 'params': ['$__interval']},
{'type': 'fill', 'params': ['linear']},
],
'orderByTime': "ASC",
'policy': "default",
'query': f"""from(bucket: "telegraf")
|> range(start: v.timeRangeStart, stop: v.timeRangeStop)
|> filter(fn: (r) =>
r["_measurement"] == "ipmi_sensor" and
r["unit"] == "watts" and
r["host"] == "{node.name}"
)
|> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)
|> yield(name: "fan")""",
'resultFormat': 'time_series',
'select': [[
{'type': 'field', 'params': ['value']},
{'type': 'mean', 'params': []},
]],
"tags": []
},
],
'thresholds': [],
'timeRegions': [],
'title': 'power',
'tooltip': {
'shared': True,
'sort': 0,
'value_type': 'individual'
},
'type': 'graph',
'xaxis': {
'buckets': None,
'mode': 'time',
'name': None,
'show': True,
'values': []
},
'yaxes': [
{
'format': 'watts',
'label': None,
'logBase': 1,
'max': None,
'min': 0,
'show': True,
'decimals': 0,
},
{
'format': 'short',
'label': None,
'logBase': 1,
'max': None,
'min': None,
'show': False,
}
],
'yaxis': {
'align': False,
'alignLevel': None
}
},
],
}

View file

@ -1,126 +0,0 @@
def dashboard_row_memory(panel_id, node):
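    """Grafana row with a single stacked 'memory usage' panel built from the
    Telegraf `mem` measurement (used, buffered, cached, sreclaimable,
    sunreclaim, free)."""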
queries_mem = []
for measurement in [
'used',
'buffered',
'cached',
'sreclaimable',
'sunreclaim',
'free',
]:
queries_mem.append({
'groupBy': [
{'type': 'time', 'params': ['$__interval']},
{'type': 'fill', 'params': ['linear']},
],
'orderByTime': "ASC",
'policy': "default",
'query': f"""from(bucket: "telegraf")
|> range(start: v.timeRangeStart, stop: v.timeRangeStop)
|> filter(fn: (r) =>
r["_measurement"] == "mem" and
r["_field"] == "{measurement}" and
r["host"] == "{node.name}"
)
|> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)
|> yield(name: "{measurement}")""",
'resultFormat': 'time_series',
'select': [[
{'type': 'field', 'params': ['value']},
{'type': 'mean', 'params': []},
]],
"tags": []
})
return {
'title': 'memory',
'collapse': False,
'editable': False,
'height': '200px',
'panels': [
{
'aliasColors': {},
'bars': False,
'dashLength': 10,
'dashes': False,
'datasource': None,
'fieldConfig': {
'defaults': {
'displayName': '${__field.name}'
},
'overrides': []
},
'fill': 1,
'fillGradient': 0,
'hiddenSeries': False,
'id': next(panel_id),
'legend': {
'alignAsTable': False,
'avg': False,
'current': False,
'max': False,
'min': False,
'rightSide': False,
'show': True,
'total': False,
'values': False
},
'lines': True,
'linewidth': 1,
                'nullPointMode': 'null',
'options': {
'alertThreshold': True
},
'percentage': False,
'pluginVersion': '7.5.5',
'pointradius': 2,
'points': False,
'renderer': 'flot',
'seriesOverrides': [],
'spaceLength': 10,
'span': 12,
'stack': True,
'steppedLine': False,
'targets': queries_mem,
'thresholds': [],
'timeRegions': [],
'title': 'memory usage',
'tooltip': {
'shared': True,
'sort': 0,
'value_type': 'individual'
},
'type': 'graph',
'xaxis': {
'buckets': None,
'mode': 'time',
'name': None,
'show': True,
'values': []
},
'yaxes': [
{
'format': 'bytes',
'label': None,
'logBase': 1,
'max': None,
'min': 0,
'show': True,
},
{
'format': 'short',
'label': None,
'logBase': 1,
'max': None,
'min': None,
'show': False,
}
],
'yaxis': {
'align': False,
'alignLevel': None
}
},
],
}

View file

@ -1,376 +0,0 @@
def dashboard_row_nginx(panel_id, node):
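    """Grafana row for nginx: stacked connection and throughput panels from the
    `nginx` measurement and a request/upstream timing panel from `nginx_timing`."""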
queries_through = []
queries_conn = []
queries_timing = []
for measurement in [
'accepted',
'handled',
'requests',
]:
queries_through.append({
'groupBy': [
{'type': 'time', 'params': ['$__interval']},
{'type': 'fill', 'params': ['linear']},
],
'orderByTime': "ASC",
'policy': "default",
'query': f"""from(bucket: "telegraf")
|> range(start: v.timeRangeStart, stop: v.timeRangeStop)
|> filter(fn: (r) =>
r["_measurement"] == "nginx" and
r["_field"] == "{measurement}" and
r["host"] == "{node.name}"
)
|> derivative(unit: 1s)
|> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)
|> yield(name: "{measurement}")""",
'resultFormat': 'time_series',
'select': [[
{'type': 'field', 'params': ['value']},
{'type': 'mean', 'params': []},
]],
"tags": []
})
for measurement in [
'active',
'reading',
'writing',
'waiting',
]:
queries_conn.append({
'groupBy': [
{'type': 'time', 'params': ['$__interval']},
{'type': 'fill', 'params': ['linear']},
],
'orderByTime': "ASC",
'policy': "default",
'query': f"""from(bucket: "telegraf")
|> range(start: v.timeRangeStart, stop: v.timeRangeStop)
|> filter(fn: (r) =>
r["_measurement"] == "nginx" and
r["_field"] == "{measurement}" and
r["host"] == "{node.name}"
)
|> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)
|> yield(name: "{measurement}")""",
'resultFormat': 'time_series',
'select': [[
{'type': 'field', 'params': ['value']},
{'type': 'mean', 'params': []},
]],
"tags": []
})
for measurement in [
'request_time',
'upstream_response_time',
]:
queries_timing.append({
'groupBy': [
{'type': 'time', 'params': ['$__interval']},
{'type': 'fill', 'params': ['linear']},
],
'orderByTime': "ASC",
'policy': "default",
'query': f"""from(bucket: "telegraf")
|> range(start: v.timeRangeStart, stop: v.timeRangeStop)
|> filter(fn: (r) =>
r["_measurement"] == "nginx_timing" and
r["_field"] == "{measurement}" and
r["host"] == "{node.name}"
)
|> map(fn: (r) => ({{
r with
_field: "{measurement}"
}})
)
|> group(columns: ["path", "_field"])
|> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)
|> yield(name: "{measurement}")""",
'resultFormat': 'time_series',
'select': [[
{'type': 'field', 'params': ['value']},
{'type': 'mean', 'params': []},
]],
"tags": []
})
return {
'title': 'nginx',
'collapse': False,
'editable': False,
'height': '250px',
'panels': [
{
'aliasColors': {},
'bars': False,
'dashLength': 10,
'dashes': False,
'datasource': None,
'fieldConfig': {
'defaults': {
'displayName': '${__field.name}'
},
'overrides': []
},
'fill': 1,
'fillGradient': 0,
'hiddenSeries': False,
'id': next(panel_id),
'legend': {
'alignAsTable': False,
'avg': False,
'current': False,
'max': False,
'min': False,
'rightSide': False,
'show': True,
'total': False,
'values': False
},
'lines': True,
'linewidth': 1,
                'nullPointMode': 'null',
'options': {
'alertThreshold': True
},
'percentage': False,
'pluginVersion': '7.5.5',
'pointradius': 2,
'points': False,
'renderer': 'flot',
'seriesOverrides': [],
'spaceLength': 10,
'span': 6,
'stack': True,
'steppedLine': False,
'targets': queries_conn,
'thresholds': [],
'timeRegions': [],
'title': 'nginx connections',
'tooltip': {
'shared': True,
'sort': 0,
'value_type': 'individual'
},
'type': 'graph',
'xaxis': {
'buckets': None,
'mode': 'time',
'name': None,
'show': True,
'values': []
},
'yaxes': [
{
'format': 'short',
'label': None,
'logBase': 1,
'max': None,
'min': 0,
'show': True,
'decimals': 0,
},
{
'format': 'short',
'label': None,
'logBase': 1,
'max': None,
'min': None,
'show': False,
}
],
'yaxis': {
'align': False,
'alignLevel': None
}
},
{
'aliasColors': {},
'bars': False,
'dashLength': 10,
'dashes': False,
'datasource': None,
'fieldConfig': {
'defaults': {
'displayName': '${__field.name}'
},
'overrides': []
},
'fill': 1,
'fillGradient': 0,
'hiddenSeries': False,
'id': next(panel_id),
'legend': {
'alignAsTable': False,
'avg': False,
'current': False,
'max': False,
'min': False,
'rightSide': False,
'show': True,
'total': False,
'values': False
},
'lines': True,
'linewidth': 1,
                'nullPointMode': 'null',
'options': {
'alertThreshold': True
},
'percentage': False,
'pluginVersion': '7.5.5',
'pointradius': 2,
'points': False,
'renderer': 'flot',
'seriesOverrides': [],
'spaceLength': 10,
'span': 6,
'stack': True,
'steppedLine': False,
'targets': queries_through,
'thresholds': [],
'timeRegions': [],
'title': 'nginx throughput',
'tooltip': {
'shared': True,
'sort': 0,
'value_type': 'individual'
},
'type': 'graph',
'xaxis': {
'buckets': None,
'mode': 'time',
'name': None,
'show': True,
'values': []
},
'yaxes': [
{
'format': 'short',
'label': None,
'logBase': 1,
'max': None,
'min': 0,
'show': True,
'decimals': 0,
},
{
'format': 'short',
'label': None,
'logBase': 1,
'max': None,
'min': None,
'show': False,
}
],
'yaxis': {
'align': False,
'alignLevel': None
}
},
{
'aliasColors': {},
'bars': False,
'dashLength': 10,
'dashes': False,
'datasource': None,
'fieldConfig': {
'defaults': {
'displayName': '${__field.name} ${__field.labels.path}'
},
'overrides': []
},
'fill': 0,
'fillGradient': 0,
'hiddenSeries': False,
'id': next(panel_id),
'legend': {
'alignAsTable': True,
'avg': False,
'current': False,
'max': False,
'min': False,
'rightSide': True,
'show': True,
'total': False,
'values': False
},
'lines': False,
'linewidth': 1,
                'nullPointMode': 'null',
'options': {
'alertThreshold': True
},
'percentage': False,
'pluginVersion': '7.5.5',
'pointradius': 2,
'points': True,
'renderer': 'flot',
'seriesOverrides': [],
'spaceLength': 10,
'span': 12,
'stack': False,
'steppedLine': False,
'targets': queries_timing,
'thresholds': [
{
'colorMode': 'warning',
'fill': False,
'line': True,
'op': 'gt',
'value': 5,
'yaxis': 'left'
},
{
'colorMode': 'critical',
'fill': False,
'line': True,
'op': 'gt',
'value': 15,
'yaxis': 'left'
}
],
'timeRegions': [],
'title': 'nginx timing',
'tooltip': {
'shared': True,
'sort': 0,
'value_type': 'individual'
},
'type': 'graph',
'xaxis': {
'buckets': None,
'mode': 'time',
'name': None,
'show': True,
'values': []
},
'yaxes': [
{
'format': 's',
'label': 'request time',
'logBase': 1,
'max': None,
'min': 0,
'show': True,
'decimals': 0,
},
{
'format': 'short',
'label': None,
'logBase': 1,
'max': None,
'min': None,
'show': False,
}
],
'yaxis': {
'align': False,
'alignLevel': None
}
},
],
}

View file

@ -1,126 +0,0 @@
def dashboard_row_postfix(panel_id, node):
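    """Grafana row with a stacked panel of postfix queue lengths (active,
    corrupt, deferred, hold, incoming) from the `postfix_queue` measurement."""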
queries = []
for measurement in [
'active',
'corrupt',
'deferred',
'hold',
'incoming',
]:
queries.append({
'groupBy': [
{'type': 'time', 'params': ['$__interval']},
{'type': 'fill', 'params': ['linear']},
],
'orderByTime': "ASC",
'policy': "default",
'query': f"""from(bucket: "telegraf")
|> range(start: v.timeRangeStart, stop: v.timeRangeStop)
|> filter(fn: (r) =>
r["_measurement"] == "postfix_queue" and
r["_field"] == "{measurement}" and
r["host"] == "{node.name}"
)
|> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)
|> yield(name: "{measurement}")""",
'resultFormat': 'time_series',
'select': [[
{'type': 'field', 'params': ['value']},
{'type': 'mean', 'params': []},
]],
"tags": []
})
return {
'title': 'postfix',
'collapse': False,
'editable': False,
'height': '200px',
'panels': [
{
'aliasColors': {},
'bars': False,
'dashLength': 10,
'dashes': False,
'datasource': None,
'fieldConfig': {
'defaults': {
'displayName': '${__field.name}'
},
'overrides': []
},
'fill': 1,
'fillGradient': 0,
'hiddenSeries': False,
'id': next(panel_id),
'legend': {
'alignAsTable': False,
'avg': False,
'current': False,
'max': False,
'min': False,
'rightSide': False,
'show': True,
'total': False,
'values': False
},
'lines': True,
'linewidth': 1,
                'nullPointMode': 'null',
'options': {
'alertThreshold': True
},
'percentage': False,
'pluginVersion': '7.5.5',
'pointradius': 2,
'points': False,
'renderer': 'flot',
'seriesOverrides': [],
'spaceLength': 10,
'span': 12,
'stack': True,
'steppedLine': False,
'targets': queries,
'thresholds': [],
'timeRegions': [],
'title': 'postfix queue',
'tooltip': {
'shared': True,
'sort': 0,
'value_type': 'individual'
},
'type': 'graph',
'xaxis': {
'buckets': None,
'mode': 'time',
'name': None,
'show': True,
'values': []
},
'yaxes': [
{
'format': 'short',
'label': None,
'logBase': 1,
'max': None,
'min': 0,
'show': True,
'decimals': 0,
},
{
'format': 'short',
'label': None,
'logBase': 1,
'max': None,
'min': None,
'show': False,
}
],
'yaxis': {
'align': False,
'alignLevel': None
}
},
],
}

View file

@ -1,606 +0,0 @@
def dashboard_row_postgresql(panel_id, node):
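    """Grafana row for PostgreSQL: transactions, rows, conflicts/deadlocks,
    block reads and buffers panels derived from the `postgresql` measurement."""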
queries_transactions = []
queries_rows = []
queries_conflicts = []
queries_blocks = []
queries_buffers = []
for measurement in [
'commit',
'rollback',
]:
queries_transactions.append({
'groupBy': [
{'type': 'time', 'params': ['$__interval']},
{'type': 'fill', 'params': ['linear']},
],
'orderByTime': "ASC",
'policy': "default",
'query': f"""from(bucket: "telegraf")
|> range(start: v.timeRangeStart, stop: v.timeRangeStop)
|> filter(fn: (r) =>
r["_measurement"] == "postgresql" and
r["_field"] == "xact_{measurement}" and
r["host"] == "{node.name}"
)
|> map(fn: (r) => ({{
r with
_field: "{measurement}"
}})
)
|> derivative(unit: 1s)
|> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)
|> yield(name: "{measurement}")""",
'resultFormat': 'time_series',
'select': [[
{'type': 'field', 'params': ['value']},
{'type': 'mean', 'params': []},
]],
"tags": []
})
for measurement in [
'deleted',
'fetched',
'inserted',
'returned',
'updated',
]:
queries_rows.append({
'groupBy': [
{'type': 'time', 'params': ['$__interval']},
{'type': 'fill', 'params': ['linear']},
],
'orderByTime': "ASC",
'policy': "default",
'query': f"""from(bucket: "telegraf")
|> range(start: v.timeRangeStart, stop: v.timeRangeStop)
|> filter(fn: (r) =>
r["_measurement"] == "postgresql" and
r["_field"] == "tup_{measurement}" and
r["host"] == "{node.name}"
)
|> map(fn: (r) => ({{
r with
_field: "{measurement}"
}})
)
|> derivative(unit: 1s)
|> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)
|> yield(name: "{measurement}")""",
'resultFormat': 'time_series',
'select': [[
{'type': 'field', 'params': ['value']},
{'type': 'mean', 'params': []},
]],
"tags": []
})
for measurement in [
'conflicts',
'deadlocks',
]:
queries_conflicts.append({
'groupBy': [
{'type': 'time', 'params': ['$__interval']},
{'type': 'fill', 'params': ['linear']},
],
'orderByTime': "ASC",
'policy': "default",
'query': f"""from(bucket: "telegraf")
|> range(start: v.timeRangeStart, stop: v.timeRangeStop)
|> filter(fn: (r) =>
r["_measurement"] == "postgresql" and
r["_field"] == "{measurement}" and
r["host"] == "{node.name}"
)
|> derivative(unit: 1s)
|> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)
|> yield(name: "{measurement}")""",
'resultFormat': 'time_series',
'select': [[
{'type': 'field', 'params': ['value']},
{'type': 'mean', 'params': []},
]],
"tags": []
})
for measurement in [
'read',
'hit',
]:
queries_blocks.append({
'groupBy': [
{'type': 'time', 'params': ['$__interval']},
{'type': 'fill', 'params': ['linear']},
],
'orderByTime': "ASC",
'policy': "default",
'query': f"""from(bucket: "telegraf")
|> range(start: v.timeRangeStart, stop: v.timeRangeStop)
|> filter(fn: (r) =>
r["_measurement"] == "postgresql" and
r["_field"] == "blks_{measurement}" and
r["host"] == "{node.name}"
)
|> map(fn: (r) => ({{
r with
_field: "{measurement}"
}})
)
|> derivative(unit: 1s)
|> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)
|> yield(name: "{measurement}")""",
'resultFormat': 'time_series',
'select': [[
{'type': 'field', 'params': ['value']},
{'type': 'mean', 'params': []},
]],
"tags": []
})
for measurement, alias in {
'alloc': 'allocated',
'backend': 'written by backend',
'backend_fsync': 'fsync by backend',
'checkpoint': 'written during checkpoints',
'clean': 'written by background writer',
}.items():
queries_buffers.append({
'groupBy': [
{'type': 'time', 'params': ['$__interval']},
{'type': 'fill', 'params': ['linear']},
],
'orderByTime': "ASC",
'policy': "default",
'query': f"""from(bucket: "telegraf")
|> range(start: v.timeRangeStart, stop: v.timeRangeStop)
|> filter(fn: (r) =>
r["_measurement"] == "postgresql" and
r["_field"] == "buffers_{measurement}" and
r["host"] == "{node.name}"
)
|> map(fn: (r) => ({{
r with
_field: "{alias}"
}})
)
|> derivative(unit: 1s)
|> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)
|> yield(name: "{measurement}")""",
'resultFormat': 'time_series',
'select': [[
{'type': 'field', 'params': ['value']},
{'type': 'mean', 'params': []},
]],
"tags": []
})
return {
'title': 'postgresql',
'collapse': False,
'editable': False,
'height': '200px',
'panels': [
{
'aliasColors': {},
'bars': False,
'dashLength': 10,
'dashes': False,
'datasource': None,
'fieldConfig': {
'defaults': {
'displayName': '${__field.name} ${__field.labels.db}'
},
'overrides': []
},
'fill': 1,
'fillGradient': 0,
'hiddenSeries': False,
'id': next(panel_id),
'legend': {
'alignAsTable': False,
'avg': False,
'current': False,
'max': False,
'min': False,
'rightSide': False,
'show': True,
'total': False,
'values': False
},
'lines': True,
'linewidth': 1,
                'nullPointMode': 'null',
'options': {
'alertThreshold': True
},
'percentage': False,
'pluginVersion': '7.5.5',
'pointradius': 2,
'points': False,
'renderer': 'flot',
'seriesOverrides': [],
'spaceLength': 10,
'span': 6,
'stack': True,
'steppedLine': False,
'targets': queries_transactions,
'thresholds': [],
'timeRegions': [],
'title': 'postgresql transactions per second',
'tooltip': {
'shared': True,
'sort': 0,
'value_type': 'individual'
},
'type': 'graph',
'xaxis': {
'buckets': None,
'mode': 'time',
'name': None,
'show': True,
'values': []
},
'yaxes': [
{
'format': 'short',
'label': None,
'logBase': 1,
'max': None,
'min': 0,
'show': True,
'decimals': 0,
},
{
'format': 'short',
'label': None,
'logBase': 1,
'max': None,
'min': None,
'show': False,
}
],
'yaxis': {
'align': False,
'alignLevel': None
}
},
{
'aliasColors': {},
'bars': False,
'dashLength': 10,
'dashes': False,
'datasource': None,
'fieldConfig': {
'defaults': {
'displayName': '${__field.name} ${__field.labels.db}'
},
'overrides': []
},
'fill': 1,
'fillGradient': 0,
'hiddenSeries': False,
'id': next(panel_id),
'legend': {
'alignAsTable': False,
'avg': False,
'current': False,
'max': False,
'min': False,
'rightSide': False,
'show': True,
'total': False,
'values': False
},
'lines': True,
'linewidth': 1,
                'nullPointMode': 'null',
'options': {
'alertThreshold': True
},
'percentage': False,
'pluginVersion': '7.5.5',
'pointradius': 2,
'points': False,
'renderer': 'flot',
'seriesOverrides': [],
'spaceLength': 10,
'span': 6,
'stack': True,
'steppedLine': False,
'targets': queries_rows,
'thresholds': [],
'timeRegions': [],
'title': 'postgresql rows per second',
'tooltip': {
'shared': True,
'sort': 0,
'value_type': 'individual'
},
'type': 'graph',
'xaxis': {
'buckets': None,
'mode': 'time',
'name': None,
'show': True,
'values': []
},
'yaxes': [
{
'format': 'short',
'label': None,
'logBase': 1,
'max': None,
'min': 0,
'show': True,
'decimals': 0,
},
{
'format': 'short',
'label': None,
'logBase': 1,
'max': None,
'min': None,
'show': False,
}
],
'yaxis': {
'align': False,
'alignLevel': None
}
},
{
'aliasColors': {},
'bars': False,
'dashLength': 10,
'dashes': False,
'datasource': None,
'fieldConfig': {
'defaults': {
'displayName': '${__field.name} ${__field.labels.db}'
},
'overrides': []
},
'fill': 1,
'fillGradient': 0,
'hiddenSeries': False,
'id': next(panel_id),
'legend': {
'alignAsTable': False,
'avg': False,
'current': False,
'max': False,
'min': False,
'rightSide': False,
'show': True,
'total': False,
'values': False
},
'lines': True,
'linewidth': 1,
                'nullPointMode': 'null',
'options': {
'alertThreshold': True
},
'percentage': False,
'pluginVersion': '7.5.5',
'pointradius': 2,
'points': False,
'renderer': 'flot',
'seriesOverrides': [],
'spaceLength': 10,
'span': 4,
'stack': True,
'steppedLine': False,
'targets': queries_conflicts,
'thresholds': [],
'timeRegions': [],
'title': 'postgresql conflicts/deadlocks',
'tooltip': {
'shared': True,
'sort': 0,
'value_type': 'individual'
},
'type': 'graph',
'xaxis': {
'buckets': None,
'mode': 'time',
'name': None,
'show': True,
'values': []
},
'yaxes': [
{
'format': 'short',
'label': None,
'logBase': 1,
'max': None,
'min': 0,
'show': True,
'decimals': 0,
},
{
'format': 'short',
'label': None,
'logBase': 1,
'max': None,
'min': None,
'show': False,
}
],
'yaxis': {
'align': False,
'alignLevel': None
}
},
{
'aliasColors': {},
'bars': False,
'dashLength': 10,
'dashes': False,
'datasource': None,
'fieldConfig': {
'defaults': {
'displayName': '${__field.name} ${__field.labels.db}'
},
'overrides': []
},
'fill': 1,
'fillGradient': 0,
'hiddenSeries': False,
'id': next(panel_id),
'legend': {
'alignAsTable': False,
'avg': False,
'current': False,
'max': False,
'min': False,
'rightSide': False,
'show': True,
'total': False,
'values': False
},
'lines': True,
'linewidth': 1,
                'nullPointMode': 'null',
'options': {
'alertThreshold': True
},
'percentage': False,
'pluginVersion': '7.5.5',
'pointradius': 2,
'points': False,
'renderer': 'flot',
'seriesOverrides': [],
'spaceLength': 10,
'span': 4,
'stack': True,
'steppedLine': False,
'targets': queries_blocks,
'thresholds': [],
'timeRegions': [],
'title': 'postgresql blocks read per second',
'tooltip': {
'shared': True,
'sort': 0,
'value_type': 'individual'
},
'type': 'graph',
'xaxis': {
'buckets': None,
'mode': 'time',
'name': None,
'show': True,
'values': []
},
'yaxes': [
{
'format': 'short',
'label': None,
'logBase': 1,
'max': None,
'min': 0,
'show': True,
'decimals': 0,
},
{
'format': 'short',
'label': None,
'logBase': 1,
'max': None,
'min': None,
'show': False,
}
],
'yaxis': {
'align': False,
'alignLevel': None
}
},
{
'aliasColors': {},
'bars': False,
'dashLength': 10,
'dashes': False,
'datasource': None,
'fieldConfig': {
'defaults': {
'displayName': '${__field.name}'
},
'overrides': []
},
'fill': 1,
'fillGradient': 0,
'hiddenSeries': False,
'id': next(panel_id),
'legend': {
'alignAsTable': False,
'avg': False,
'current': False,
'max': False,
'min': False,
'rightSide': False,
'show': True,
'total': False,
'values': False
},
'lines': True,
'linewidth': 1,
                'nullPointMode': 'null',
'options': {
'alertThreshold': True
},
'percentage': False,
'pluginVersion': '7.5.5',
'pointradius': 2,
'points': False,
'renderer': 'flot',
'seriesOverrides': [],
'spaceLength': 10,
'span': 4,
'stack': True,
'steppedLine': False,
'targets': queries_buffers,
'thresholds': [],
'timeRegions': [],
'title': 'postgresql buffers',
'tooltip': {
'shared': True,
'sort': 0,
'value_type': 'individual'
},
'type': 'graph',
'xaxis': {
'buckets': None,
'mode': 'time',
'name': None,
'show': True,
'values': []
},
'yaxes': [
{
'format': 'short',
'label': None,
'logBase': 1,
'max': None,
'min': 0,
'show': True,
'decimals': 0,
},
{
'format': 'short',
'label': None,
'logBase': 1,
'max': None,
'min': None,
'show': False,
}
],
'yaxis': {
'align': False,
'alignLevel': None
}
},
],
}

View file

@ -1,248 +0,0 @@
def dashboard_row_rspamd(panel_id, node):
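    """Grafana row for rspamd: a stacked bar panel of actions per minute from
    `rspamd_actions` and a scanned/learned rate panel from `rspamd_stats`."""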
return {
'title': 'rspamd',
'collapse': False,
'editable': False,
'height': '250px',
'panels': [
{
'aliasColors': {},
'bars': True,
'dashLength': 10,
'dashes': False,
'datasource': None,
'fieldConfig': {
'defaults': {
'displayName': '${__field.name}'
},
'overrides': []
},
'fill': 1,
'fillGradient': 0,
'hiddenSeries': False,
'id': next(panel_id),
'legend': {
'alignAsTable': False,
'avg': False,
'current': False,
'max': False,
'min': False,
'rightSide': False,
'show': True,
'total': False,
'values': False
},
'lines': False,
'linewidth': 1,
                'nullPointMode': 'null',
'options': {
'alertThreshold': True
},
'percentage': False,
'pluginVersion': '7.5.5',
'pointradius': 2,
'points': False,
'renderer': 'flot',
'seriesOverrides': [],
'spaceLength': 10,
'span': 6,
'stack': True,
'steppedLine': False,
'targets': [{
'groupBy': [
{'type': 'time', 'params': ['$__interval']},
{'type': 'fill', 'params': ['linear']},
],
'orderByTime': "ASC",
'policy': "default",
'query': f"""from(bucket: "telegraf")
|> range(start: v.timeRangeStart, stop: v.timeRangeStop)
|> filter(fn: (r) =>
r["_measurement"] == "rspamd_actions" and
r["host"] == "{node.name}"
)
|> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)
|> derivative(unit: 1m, nonNegative: true)
|> yield(name: "value")""",
'resultFormat': 'time_series',
'select': [[
{'type': 'field', 'params': ['value']},
{'type': 'mean', 'params': []},
]],
"tags": []
}],
'thresholds': [],
'timeRegions': [],
'title': 'rspamd actions',
'tooltip': {
'shared': True,
'sort': 0,
'value_type': 'individual'
},
'type': 'graph',
'xaxis': {
'buckets': None,
'mode': 'time',
'name': None,
'show': True,
'values': []
},
'yaxes': [
{
'format': 'short',
'label': None,
'logBase': 1,
'max': None,
'min': 0,
'show': True,
'decimals': 0,
},
{
'format': 'short',
'label': None,
'logBase': 1,
'max': None,
'min': None,
'show': False,
}
],
'yaxis': {
'align': False,
'alignLevel': None
}
},
{
'aliasColors': {},
'bars': False,
'dashLength': 10,
'dashes': False,
'datasource': None,
'fieldConfig': {
'defaults': {
'displayName': '${__field.name}'
},
'overrides': []
},
'fill': 1,
'fillGradient': 0,
'hiddenSeries': False,
'id': next(panel_id),
'legend': {
'alignAsTable': False,
'avg': False,
'current': False,
'max': False,
'min': False,
'rightSide': False,
'show': True,
'total': False,
'values': False
},
'lines': True,
'linewidth': 1,
                'nullPointMode': 'null',
'options': {
'alertThreshold': True
},
'percentage': False,
'pluginVersion': '7.5.5',
'pointradius': 2,
'points': False,
'renderer': 'flot',
'seriesOverrides': [],
'spaceLength': 10,
'span': 6,
'stack': False,
'steppedLine': False,
'targets': [
{
'groupBy': [
{'type': 'time', 'params': ['$__interval']},
{'type': 'fill', 'params': ['linear']},
],
'orderByTime': "ASC",
'policy': "default",
'query': f"""from(bucket: "telegraf")
|> range(start: v.timeRangeStart, stop: v.timeRangeStop)
|> filter(fn: (r) =>
r["_measurement"] == "rspamd_stats" and
r["_field"] == "scanned" and
r["host"] == "{node.name}"
)
|> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)
|> derivative(unit: 1m, nonNegative: true)
|> yield(name: "avg")""",
'resultFormat': 'time_series',
'select': [[
{'type': 'field', 'params': ['value']},
{'type': 'mean', 'params': []},
]],
"tags": []
},
{
'groupBy': [
{'type': 'time', 'params': ['$__interval']},
{'type': 'fill', 'params': ['linear']},
],
'orderByTime': "ASC",
'policy': "default",
'query': f"""from(bucket: "telegraf")
|> range(start: v.timeRangeStart, stop: v.timeRangeStop)
|> filter(fn: (r) =>
r["_measurement"] == "rspamd_stats" and
r["_field"] == "learned" and
r["host"] == "{node.name}"
)
|> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)
|> derivative(unit: 1m, nonNegative: true)
|> yield(name: "mean")""",
'resultFormat': 'time_series',
'select': [[
{'type': 'field', 'params': ['value']},
{'type': 'mean', 'params': []},
]],
"tags": []
},
],
'thresholds': [],
'timeRegions': [],
'title': 'rspamd scanned/learned',
'tooltip': {
'shared': True,
'sort': 0,
'value_type': 'individual'
},
'type': 'graph',
'xaxis': {
'buckets': None,
'mode': 'time',
'name': None,
'show': True,
'values': []
},
'yaxes': [
{
'format': 'short',
'label': None,
'logBase': 1,
'max': None,
'min': 0,
'show': True,
'decimals': 0,
},
{
'format': 'short',
'label': None,
'logBase': 1,
'max': None,
'min': None,
'show': False,
}
],
'yaxis': {
'align': False,
'alignLevel': None
}
},
],
}

View file

@ -1,228 +0,0 @@
def dashboard_row_sensors(panel_id, node):
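    """Grafana row for lm-sensors data: temperature and fan panels from the
    Telegraf `sensors` measurement."""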
return {
'title': 'sensors',
'collapse': False,
'editable': False,
'height': '250px',
'panels': [
{
'aliasColors': {},
'bars': False,
'dashLength': 10,
'dashes': False,
'datasource': None,
'fieldConfig': {
'defaults': {
'displayName': '${__field.labels.chip} ${__field.labels.feature}'
},
'overrides': []
},
'fill': 0,
'fillGradient': 0,
'hiddenSeries': False,
'id': next(panel_id),
'legend': {
'alignAsTable': False,
'avg': False,
'current': False,
'hideEmpty': True,
'hideZero': True,
'max': False,
'min': False,
'rightSide': False,
'show': True,
'total': False,
'values': False
},
'lines': True,
'linewidth': 1,
                'nullPointMode': 'null',
'options': {
'alertThreshold': True
},
'percentage': False,
'pluginVersion': '7.5.5',
'pointradius': 2,
'points': False,
'renderer': 'flot',
'seriesOverrides': [],
'spaceLength': 10,
'span': 8,
'stack': False,
'steppedLine': False,
'targets': [
{
'groupBy': [
{'type': 'time', 'params': ['$__interval']},
{'type': 'fill', 'params': ['linear']},
],
'orderByTime': "ASC",
'policy': "default",
'query': f"""from(bucket: "telegraf")
|> range(start: v.timeRangeStart, stop: v.timeRangeStop)
|> filter(fn: (r) =>
r["_measurement"] == "sensors" and
r["_field"] == "temp_input" and
r["host"] == "{node.name}"
)
|> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)
|> yield(name: "cpu")""",
'resultFormat': 'time_series',
'select': [[
{'type': 'field', 'params': ['value']},
{'type': 'mean', 'params': []},
]],
"tags": []
},
],
'thresholds': [],
'timeRegions': [],
'title': 'temperatures',
'tooltip': {
'shared': True,
'sort': 0,
'value_type': 'individual'
},
'type': 'graph',
'xaxis': {
'buckets': None,
'mode': 'time',
'name': None,
'show': True,
'values': []
},
'yaxes': [
{
'format': 'celsius',
'label': None,
'logBase': 1,
'max': None,
'min': 0,
'show': True,
},
{
'format': 'short',
'label': None,
'logBase': 1,
'max': None,
'min': None,
'show': False,
}
],
'yaxis': {
'align': False,
'alignLevel': None
}
},
{
'aliasColors': {},
'bars': False,
'dashLength': 10,
'dashes': False,
'datasource': None,
'fieldConfig': {
'defaults': {
'displayName': '${__field.labels.chip} ${__field.labels.feature}'
},
'overrides': []
},
'fill': 0,
'fillGradient': 0,
'hiddenSeries': False,
'id': next(panel_id),
'legend': {
'alignAsTable': False,
'avg': False,
'current': False,
'hideEmpty': True,
'hideZero': True,
'max': False,
'min': False,
'rightSide': False,
'show': True,
'total': False,
'values': False
},
'lines': True,
'linewidth': 1,
                'nullPointMode': 'null',
'options': {
'alertThreshold': True
},
'percentage': False,
'pluginVersion': '7.5.5',
'pointradius': 2,
'points': False,
'renderer': 'flot',
'seriesOverrides': [],
'spaceLength': 10,
'span': 4,
'stack': False,
'steppedLine': False,
'targets': [
{
'groupBy': [
{'type': 'time', 'params': ['$__interval']},
{'type': 'fill', 'params': ['linear']},
],
'orderByTime': "ASC",
'policy': "default",
'query': f"""from(bucket: "telegraf")
|> range(start: v.timeRangeStart, stop: v.timeRangeStop)
|> filter(fn: (r) =>
r["_measurement"] == "sensors" and
r["_field"] == "fan_input" and
r["host"] == "{node.name}"
)
|> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)
|> yield(name: "fan")""",
'resultFormat': 'time_series',
'select': [[
{'type': 'field', 'params': ['value']},
{'type': 'mean', 'params': []},
]],
"tags": []
},
],
'thresholds': [],
'timeRegions': [],
'title': 'fans',
'tooltip': {
'shared': True,
'sort': 0,
'value_type': 'individual'
},
'type': 'graph',
'xaxis': {
'buckets': None,
'mode': 'time',
'name': None,
'show': True,
'values': []
},
'yaxes': [
{
'format': 'rotrpm',
'label': None,
'logBase': 1,
'max': None,
'min': 0,
'show': True,
'decimals': 0,
},
{
'format': 'short',
'label': None,
'logBase': 1,
'max': None,
'min': None,
'show': False,
}
],
'yaxis': {
'align': False,
'alignLevel': None
}
},
],
}

View file

@ -1,118 +0,0 @@
def dashboard_row_smartd(panel_id, node):
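    """Grafana row with a disk temperature panel from the Telegraf
    `smartd_stats` measurement."""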
return {
'title': 'smartd',
'collapse': False,
'editable': False,
'height': '250px',
'panels': [
{
'aliasColors': {},
'bars': False,
'dashLength': 10,
'dashes': False,
'datasource': None,
'fieldConfig': {
'defaults': {
'displayName': '${__field.labels.device}'
},
'overrides': []
},
'fill': 0,
'fillGradient': 0,
'hiddenSeries': False,
'id': next(panel_id),
'legend': {
'alignAsTable': False,
'avg': False,
'current': False,
'hideEmpty': True,
'hideZero': True,
'max': False,
'min': False,
'rightSide': False,
'show': True,
'total': False,
'values': False
},
'lines': True,
'linewidth': 1,
                'nullPointMode': 'null',
'options': {
'alertThreshold': True
},
'percentage': False,
'pluginVersion': '7.5.5',
'pointradius': 2,
'points': False,
'renderer': 'flot',
'seriesOverrides': [],
'spaceLength': 10,
'span': 12,
'stack': False,
'steppedLine': False,
'targets': [
{
'groupBy': [
{'type': 'time', 'params': ['$__interval']},
{'type': 'fill', 'params': ['linear']},
],
'orderByTime': "ASC",
'policy': "default",
'query': f"""from(bucket: "telegraf")
|> range(start: v.timeRangeStart, stop: v.timeRangeStop)
|> filter(fn: (r) =>
r["_measurement"] == "smartd_stats" and
r["_field"] == "temperature" and
r["host"] == "{node.name}"
)
|> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)
|> yield(name: "cpu")""",
'resultFormat': 'time_series',
'select': [[
{'type': 'field', 'params': ['value']},
{'type': 'mean', 'params': []},
]],
"tags": []
},
],
'thresholds': [],
'timeRegions': [],
'title': 'temperatures',
'tooltip': {
'shared': True,
'sort': 0,
'value_type': 'individual'
},
'type': 'graph',
'xaxis': {
'buckets': None,
'mode': 'time',
'name': None,
'show': True,
'values': []
},
'yaxes': [
{
'format': 'celsius',
'label': None,
'logBase': 1,
'max': None,
'min': 0,
'show': True,
},
{
'format': 'short',
'label': None,
'logBase': 1,
'max': None,
'min': None,
'show': False,
}
],
'yaxis': {
'align': False,
'alignLevel': None
}
},
],
}

View file

@ -1,263 +0,0 @@
def dashboard_row_unbound(panel_id, node):
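    """Grafana row for unbound: a total queries panel and a recursion time
    panel (avg/mean, with warning and critical thresholds) from the `unbound`
    measurement."""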
return {
'title': 'unbound',
'collapse': False,
'editable': False,
'height': '250px',
'panels': [
{
'aliasColors': {},
'bars': False,
'dashLength': 10,
'dashes': False,
'datasource': None,
'fieldConfig': {
'defaults': {
'displayName': '${__field.name}'
},
'overrides': []
},
'fill': 1,
'fillGradient': 0,
'hiddenSeries': False,
'id': next(panel_id),
'legend': {
'alignAsTable': False,
'avg': False,
'current': False,
'max': False,
'min': False,
'rightSide': False,
'show': True,
'total': False,
'values': False
},
'lines': True,
'linewidth': 1,
                'nullPointMode': 'null',
'options': {
'alertThreshold': True
},
'percentage': False,
'pluginVersion': '7.5.5',
'pointradius': 2,
'points': False,
'renderer': 'flot',
'seriesOverrides': [],
'spaceLength': 10,
'span': 6,
'stack': False,
'steppedLine': False,
'targets': [{
'groupBy': [
{'type': 'time', 'params': ['$__interval']},
{'type': 'fill', 'params': ['linear']},
],
'orderByTime': "ASC",
'policy': "default",
'query': f"""from(bucket: "telegraf")
|> range(start: v.timeRangeStart, stop: v.timeRangeStop)
|> filter(fn: (r) =>
r["_measurement"] == "unbound" and
r["_field"] == "total_num_queries" and
r["host"] == "{node.name}"
)
|> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)
|> yield(name: "total_num_queries")""",
'resultFormat': 'time_series',
'select': [[
{'type': 'field', 'params': ['value']},
{'type': 'mean', 'params': []},
]],
"tags": []
}],
'thresholds': [],
'timeRegions': [],
'title': 'unbound queries per second',
'tooltip': {
'shared': True,
'sort': 0,
'value_type': 'individual'
},
'type': 'graph',
'xaxis': {
'buckets': None,
'mode': 'time',
'name': None,
'show': True,
'values': []
},
'yaxes': [
{
'format': 'short',
'label': None,
'logBase': 1,
'max': None,
'min': 0,
'show': True,
'decimals': 0,
},
{
'format': 'short',
'label': None,
'logBase': 1,
'max': None,
'min': None,
'show': False,
}
],
'yaxis': {
'align': False,
'alignLevel': None
}
},
{
'aliasColors': {},
'bars': False,
'dashLength': 10,
'dashes': False,
'datasource': None,
'fieldConfig': {
'defaults': {
'displayName': '${__field.name}'
},
'overrides': []
},
'fill': 1,
'fillGradient': 0,
'hiddenSeries': False,
'id': next(panel_id),
'legend': {
'alignAsTable': False,
'avg': False,
'current': False,
'max': False,
'min': False,
'rightSide': False,
'show': True,
'total': False,
'values': False
},
'lines': True,
'linewidth': 1,
                'nullPointMode': 'null',
'options': {
'alertThreshold': True
},
'percentage': False,
'pluginVersion': '7.5.5',
'pointradius': 2,
'points': False,
'renderer': 'flot',
'seriesOverrides': [],
'spaceLength': 10,
'span': 6,
'stack': False,
'steppedLine': False,
'targets': [
{
'groupBy': [
{'type': 'time', 'params': ['$__interval']},
{'type': 'fill', 'params': ['linear']},
],
'orderByTime': "ASC",
'policy': "default",
'query': f"""from(bucket: "telegraf")
|> range(start: v.timeRangeStart, stop: v.timeRangeStop)
|> filter(fn: (r) =>
r["_measurement"] == "unbound" and
r["_field"] == "total_recursion_time_avg" and
r["host"] == "{node.name}"
)
|> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)
|> yield(name: "avg")""",
'resultFormat': 'time_series',
'select': [[
{'type': 'field', 'params': ['value']},
{'type': 'mean', 'params': []},
]],
"tags": []
},
{
'groupBy': [
{'type': 'time', 'params': ['$__interval']},
{'type': 'fill', 'params': ['linear']},
],
'orderByTime': "ASC",
'policy': "default",
'query': f"""from(bucket: "telegraf")
|> range(start: v.timeRangeStart, stop: v.timeRangeStop)
|> filter(fn: (r) =>
r["_measurement"] == "unbound" and
r["_field"] == "total_recursion_time_mean" and
r["host"] == "{node.name}"
)
|> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)
|> yield(name: "mean")""",
'resultFormat': 'time_series',
'select': [[
{'type': 'field', 'params': ['value']},
{'type': 'mean', 'params': []},
]],
"tags": []
},
],
'thresholds': [
{
'colorMode': 'warning',
'fill': True,
'line': True,
'op': 'gt',
'value': 1,
'yaxis': 'left'
},
{
'colorMode': 'critical',
'fill': True,
'line': True,
'op': 'gt',
'value': 5,
'yaxis': 'left'
}
],
'timeRegions': [],
'title': 'unbound recursion time',
'tooltip': {
'shared': True,
'sort': 0,
'value_type': 'individual'
},
'type': 'graph',
'xaxis': {
'buckets': None,
'mode': 'time',
'name': None,
'show': True,
'values': []
},
'yaxes': [
{
'format': 's',
'label': None,
'logBase': 1,
'max': None,
'min': 0,
'show': True,
'decimals': 0,
},
{
'format': 'short',
'label': None,
'logBase': 1,
'max': None,
'min': None,
'show': False,
}
],
'yaxis': {
'align': False,
'alignLevel': None
}
},
],
}

View file

@ -1,258 +0,0 @@
def dashboard_row_wireguard(panel_id, node):
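    """Grafana row for WireGuard: per-peer last-handshake and rx/tx traffic
    panels from the `wireguard_peer` measurement."""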
return {
'title': 'wireguard',
'collapse': False,
'editable': False,
'height': '250px',
'panels': [
{
'aliasColors': {},
'bars': False,
'dashLength': 10,
'dashes': False,
'datasource': None,
'fieldConfig': {
'defaults': {
'displayName': '${__field.labels.public_key}'
},
'overrides': []
},
'fill': 1,
'fillGradient': 0,
'hiddenSeries': False,
'id': next(panel_id),
'legend': {
'alignAsTable': False,
'avg': False,
'current': False,
'max': False,
'min': False,
'rightSide': False,
'show': True,
'total': False,
'values': False
},
'lines': True,
'linewidth': 1,
                'nullPointMode': 'null',
'options': {
'alertThreshold': True
},
'percentage': False,
'pluginVersion': '7.5.5',
'pointradius': 2,
'points': False,
'renderer': 'flot',
'seriesOverrides': [],
'spaceLength': 10,
'span': 6,
'stack': False,
'steppedLine': False,
'targets': [{
'groupBy': [
{'type': 'time', 'params': ['$__interval']},
{'type': 'fill', 'params': ['linear']},
],
'orderByTime': "ASC",
'policy': "default",
'query': f"""from(bucket: "telegraf")
|> range(start: v.timeRangeStart, stop: v.timeRangeStop)
|> filter(fn: (r) =>
r["_measurement"] == "wireguard_peer" and
r["_field"] == "last_handshake_time_ns" and
r["host"] == "{node.name}"
)
|> map(fn: (r) => ({{
r with
_value: r._value / 1000000000
}})
)
|> derivative(unit: 1s, nonNegative: true)
|> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)
|> yield(name: "last_handshake_time_ns")""",
'resultFormat': 'time_series',
'select': [[
{'type': 'field', 'params': ['value']},
{'type': 'mean', 'params': []},
]],
"tags": []
}],
'thresholds': [],
'timeRegions': [],
'title': 'wireguard last handshake time',
'tooltip': {
'shared': True,
'sort': 0,
'value_type': 'individual'
},
'type': 'graph',
'xaxis': {
'buckets': None,
'mode': 'time',
'name': None,
'show': True,
'values': []
},
'yaxes': [
{
'format': 's',
'label': None,
'logBase': 1,
'max': None,
'min': None,
'show': True,
},
{
'format': 'short',
'label': None,
'logBase': 1,
'max': None,
'min': None,
'show': False,
}
],
'yaxis': {
'align': False,
'alignLevel': None
}
},
{
'aliasColors': {},
'bars': False,
'dashLength': 10,
'dashes': False,
'datasource': None,
'fieldConfig': {
'defaults': {
'displayName': '${__field.name} ${__field.labels.public_key}'
},
'overrides': []
},
'fill': 1,
'fillGradient': 0,
'hiddenSeries': False,
'id': next(panel_id),
'legend': {
'alignAsTable': False,
'avg': False,
'current': False,
'max': False,
'min': False,
'rightSide': False,
'show': True,
'total': False,
'values': False
},
'lines': True,
'linewidth': 1,
                'nullPointMode': 'null',
'options': {
'alertThreshold': True
},
'percentage': False,
'pluginVersion': '7.5.5',
'pointradius': 2,
'points': False,
'renderer': 'flot',
'seriesOverrides': [],
'spaceLength': 10,
'span': 6,
'stack': False,
'steppedLine': False,
'targets': [
{
'groupBy': [
{'type': 'time', 'params': ['$__interval']},
{'type': 'fill', 'params': ['linear']},
],
'orderByTime': "ASC",
'policy': "default",
'query': f"""from(bucket: "telegraf")
|> range(start: v.timeRangeStart, stop: v.timeRangeStop)
|> filter(fn: (r) =>
r["_measurement"] == "wireguard_peer" and
r["_field"] == "rx_bytes" and
r["host"] == "{node.name}"
)
|> derivative(unit: 1s)
|> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)
|> yield(name: "in")""",
'resultFormat': 'time_series',
'select': [[
{'type': 'field', 'params': ['value']},
{'type': 'mean', 'params': []},
]],
"tags": []
},
{
'groupBy': [
{'type': 'time', 'params': ['$__interval']},
{'type': 'fill', 'params': ['linear']},
],
'orderByTime': "ASC",
'policy': "default",
'query': f"""from(bucket: "telegraf")
|> range(start: v.timeRangeStart, stop: v.timeRangeStop)
|> filter(fn: (r) =>
r["_measurement"] == "wireguard_peer" and
r["_field"] == "tx_bytes" and
r["host"] == "{node.name}"
)
|> map(fn: (r) => ({{
r with
_value: r._value * -1
}})
)
|> derivative(unit: 1s)
|> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)
|> yield(name: "out")""",
'resultFormat': 'time_series',
'select': [[
{'type': 'field', 'params': ['value']},
{'type': 'mean', 'params': []},
]],
"tags": []
},
],
'thresholds': [],
'timeRegions': [],
'title': 'wireguard traffic',
'tooltip': {
'shared': True,
'sort': 0,
'value_type': 'individual'
},
'type': 'graph',
'xaxis': {
'buckets': None,
'mode': 'time',
'name': None,
'show': True,
'values': []
},
'yaxes': [
{
'format': 'binBps',
'label': None,
'logBase': 1,
'max': None,
'min': None,
'show': True,
'decimals': 0,
},
{
'format': 'short',
'label': None,
'logBase': 1,
'max': None,
'min': None,
'show': False,
}
],
'yaxis': {
'align': False,
'alignLevel': None
}
},
],
}

View file

@ -1,724 +0,0 @@
def dashboard_row_zfs(panel_id, node):
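    """Grafana row for ZFS: ARC and L2ARC usage, ARC/L2ARC hit and miss rates,
    and per-dataset and per-pool usage panels from the `zfs`, `zfs_dataset`
    and `zfs_pool` measurements."""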
return {
'title': 'zfs',
'collapse': False,
'editable': False,
'height': '250px',
'panels': [
{
'aliasColors': {},
'bars': False,
'dashLength': 10,
'dashes': False,
'datasource': None,
'fieldConfig': {
'defaults': {
'displayName': '${__field.name}'
},
'overrides': []
},
'fill': 1,
'fillGradient': 0,
'hiddenSeries': False,
'id': next(panel_id),
'legend': {
'alignAsTable': False,
'avg': False,
'current': False,
'max': False,
'min': False,
'rightSide': False,
'show': True,
'total': False,
'values': False
},
'lines': True,
'linewidth': 1,
                'nullPointMode': 'null',
'options': {
'alertThreshold': True
},
'percentage': False,
'pluginVersion': '7.5.5',
'pointradius': 2,
'points': False,
'renderer': 'flot',
'seriesOverrides': [],
'spaceLength': 10,
'span': 4,
'stack': False,
'steppedLine': False,
'targets': [
{
'groupBy': [
{'type': 'time', 'params': ['$__interval']},
{'type': 'fill', 'params': ['linear']},
],
'orderByTime': "ASC",
'policy': "default",
'query': f"""from(bucket: "telegraf")
|> range(start: v.timeRangeStart, stop: v.timeRangeStop)
|> filter(fn: (r) =>
r["_measurement"] == "zfs" and
r["_field"] == "arcstats_c" and
r["host"] == "{node.name}"
)
|> map(fn: (r) => ({{
r with
_field: "target"
}})
)
|> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)
|> yield(name: "target")""",
'resultFormat': 'time_series',
'select': [[
{'type': 'field', 'params': ['value']},
{'type': 'mean', 'params': []},
]],
"tags": []
},
{
'groupBy': [
{'type': 'time', 'params': ['$__interval']},
{'type': 'fill', 'params': ['linear']},
],
'orderByTime': "ASC",
'policy': "default",
'query': f"""from(bucket: "telegraf")
|> range(start: v.timeRangeStart, stop: v.timeRangeStop)
|> filter(fn: (r) =>
r["_measurement"] == "zfs" and
r["_field"] == "arcstats_size" and
r["host"] == "{node.name}"
)
|> map(fn: (r) => ({{
r with
_field: "used"
}})
)
|> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)
|> yield(name: "used")""",
'resultFormat': 'time_series',
'select': [[
{'type': 'field', 'params': ['value']},
{'type': 'mean', 'params': []},
]],
"tags": []
},
],
'thresholds': [],
'timeRegions': [],
'title': 'zfs arc usage',
'tooltip': {
'shared': True,
'sort': 0,
'value_type': 'individual'
},
'type': 'graph',
'xaxis': {
'buckets': None,
'mode': 'time',
'name': None,
'show': True,
'values': []
},
'yaxes': [
{
'format': 'bytes',
'label': None,
'logBase': 1,
'max': None,
'min': 0,
'show': True,
},
{
'format': 'short',
'label': None,
'logBase': 1,
'max': None,
'min': None,
'show': False,
}
],
'yaxis': {
'align': False,
'alignLevel': None
}
},
{
'aliasColors': {},
'bars': False,
'dashLength': 10,
'dashes': False,
'datasource': None,
'fieldConfig': {
'defaults': {
'displayName': '${__field.name}'
},
'overrides': []
},
'fill': 1,
'fillGradient': 0,
'hiddenSeries': False,
'id': next(panel_id),
'legend': {
'alignAsTable': False,
'avg': False,
'current': False,
'max': False,
'min': False,
'rightSide': False,
'show': True,
'total': False,
'values': False
},
'lines': True,
'linewidth': 1,
                'nullPointMode': 'null',
'options': {
'alertThreshold': True
},
'percentage': False,
'pluginVersion': '7.5.5',
'pointradius': 2,
'points': False,
'renderer': 'flot',
'seriesOverrides': [],
'spaceLength': 10,
'span': 4,
'stack': False,
'steppedLine': False,
'targets': [
{
'groupBy': [
{'type': 'time', 'params': ['$__interval']},
{'type': 'fill', 'params': ['linear']},
],
'orderByTime': "ASC",
'policy': "default",
'query': f"""from(bucket: "telegraf")
|> range(start: v.timeRangeStart, stop: v.timeRangeStop)
|> filter(fn: (r) =>
r["_measurement"] == "zfs" and
r["_field"] == "arcstats_l2_size" and
r["host"] == "{node.name}"
)
|> map(fn: (r) => ({{
r with
_field: "used"
}})
)
|> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)
|> yield(name: "used")""",
'resultFormat': 'time_series',
'select': [[
{'type': 'field', 'params': ['value']},
{'type': 'mean', 'params': []},
]],
"tags": []
},
],
'thresholds': [],
'timeRegions': [],
'title': 'zfs l2arc usage',
'tooltip': {
'shared': True,
'sort': 0,
'value_type': 'individual'
},
'type': 'graph',
'xaxis': {
'buckets': None,
'mode': 'time',
'name': None,
'show': True,
'values': []
},
'yaxes': [
{
'format': 'bytes',
'label': None,
'logBase': 1,
'max': None,
'min': 0,
'show': True,
'decimals': 0,
},
{
'format': 'short',
'label': None,
'logBase': 1,
'max': None,
'min': None,
'show': False,
}
],
'yaxis': {
'align': False,
'alignLevel': None
}
},
{
'aliasColors': {},
'bars': False,
'dashLength': 10,
'dashes': False,
'datasource': None,
'fieldConfig': {
'defaults': {
'displayName': '${__field.name}'
},
'overrides': []
},
'fill': 1,
'fillGradient': 0,
'hiddenSeries': False,
'id': next(panel_id),
'legend': {
'alignAsTable': False,
'avg': False,
'current': False,
'max': False,
'min': False,
'rightSide': False,
'show': True,
'total': False,
'values': False
},
'lines': True,
'linewidth': 1,
                'nullPointMode': 'null',
'options': {
'alertThreshold': True
},
'percentage': False,
'pluginVersion': '7.5.5',
'pointradius': 2,
'points': False,
'renderer': 'flot',
'seriesOverrides': [],
'spaceLength': 10,
'span': 4,
'stack': False,
'steppedLine': False,
'targets': [
{
'groupBy': [
{'type': 'time', 'params': ['$__interval']},
{'type': 'fill', 'params': ['linear']},
],
'orderByTime': "ASC",
'policy': "default",
'query': f"""from(bucket: "telegraf")
|> range(start: v.timeRangeStart, stop: v.timeRangeStop)
|> filter(fn: (r) =>
r["_measurement"] == "zfs" and
r["_field"] == "arcstats_hits" and
r["host"] == "{node.name}"
)
|> map(fn: (r) => ({{
r with
_field: "hits"
}})
)
|> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)
|> derivative(unit: 1s, nonNegative: true)
|> yield(name: "misses")""",
'resultFormat': 'time_series',
'select': [[
{'type': 'field', 'params': ['value']},
{'type': 'mean', 'params': []},
]],
"tags": []
},
{
'groupBy': [
{'type': 'time', 'params': ['$__interval']},
{'type': 'fill', 'params': ['linear']},
],
'orderByTime': "ASC",
'policy': "default",
'query': f"""from(bucket: "telegraf")
|> range(start: v.timeRangeStart, stop: v.timeRangeStop)
|> filter(fn: (r) =>
r["_measurement"] == "zfs" and
r["_field"] == "arcstats_misses" and
r["host"] == "{node.name}"
)
|> map(fn: (r) => ({{
r with
_field: "misses"
}})
)
|> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)
|> derivative(unit: 1s, nonNegative: true)
|> yield(name: "misses")""",
'resultFormat': 'time_series',
'select': [[
{'type': 'field', 'params': ['value']},
{'type': 'mean', 'params': []},
]],
"tags": []
},
{
'groupBy': [
{'type': 'time', 'params': ['$__interval']},
{'type': 'fill', 'params': ['linear']},
],
'orderByTime': "ASC",
'policy': "default",
'query': f"""from(bucket: "telegraf")
|> range(start: v.timeRangeStart, stop: v.timeRangeStop)
|> filter(fn: (r) =>
r["_measurement"] == "zfs" and
r["_field"] == "arcstats_l2_hits" and
r["host"] == "{node.name}"
)
|> map(fn: (r) => ({{
r with
_field: "l2hits"
}})
)
|> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)
|> derivative(unit: 1s, nonNegative: true)
|> yield(name: "misses")""",
'resultFormat': 'time_series',
'select': [[
{'type': 'field', 'params': ['value']},
{'type': 'mean', 'params': []},
]],
"tags": []
},
{
'groupBy': [
{'type': 'time', 'params': ['$__interval']},
{'type': 'fill', 'params': ['linear']},
],
'orderByTime': "ASC",
'policy': "default",
'query': f"""from(bucket: "telegraf")
|> range(start: v.timeRangeStart, stop: v.timeRangeStop)
|> filter(fn: (r) =>
r["_measurement"] == "zfs" and
r["_field"] == "arcstats_l2_misses" and
r["host"] == "{node.name}"
)
|> map(fn: (r) => ({{
r with
_field: "l2misses"
}})
)
|> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)
|> derivative(unit: 1s, nonNegative: true)
|> yield(name: "misses")""",
'resultFormat': 'time_series',
'select': [[
{'type': 'field', 'params': ['value']},
{'type': 'mean', 'params': []},
]],
"tags": []
},
],
'thresholds': [],
'timeRegions': [],
'title': 'zfs arc hits/misses',
'tooltip': {
'shared': True,
'sort': 0,
'value_type': 'individual'
},
'type': 'graph',
'xaxis': {
'buckets': None,
'mode': 'time',
'name': None,
'show': True,
'values': []
},
'yaxes': [
{
'format': 'short',
'label': None,
'logBase': 1,
'max': None,
'min': 0,
'show': True,
'decimals': 0,
},
{
'format': 'short',
'label': None,
'logBase': 1,
'max': None,
'min': None,
'show': False,
}
],
'yaxis': {
'align': False,
'alignLevel': None
}
},
{
'aliasColors': {},
'bars': False,
'dashLength': 10,
'dashes': False,
'datasource': None,
'fieldConfig': {
'defaults': {
'displayName': '${__field.labels.dataset} ${__field.name}'
},
'overrides': []
},
'fill': 1,
'fillGradient': 0,
'hiddenSeries': False,
'id': next(panel_id),
'legend': {
'alignAsTable': False,
'avg': False,
'current': False,
'max': False,
'min': False,
'rightSide': False,
'show': True,
'total': False,
'values': False
},
'lines': True,
'linewidth': 1,
                'nullPointMode': 'null',
'options': {
'alertThreshold': True
},
'percentage': False,
'pluginVersion': '7.5.5',
'pointradius': 2,
'points': False,
'renderer': 'flot',
'seriesOverrides': [],
'spaceLength': 10,
'span': 6,
'stack': True,
'steppedLine': False,
'targets': [
{
'groupBy': [
{'type': 'time', 'params': ['$__interval']},
{'type': 'fill', 'params': ['linear']},
],
'orderByTime': "ASC",
'policy': "default",
'query': f"""from(bucket: "telegraf")
|> range(start: v.timeRangeStart, stop: v.timeRangeStop)
|> filter(fn: (r) =>
r["_measurement"] == "zfs_dataset" and
r["_field"] == "used" and
r["host"] == "{node.name}"
)
|> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)
|> yield(name: "used")""",
'resultFormat': 'time_series',
'select': [[
{'type': 'field', 'params': ['value']},
{'type': 'mean', 'params': []},
]],
"tags": []
},
{
'groupBy': [
{'type': 'time', 'params': ['$__interval']},
{'type': 'fill', 'params': ['linear']},
],
'orderByTime': "ASC",
'policy': "default",
'query': f"""from(bucket: "telegraf")
|> range(start: v.timeRangeStart, stop: v.timeRangeStop)
|> filter(fn: (r) =>
r["_measurement"] == "zfs_dataset" and
r["_field"] == "usedsnap" and
r["host"] == "{node.name}"
)
|> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)
|> yield(name: "usedsnap")""",
'resultFormat': 'time_series',
'select': [[
{'type': 'field', 'params': ['value']},
{'type': 'mean', 'params': []},
]],
"tags": []
},
],
'thresholds': [],
'timeRegions': [],
'title': 'zfs usage per dataset',
'tooltip': {
'shared': True,
'sort': 0,
'value_type': 'individual'
},
'type': 'graph',
'xaxis': {
'buckets': None,
'mode': 'time',
'name': None,
'show': True,
'values': []
},
'yaxes': [
{
'format': 'bytes',
'label': None,
'logBase': 1,
'max': None,
'min': 0,
'show': True,
},
{
'format': 'short',
'label': None,
'logBase': 1,
'max': None,
'min': None,
'show': False,
}
],
'yaxis': {
'align': False,
'alignLevel': None
}
},
{
'aliasColors': {},
'bars': False,
'dashLength': 10,
'dashes': False,
'datasource': None,
'fieldConfig': {
'defaults': {
'displayName': '${__field.labels.pool} ${__field.name}'
},
'overrides': []
},
'fill': 1,
'fillGradient': 0,
'hiddenSeries': False,
'id': next(panel_id),
'legend': {
'alignAsTable': False,
'avg': False,
'current': False,
'max': False,
'min': False,
'rightSide': False,
'show': True,
'total': False,
'values': False
},
'lines': True,
'linewidth': 1,
'nullPointMode': 'null',
'options': {
'alertThreshold': True
},
'percentage': False,
'pluginVersion': '7.5.5',
'pointradius': 2,
'points': False,
'renderer': 'flot',
'seriesOverrides': [],
'spaceLength': 10,
'span': 6,
'stack': False,
'steppedLine': False,
'targets': [
{
'groupBy': [
{'type': 'time', 'params': ['$__interval']},
{'type': 'fill', 'params': ['linear']},
],
'orderByTime': "ASC",
'policy': "default",
'query': f"""from(bucket: "telegraf")
|> range(start: v.timeRangeStart, stop: v.timeRangeStop)
|> filter(fn: (r) =>
r["_measurement"] == "zfs_pool" and
r["_field"] == "used" and
r["host"] == "{node.name}"
)
|> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)
|> yield(name: "used")""",
'resultFormat': 'time_series',
'select': [[
{'type': 'field', 'params': ['value']},
{'type': 'mean', 'params': []},
]],
"tags": []
},
{
'groupBy': [
{'type': 'time', 'params': ['$__interval']},
{'type': 'fill', 'params': ['linear']},
],
'orderByTime': "ASC",
'policy': "default",
'query': f"""from(bucket: "telegraf")
|> range(start: v.timeRangeStart, stop: v.timeRangeStop)
|> filter(fn: (r) =>
r["_measurement"] == "zfs_pool" and
r["_field"] == "size" and
r["host"] == "{node.name}"
)
|> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)
|> yield(name: "size")""",
'resultFormat': 'time_series',
'select': [[
{'type': 'field', 'params': ['value']},
{'type': 'mean', 'params': []},
]],
"tags": []
},
],
'thresholds': [],
'timeRegions': [],
'title': 'zfs usage per pool',
'tooltip': {
'shared': True,
'sort': 0,
'value_type': 'individual'
},
'type': 'graph',
'xaxis': {
'buckets': None,
'mode': 'time',
'name': None,
'show': True,
'values': []
},
'yaxes': [
{
'format': 'bytes',
'label': None,
'logBase': 1,
'max': None,
'min': 0,
'show': True,
'decimals': 0,
},
{
'format': 'short',
'label': None,
'logBase': 1,
'max': None,
'min': None,
'show': False,
}
],
'yaxis': {
'align': False,
'alignLevel': None
}
},
],
}
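The three panel dicts above form the body of a single dashboard row. The bundle's items.py (shown further down in this diff) exec()s every *.py file under bundles/grafana/dashboard-rows/ and then calls builders such as dashboard_row_zfs(panel_id, rnode), so each of these files has to define one such function. The enclosing function and row dict are cut off above; the sketch below only illustrates the assumed outer shape, with the panels abbreviated.
# Sketch only: assumed outer shape of a dashboard-row builder as called from
# items.py further down. The real file carries the full panel dicts shown above.
def dashboard_row_zfs(panel_id, node):
    return {
        'title': 'zfs',
        'panels': [
            # the three dicts from above ('zfs arc hits/misses',
            # 'zfs usage per dataset', 'zfs usage per pool'), each drawing
            # its panel id from the shared itertools.count() via next(panel_id)
        ],
    }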

View file

@ -1,15 +0,0 @@
apiVersion: 1
providers:
- name: 'managed by bundlewrap'
orgId: 1
folder: 'Managed by BundleWrap'
folderUid: '222af3a08b'
type: file
disableDeletion: false
updateIntervalSeconds: 10
allowUiUpdates: false
options:
path: /var/lib/grafana/dashboards
foldersFromFilesStructure: false
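This provider points Grafana at /var/lib/grafana/dashboards and files everything it finds there under the 'Managed by BundleWrap' folder; items.py below writes one <nodename>.json per telegraf node into that directory. A small, hypothetical sanity check for what ends up there, assuming that layout:
# Hypothetical helper, not part of the bundle: confirm that every provisioned
# dashboard parses as JSON and carries the uid and title set by items.py.
import json
from pathlib import Path

for path in Path('/var/lib/grafana/dashboards').glob('*.json'):
    dashboard = json.loads(path.read_text())
    assert 'uid' in dashboard, f'{path.name} is missing a uid'
    print(path.name, dashboard['uid'], dashboard['title'])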

View file

@ -1,102 +0,0 @@
app_mode = production
instance_name = ${node.name}
[paths]
data = /var/lib/grafana
;temp_data_lifetime = 24h
logs = /var/log/grafana
plugins = /var/lib/grafana/plugins
provisioning = /etc/grafana/provisioning
[server]
protocol = http
http_port = 21010
domain = ${domain}
root_url = https://${domain}/
[database]
type = sqlite3
# for postgres
;host = 127.0.0.1:5432
;name = grafana
;user = root
;password =
;ssl_mode = disable
# for sqlite
;path = grafana.db
;cache_mode = private
[remote_cache]
type = database
[analytics]
reporting_enabled = false
check_for_updates = false
[security]
disable_initial_admin_creation = false
secret_key = ${secret_key}
disable_gravatar = true
cookie_secure = true
allow_embedding = ${str(allow_embedding).lower()}
[dashboards]
min_refresh_interval = 10s
[users]
allow_sign_up = ${str(allow_sign_up).lower()}
allow_org_create = false
auto_assign_org = false
verify_email_enabled = true
default_theme = dark
viewers_can_edit = false
editors_can_admin = false
[auth]
login_maximum_inactive_lifetime_duration = ${login_max_duration}
login_maximum_lifetime_duration = ${login_max_duration}
[auth.anonymous]
enabled = ${str(allow_anonymous).lower()}
org_name = ${anonymous_org}
org_role = Viewer
[smtp]
enabled = ${str(enable_smtp).lower()}
host = localhost:25
from_address = noreply@${domain}
from_name = Grafana
[emails]
welcome_email_on_sign_up = false
templates_pattern = emails/*.html
[log]
mode = console
[alerting]
enabled = false
[explore]
enabled = true
[plugins]
enable_alpha = true
[date_formats]
full_date = YYYY-MM-DD HH:mm:ss
interval_second = HH:mm:ss
interval_minute = HH:mm
interval_hour = YYYY-MM-DD HH:mm
interval_day = YYYY-MM-DD
interval_month = YYYY-MM
interval_year = YYYY
default_timezone = browser
[expressions]
enabled = true
[metrics]
enabled = false
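This template is not deployed verbatim: items.py below ships it with content_type 'mako' and node.metadata['grafana'] as context, so every ${...} expression is evaluated by Mako before the file reaches the node. A standalone illustration of that substitution, using made-up example values:
# Illustration only: how the ${...} placeholders above are rendered. BundleWrap
# does this itself for files with content_type 'mako'; the values are examples.
from mako.template import Template

snippet = Template("""[server]
http_port = 21010
domain = ${domain}
root_url = https://${domain}/
[security]
allow_embedding = ${str(allow_embedding).lower()}
""")

print(snippet.render(domain='grafana.example.com', allow_embedding=False))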

View file

@ -1,172 +0,0 @@
from itertools import count
from os import listdir
from os.path import isdir, isfile, join
from pathlib import Path
from uuid import UUID
from bundlewrap.metadata import metadata_to_json
for row in Path(join(repo.path, 'bundles', 'grafana', 'dashboard-rows')).rglob("*.py"):
with open(row, 'r') as f:
exec(f.read())
directories = {
# Don't ask me why these permissions are that weird. It's what the
# debian package sets them to after upgrades.
'/etc/grafana/provisioning/dashboards': {
'group': 'grafana',
'purge': True,
},
'/etc/grafana/provisioning/datasources': {
'group': 'grafana',
'purge': True,
},
'/etc/grafana/provisioning/notifiers': {
'group': 'grafana',
'purge': True,
},
'/etc/grafana/provisioning/plugins': {
'group': 'grafana',
'purge': True,
},
'/var/lib/grafana/dashboards': {
'owner': 'grafana',
'group': 'grafana',
'purge': True,
'triggers': {
'svc_systemd:grafana-server:restart',
},
},
}
files = {
'/etc/grafana/grafana.ini': {
'content_type': 'mako',
'context': node.metadata['grafana'],
'group': 'grafana',
'mode': '0640',
'triggers': {
'svc_systemd:grafana-server:restart',
},
},
'/etc/grafana/provisioning/dashboards/bundlewrap.yaml': {
'source': 'dashboards.yaml',
'group': 'grafana',
'mode': '0640',
'triggers': {
'svc_systemd:grafana-server:restart',
},
},
}
svc_systemd = {
'grafana-server': {
'needs': {
'file:/etc/grafana/grafana.ini',
'pkg_apt:grafana',
},
},
}
### dashboard management starts here
for rnode in repo.nodes:
if not rnode.has_bundle('telegraf'):
continue
panel_id = count(start=1)
dashboard = {
'title': rnode.name,
'uid': UUID(int=rnode.magic_number).hex[:10],
'editable': False,
'graphTooltip': 1,
'schemaVersion': 12,
'style': 'dark',
'tags': {'bw'},
'time': {
'from': 'now-1d',
'to': 'now'
},
'version': 1,
'rows': [
dashboard_row_cpu(panel_id, rnode),
dashboard_row_ip_traffic(panel_id, rnode),
dashboard_row_memory(panel_id, rnode),
],
}
if rnode.has_bundle('ipmitool'):
dashboard['rows'].append(dashboard_row_ipmitool(panel_id, rnode))
dashboard['tags'].add('ipmitool')
elif rnode.has_bundle('lm-sensors'):
dashboard['rows'].append(dashboard_row_sensors(panel_id, rnode))
dashboard['tags'].add('lm-sensors')
if rnode.has_bundle('smartd'):
dashboard['rows'].append(dashboard_row_smartd(panel_id, rnode))
dashboard['tags'].add('smartd')
if rnode.has_bundle('telegraf-battery-usage'):
dashboard['rows'].append(dashboard_row_battery(panel_id, rnode))
dashboard['rows'].append(dashboard_row_disk_space(panel_id, rnode))
dashboard['rows'].append(dashboard_row_disk_iops(panel_id, rnode))
if rnode.has_bundle('nginx'):
dashboard['rows'].append(dashboard_row_nginx(panel_id, rnode))
dashboard['tags'].add('nginx')
if rnode.has_bundle('postfix'):
dashboard['rows'].append(dashboard_row_postfix(panel_id, rnode))
dashboard['tags'].add('postfix')
if rnode.has_bundle('dovecot'):
dashboard['rows'].append(dashboard_row_dovecot(panel_id, rnode))
dashboard['tags'].add('dovecot')
if rnode.has_bundle('rspamd'):
dashboard['rows'].append(dashboard_row_rspamd(panel_id, rnode))
dashboard['tags'].add('rspamd')
if rnode.has_bundle('postgresql'):
dashboard['rows'].append(dashboard_row_postgresql(panel_id, rnode))
dashboard['tags'].add('postgresql')
if rnode.has_bundle('wireguard'):
dashboard['rows'].append(dashboard_row_wireguard(panel_id, rnode))
dashboard['tags'].add('wireguard')
if rnode.has_bundle('zfs'):
dashboard['rows'].append(dashboard_row_zfs(panel_id, rnode))
dashboard['tags'].add('zfs')
if rnode.has_bundle('unbound'):
dashboard['rows'].append(dashboard_row_unbound(panel_id, rnode))
dashboard['tags'].add('unbound')
files[f'/var/lib/grafana/dashboards/{rnode.name}.json'] = {
'owner': 'grafana',
'group': 'grafana',
# use metadata_to_json, because this supports sets
'content': metadata_to_json(dashboard),
'triggers': {
'svc_systemd:grafana-server:restart',
},
}
additional_path = join(repo.path, 'data', 'grafana', 'files', node.name, 'dashboards')
if isdir(additional_path):
for file in listdir(additional_path):
if not isfile(join(additional_path, file)) or file.startswith('.') or file.startswith('_'):
continue
files[f'/var/lib/grafana/dashboards/{file}'] = {
'owner': 'grafana',
'group': 'grafana',
'source': join(node.name, 'dashboards', file),
'triggers': {
'svc_systemd:grafana-server:restart',
},
}
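One detail worth spelling out: 'tags' on each dashboard is a set so the per-bundle blocks above can simply .add() to it, and the file content is rendered with metadata_to_json (imported at the top of this file) instead of json.dumps, because the standard encoder cannot serialize sets. A minimal illustration:
# Minimal illustration of why metadata_to_json is used for the dashboard files:
# the standard json encoder rejects sets, metadata_to_json serializes them.
import json
from bundlewrap.metadata import metadata_to_json

dashboard = {'title': 'example', 'tags': {'bw', 'zfs'}}

try:
    json.dumps(dashboard)
except TypeError as exc:
    print('json.dumps:', exc)   # "Object of type set is not JSON serializable"

print(metadata_to_json(dashboard))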

View file

@ -1,57 +0,0 @@
defaults = {
'apt': {
'packages': {
'grafana': {},
},
'repos': {
'grafana': {
'items': {
'deb https://apt.grafana.com stable main',
},
},
},
},
'backups': {
'paths': {
'/var/lib/grafana',
},
},
'grafana': {
'allow_anonymous': False,
'allow_embedding': False,
'allow_sign_up': False,
'anonymous_org': 'public',
'enable_smtp': True,
'login_max_duration': '24h',
'secret_key': repo.vault.random_bytes_as_base64_for(f'{node.name} grafana secret_key'),
},
}
@metadata_reactor.provides(
'nginx/vhosts/grafana',
)
def nginx(metadata):
if not node.has_bundle('nginx'):
raise DoNotRunAgain
return {
'nginx': {
'vhosts': {
'grafana': {
'domain': metadata.get('grafana/domain'),
'locations': {
'/': {
'target': 'http://127.0.0.1:21010',
},
'/api/ds/query': {
'target': 'http://127.0.0.1:21010',
'proxy_read_timeout': 300,
},
},
'website_check_path': '/login',
'website_check_string': 'Grafana',
},
},
},
}
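The reactor above is how this bundle hands its reverse-proxy configuration to the nginx bundle: it only contributes a vhost on nodes that actually run nginx, and raising DoNotRunAgain tells BundleWrap to stop calling the reactor for this node. A hypothetical sketch of the same pattern for a different consumer; the 'firewall' bundle and its metadata keys are invented here purely to show the shape and do not refer to anything in this repo.
# Hypothetical example of the metadata reactor pattern shown above; 'firewall'
# and its keys are made up for illustration, only the mechanics are real.
@metadata_reactor.provides(
    'firewall/open_tcp_ports',
)
def firewall(metadata):
    if not node.has_bundle('firewall'):
        raise DoNotRunAgain

    return {
        'firewall': {
            'open_tcp_ports': {
                21010,   # grafana's http_port, proxied by the vhost above
            },
        },
    }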

Some files were not shown because too many files have changed in this diff