sophies-stuff #36

Merged
sophie merged 43 commits from sophies-stuff into main 2021-03-13 12:43:44 +00:00
47 changed files with 594 additions and 47 deletions

View file

@ -11,6 +11,9 @@ insert_final_newline = true
[*.yaml]
indent_size = 2
[*.exs]
indent_size = 2
# possibly sql dumps
[*.sql]
indent_size = unset

View file

@ -6,11 +6,6 @@ easily find available ports for other bundles.
## TCP
Rule of thumb: keep ports below 10000 free for stuff that reserves ports.
| Port range | reserved for |
| ----------- | ------------ |
| 200.. | Matrix |
| 220.. | Generic Web services |
| Port | bundle | usage |
| ----------- | -------------------- | ----- |
| 22 | | sshd |
@ -38,6 +33,7 @@ Rule of thumb: keep ports below 10000 free for stuff that reserves ports.
| 20080 | matrix-synapse | client, federation |
| 20081 | matrix-synapse | prometheus metrics |
| 20090 | matrix-media-repo | media_repo |
| 21000 | pleroma | pleroma |
| 22000 | gitea | gitea |
| 22010 | jenkins-ci | Jenkins CI |
| 22020 | travelynx | Travelynx Web |

View file

@ -94,7 +94,6 @@ pkg_apt = {
'lsof': {},
'mailutils': {},
'manpages': {},
'molly-guard': {},
'moreutils': {},
'mount': {},
'mtr': {},

View file

@ -0,0 +1,39 @@
# Beware! This file is rewritten by htop when settings are changed in the interface.
# The parser is also very primitive, and not human-friendly.
fields=0 48 17 18 38 39 40 2 46 47 49 1
sort_key=46
sort_direction=1
tree_sort_key=0
tree_sort_direction=1
hide_kernel_threads=1
hide_userland_threads=0
shadow_other_users=0
show_thread_names=0
show_program_path=1
highlight_base_name=1
highlight_megabytes=0
highlight_threads=1
highlight_changes=0
highlight_changes_delay_secs=5
find_comm_in_cmdline=1
strip_exe_from_cmdline=1
show_merged_command=0
tree_view=0
tree_view_always_by_pid=0
header_margin=1
detailed_cpu_time=1
cpu_count_from_one=1
show_cpu_usage=1
show_cpu_frequency=0
show_cpu_temperature=0
degree_fahrenheit=0
update_process_names=0
account_guest_in_cpu_meter=0
color_scheme=0
enable_mouse=0
delay=10
left_meters=Tasks LoadAverage Uptime Memory CPU LeftCPUs CPU
left_meter_modes=2 2 2 1 1 1 2
right_meters=Hostname Load RightCPUs
right_meter_modes=2 3 1
hide_function_bar=0

View file

@ -10,6 +10,9 @@ files = {
'/etc/hosts': {
'content_type': 'mako',
},
'/etc/htoprc.global': {
'source': 'htoprc',
},
'/etc/locale.gen': {
'content_type': 'mako',
'triggers': {

View file

@ -1,4 +1,7 @@
defaults = {
'bash_functions': {
'h': 'cp /etc/htoprc.global ~/.htoprc; mkdir -p ~/.config/htop; cp /etc/htoprc.global ~/.config/htop/htoprc; htop',
},
'locale': {
'default': 'en_US.UTF-8',
'installed': {

View file

@ -42,6 +42,13 @@ defaults = {
'ICINGA STATUSMONITOR': {
'command_on_monitored_host': '/usr/local/share/icinga/plugins/check_systemd_unit icinga_statusmonitor',
},
'IDO-PGSQL': {
'check_command': 'ido',
'vars.ido_type': 'IdoPgsqlConnection',
'vars.ido_name': 'ido-pgsql',
'vars.ido_pending_queries_warning': 25,
'vars.ido_pending_queries_critical': 50,
},
},
},
},

View file

@ -3,7 +3,8 @@ defaults = {
'repos': {
'deb-multimedia': {
'items': {
'deb https://ftp-stud.hs-esslingen.de/pub/Mirrors/debian-multimedia/ stable main',
'deb https://ftp-stud.hs-esslingen.de/pub/Mirrors/debian-multimedia/ {os_release} main',
'deb https://ftp-stud.hs-esslingen.de/pub/Mirrors/debian-multimedia/ {os_release}-backports main',
},
},
},
@ -12,6 +13,7 @@ defaults = {
'fonts-noto': {},
'fonts-roboto': {},
'kodi': {},
'kodi-inputstream-adaptive': {},
'libasound2': {},
'libcec4': {},
'ttf-mscorefonts-installer': {},

View file

@ -1,5 +1,3 @@
${node.metadata['hostname']}
% for domain, aliases in sorted(node.metadata.get('letsencrypt/domains', {}).items()):
${domain} ${' '.join(sorted(aliases))}
% endfor

View file

@ -6,7 +6,7 @@ just_check=$2
cert_path="/var/lib/dehydrated/certs/$domain"
already_exists=false
if [ -f "$cert_path/privkey.pem" -a -f "$cert_path/fullchain.pem" ]
if [ -f "$cert_path/privkey.pem" -a -f "$cert_path/fullchain.pem" -a -f "$cert_path/chain.pem" ]
then
already_exists=true
fi
@ -23,6 +23,7 @@ fi
if [ "$already_exists" != true ]
then
rm -r "$cert_path"
mkdir -p "$cert_path"
openssl req -x509 -newkey rsa:4096 -nodes -days 3650 -subj "/CN=$domain" -keyout "$cert_path/privkey.pem" -out "$cert_path/fullchain.pem"
chmod 0600 "$cert_path/privkey.pem"

View file

@ -41,6 +41,7 @@ git_deploy = {
'rev': node.metadata['matrix-media-repo']['version'],
'triggers': {
'action:matrix-media-repo_build',
'svc_systemd:matrix-media-repo:restart',
},
},
}

View file

@ -0,0 +1,9 @@
#!/bin/bash
# Checks whether upgrade-and-reboot is currently running.
if [[ -f "/var/lib/bundlewrap/soft-${node.name}/UNATTENDED" ]]
then
echo "Sorry, can't $MOLLYGUARD_CMD now, upgrade-and-reboot is running"
exit 1
fi
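
The check above relies on upgrade-and-reboot keeping the UNATTENDED flag file in place for exactly as long as it runs; that script is not part of this diff. A minimal Python sketch of the assumed flag-file contract (illustration only, not the actual implementation):

# Hypothetical illustration of the flag-file protocol the molly-guard check relies on.
from contextlib import contextmanager
from pathlib import Path

@contextmanager
def unattended_flag(node_name):
    """Hold the UNATTENDED flag while unattended maintenance runs."""
    flag = Path(f'/var/lib/bundlewrap/soft-{node_name}/UNATTENDED')
    flag.parent.mkdir(parents=True, exist_ok=True)
    flag.touch()  # molly-guard's run.d check refuses reboots while this file exists
    try:
        yield
    finally:
        flag.unlink(missing_ok=True)  # reboots are allowed again once the run is over

# Usage sketch (node name is just an example from this repo):
#   with unattended_flag('htz.ex42-1048908'):
#       run_upgrades_and_reboot()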

View file

@ -0,0 +1,29 @@
#!/bin/sh
# This script asks for the bundlewrap node name. It replaces the
# original script, which asks for the hostname; that is sometimes
# not enough to properly identify the system.
NODE_NAME="${node.name}"
# If this is not a terminal, do nothing
test -t 0 || exit 0
sigh()
{
echo "Sorry, input does not match. Won't $MOLLYGUARD_CMD $NODE_NAME ..." >&2
exit 1
}
trap 'echo;sigh' 1 2 3 9 10 12 15
echo -n "Please enter the bundlewrap node name of this System to $MOLLYGUARD_CMD: "
read NODE_NAME_USER || :
NODE_NAME_USER="$(echo "$NODE_NAME_USER" | tr '[:upper:]' '[:lower:]')"
[ "$NODE_NAME_USER" = "$NODE_NAME" ] || sigh
trap - 1 2 3 9 10 12 15
exit 0

View file

@ -0,0 +1 @@
# currently unused

View file

@ -0,0 +1,21 @@
directories = {
'/etc/molly-guard/messages.d': {
'purge': True,
},
'/etc/molly-guard/run.d': {
'purge': True,
},
}
files = {
'/etc/molly-guard/rc': {},
'/etc/molly-guard/run.d/10-check-unattended-upgrades': {
'content_type': 'mako',
'mode': '0755',
},
'/etc/molly-guard/run.d/30-query-hostname': {
'content_type': 'mako',
'mode': '0755',
},
}

View file

@ -0,0 +1,7 @@
defaults = {
'apt': {
'packages': {
'molly-guard': {},
},
},
}

View file

@ -1,3 +1,8 @@
if node.has_bundle('pacman'):
package = 'pkg_pacman:nfs-utils'
else:
package = 'pkg_apt:nfs-common'
for mount, data in node.metadata.get('nfs-client/mounts',{}).items():
data['mount'] = mount
data['mount_options'] = set(data.get('mount_options', set()))
@ -34,7 +39,7 @@ for mount, data in node.metadata.get('nfs-client/mounts',{}).items():
'file:/etc/systemd/system/{}.mount'.format(unitname),
'file:/etc/systemd/system/{}.automount'.format(unitname),
'directory:{}'.format(data['mountpoint']),
'pkg_apt:nfs-common',
package,
},
}
else:
@ -42,7 +47,7 @@ for mount, data in node.metadata.get('nfs-client/mounts',{}).items():
'needs': {
'file:/etc/systemd/system/{}.mount'.format(unitname),
'directory:{}'.format(data['mountpoint']),
'pkg_apt:nfs-common',
package,
},
}

View file

@ -4,4 +4,9 @@ defaults = {
'nfs-common': {},
},
},
'pacman': {
'packages': {
'nfs-utils': {},
},
},
}

View file

@ -35,5 +35,24 @@ http {
'' close;
}
# GDPR compatible IP smashinator 5000000
map $remote_addr $ip_anonym1 {
default 0.0.0;
"~(?P<ip>(\d+)\.(\d+))\.(\d+)\.\d+" $ip;
"~(?P<ip>[^:]+:[^:]+):" $ip;
}
map $remote_addr $ip_anonym2 {
default .0.0;
"~(?P<ip>(\d+)\.(\d+)\.(\d+))\.\d+" .0.0;
"~(?P<ip>[^:]+:[^:]+):" ::;
}
map $ip_anonym1$ip_anonym2 $ip_anonymized {
default 0.0.0.0;
"~(?P<ip>.*)" $ip;
}
log_format gdpr '$ip_anonymized - $remote_user [$time_local] '
'"$request" $status $body_bytes_sent '
'"<stripped>" "$http_user_agent"';
include /etc/nginx/sites/*;
}
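
The maps above keep only the first two IPv4 octets (a.b.0.0) or the first two IPv6 groups (xxxx:yyyy::) before the address reaches the gdpr log format. As a standalone illustration (not part of the nginx config), the same masking in Python:

# Python illustration of the anonymization performed by the nginx maps above.
import re

def anonymize_ip(addr):
    m4 = re.match(r'(\d+\.\d+)\.\d+\.\d+$', addr)
    if m4:
        return m4.group(1) + '.0.0'   # keep the first two octets
    m6 = re.match(r'([^:]+:[^:]+):', addr)
    if m6:
        return m6.group(1) + '::'     # keep the first two groups
    return '0.0.0.0'                  # same fallback as the maps' default

assert anonymize_ip('159.69.11.231') == '159.69.0.0'
assert anonymize_ip('2a01:4f8:c2c:c410::1') == '2a01:4f8::'

The gdpr log format is then referenced per vhost, as in the pleroma nginx extras file later in this PR (access_log /var/log/nginx/pleroma.log gdpr;).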

View file

@ -0,0 +1,35 @@
import Config
config :pleroma,
configurable_from_database: true
config :pleroma, Pleroma.Web.Endpoint,
url: [host: "${node.metadata['pleroma']['url']}", scheme: "https", port: 443],
http: [port: 21000, ip: {127, 0, 0, 1}],
secret_key_base: "${node.metadata['pleroma']['secret_key']}",
secure_cookie_flag: true
config :pleroma, :instance,
static_dir: "/var/pleroma/static/"
config :pleroma, Pleroma.Upload,
uploader: Pleroma.Uploaders.Local,
filters: [Pleroma.Upload.Filter.Dedupe]
config :pleroma, Pleroma.Uploaders.Local,
uploads: "/var/pleroma/uploads/"
config :pleroma, :media_proxy,
enabled: false,
redirect_on_failure: true
#base_url: "https://cache.pleroma.social"
# Configure your database
config :pleroma, Pleroma.Repo,
adapter: Ecto.Adapters.Postgres,
username: "pleroma",
password: "${node.metadata['postgresql']['roles']['pleroma']['password']}",
database: "pleroma",
hostname: "localhost",
pool_size: 10,
timeout: 60000

View file

@ -0,0 +1,22 @@
[Unit]
Description=Pleroma social network
After=network.target
Requires=postgresql.service
[Service]
User=pleroma
WorkingDirectory=/opt/pleroma
Environment="HOME=/opt/pleroma"
Environment="PLEROMA_CONFIG_PATH=/opt/pleroma/pleroma.config.exs"
Environment="PLUG_TMPDIR=/tmp/pleroma"
ExecStart=/opt/pleroma/release/bin/pleroma start
ExecStop=/opt/pleroma/release/bin/pleroma stop
Restart=on-failure
PrivateTmp=true
ProtectHome=true
ProtectSystem=full
CapabilityBoundingSet=~CAP_SYS_ADMIN
[Install]
WantedBy=multi-user.target

bundles/pleroma/items.py (new file)
View file

@ -0,0 +1,88 @@
version = node.metadata['pleroma']['version']
users = {
'pleroma': {
'home': '/opt/pleroma',
},
}
directories = {
'/opt/pleroma': {},
'/var/pleroma': {
'owner': 'pleroma',
},
'/var/pleroma/uploads': {
'owner': 'pleroma',
},
'/var/pleroma/static': {
'owner': 'pleroma',
},
'/var/pleroma/static/emoji': {
'owner': 'pleroma',
},
}
if node.has_bundle('zfs'):
directories['/var/pleroma']['needs'] = {
'zfs_dataset:tank/pleroma-data',
}
actions = {
'pleroma_download_release': {
'command': \
'cd /opt/pleroma/ && '\
f'wget -O/opt/pleroma/pleroma.zip https://git.pleroma.social/api/v4/projects/2/jobs/artifacts/release/{version}/download?job=amd64 && '\
'rm -rf release && '\
'unzip /opt/pleroma/pleroma.zip && '\
'chown -R pleroma:pleroma /opt/pleroma/release && '\
f'echo -n "{version}" > /opt/pleroma/.bundlewrap_installed_version',
'unless': f'[ "$(cat /opt/pleroma/.bundlewrap_installed_version)" = "{version}" ]',
'needs': {
'directory:/opt/pleroma',
},
'preceded_by': {
'svc_systemd:pleroma:stop',
},
'triggers': {
'action:pleroma_migrate_database',
'svc_systemd:pleroma:restart',
},
},
'pleroma_migrate_database': {
'triggered': True,
'command': \
'echo "CREATE EXTENSION IF NOT EXISTS citext;" | psql pleroma && '\
'echo "CREATE EXTENSION IF NOT EXISTS pg_trgm;" | psql pleroma && '\
'echo "CREATE EXTENSION IF NOT EXISTS \\\"uuid-ossp\\\";" | psql pleroma && '\
'sudo -u pleroma PLEROMA_CONFIG_PATH=/opt/pleroma/pleroma.config.exs /opt/pleroma/release/bin/pleroma_ctl create',
'needs': {
'postgres_db:pleroma',
},
},
}
files = {
'/etc/systemd/system/pleroma.service': {
'triggers': {
'action:systemd-reload',
'svc_systemd:pleroma:restart',
},
},
'/opt/pleroma/pleroma.config.exs': {
'content_type': 'mako',
'triggers': {
'svc_systemd:pleroma:restart',
},
},
}
svc_systemd = {
'pleroma': {
'needs': {
'action:pleroma_download_release',
'action:pleroma_migrate_database',
'file:/etc/systemd/system/pleroma.service',
'file:/opt/pleroma/pleroma.config.exs',
},
},
}

View file

@ -0,0 +1,59 @@
defaults = {
'apt': {
'packages': {
'imagemagick': {},
'ffmpeg': {},
'libimage-exiftool-perl': {},
},
},
'backups': {
'paths': {
'/var/pleroma',
},
},
'zfs': {
'datasets': {
'tank/pleroma-data': {
'mountpoint': '/var/pleroma',
},
},
},
'postgresql': {
'roles': {
'pleroma': {
'password': repo.vault.password_for(f'{node.name} postgresql pleroma'),
},
},
'databases': {
'pleroma': {
'owner': 'pleroma',
},
},
},
}
@metadata_reactor.provides(
'nginx/vhosts/pleroma',
)
def nginx(metadata):
if not node.has_bundle('nginx'):
raise DoNotRunAgain
return {
'nginx': {
'vhosts': {
'pleroma': {
'domain': metadata.get('pleroma/url'),
'proxy': {
'/': {
'target': 'http://127.0.0.1:21000',
'websockets': True,
},
},
'website_check_path': '/main/all',
'website_check_string': 'use Pleroma',
},
},
},
}

View file

@ -49,7 +49,7 @@ else:
'letsencrypt/reload_after',
)
def letsencrypt(metadata):
if not node.has_bundle('letsencrypt'):
if not node.has_bundle('letsencrypt') or not node.has_bundle('postfixadmin'):
raise DoNotRunAgain
result = {
@ -58,12 +58,9 @@ def letsencrypt(metadata):
},
}
myhostname = metadata.get('postfix/myhostname', None)
if myhostname and myhostname != metadata.get('hostname'):
result['domains'] = {
myhostname: set(),
}
result['domains'] = {
metadata.get('postfix/myhostname', metadata.get('hostname')): set(),
}
return {
'letsencrypt': result,

View file

@ -25,10 +25,10 @@ directories = {
},
# This is needed so the above purge does not remove the version
# currently installed.
'/etc/postgresql/{}'.format(postgresql_version): {
'owner': None,
'group': None,
'mode': None,
'/etc/postgresql/{}/main'.format(postgresql_version): {
'owner': 'postgres',
'group': 'postgres',
'mode': '0755',
},
}
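
The purge the comment refers to sits on the parent directory above this hunk. A rough sketch of the combined pattern, with the parent item assumed since it is outside the shown context:

# Sketch only: the parent purge item is assumed, the child item is the one added in this hunk.
postgresql_version = 11  # placeholder; the real bundle derives this from node metadata

directories = {
    '/etc/postgresql': {
        'purge': True,  # assumed: remove anything below this path that BundleWrap does not manage
    },
    # Managing the active version's config dir as its own item is what keeps
    # the purge above from deleting it.
    '/etc/postgresql/{}/main'.format(postgresql_version): {
        'owner': 'postgres',
        'group': 'postgres',
        'mode': '0755',
    },
}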

View file

@ -17,10 +17,10 @@ try:
print(top_output)
# steal
if cpu_usage['st'] > 5:
crit.add('CPU steal is {}% (>5%)'.format(cpu_usage['st']))
elif cpu_usage['st'] > 2:
warn.add('CPU steal is {}% (>2%)'.format(cpu_usage['st']))
if cpu_usage['st'] > 10:
crit.add('CPU steal is {}% (>10%)'.format(cpu_usage['st']))
elif cpu_usage['st'] > 5:
warn.add('CPU steal is {}% (>5%)'.format(cpu_usage['st']))
# iowait
if cpu_usage['wa'] > 60:

View file

@ -1,11 +1,5 @@
assert node.has_bundle('systemd')
pkg_apt = {
'resolvconf': {
'installed': False,
},
}
files = {
'/etc/network/interfaces': {
'delete': True,

View file

@ -1,3 +1,14 @@
defaults = {
'apt': {
'packages': {
'resolvconf': {
'installed': False,
},
},
},
}
@metadata_reactor.provides(
'interfaces',
)

View file

@ -5,8 +5,10 @@ server:
verbosity: 0
% if node.has_bundle('netdata'):
statistics-interval: 1
extended-statistics: yes
# FIXME reenable this once debian has 1.19
# statistics-interval: 1
# extended-statistics: yes
statistics-interval: 300
% else:
statistics-interval: 300
% endif

View file

@ -19,6 +19,7 @@ actions = {
'zfs_dataset:',
'zfs_pool:',
},
'comment': 'If this fails, do a dist-upgrade, reinstall zfs-dkms, reboot',
},
}

View file

@ -0,0 +1 @@
encrypt$gAAAAABgMXllIGiB__clFctfOC6T4qRhFDrh_WJZU745-DZef2UpKCy0gz_2FlDAIqrNceL-Ahz1AXZrsdHUKPYAZ5AW4ne0b0G6uHQENYB0xv-ZqA3MZS26gzvNM7ejhyTCM1zO1j6ePgIxfZlaalNcuLIRAphuhu7KkJA8sGaoUMjdTqVWJUjj4Le8KHcS-s7PhB1XjkyHYxb0cKFgPxs1CgHWVjfCviVnl3yFAF1aLvYsbNcpzM_RGGIIA9YsO3yPQ8Mfk4B3truuNg1mdNaunpnhoTImF2cSNoI64f2mVaSNxxRXm1NG2qUJkZN8ZQlW8k7A1w_zUwHw9-JaimZejfPWrhew7krAbPQWEqOz7Km0RkQdbzFzxWECDIOQ_Z87n_yEFLSN3sAHA0eQ-a6oqj5Ybga5p9eeNNdOYAZyU_6KfSl9U6XSKT16brAXnsZevWQHk06ObdOPhJW5SMIQwk0TZXUOMZ11T0o0-2IMGBngOjoOxqt7gjZoiLFt4c8BkFcDkpTj25asyG2iF-2jWZ1cY91F5nDkIE3CSQzD7DYANyTI7ik9qACiY25bBYOwo9HS9TEcE-wDS2_jKolFFmEx5EFdxzIpSXdWB7EznbizgqAtu2eYubASKlBKILpeVZiqKZi8

View file

@ -0,0 +1 @@
ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIK7vz3CMmmHmnWZs4b+Ohh4wnUgcME8PvZscjgS+91Qd kunsi@kunsi-t470.kunbox.net

View file

@ -0,0 +1,4 @@
location /
{
add_header Access-Control-Allow-Origin *;
}

View file

@ -0,0 +1,4 @@
client_max_body_size 16m;
access_log /var/log/nginx/pleroma.log gdpr;
error_log /var/log/nginx/error.log;

View file

@ -0,0 +1 @@
return 308 https://cybert-media.net$request_uri;

View file

@ -0,0 +1,9 @@
${header}
$ORIGIN cybert-media.net.
@ IN A 159.69.11.231
IN AAAA 2a01:4f8:c2c:c410::1
IN TXT "v=spf1 a ~all"
www IN CNAME cybert-media.net.

View file

@ -0,0 +1 @@
export PS1='\[\e[1;34m\][\[\e[1;32m\]'"$__node_name"'\[\e[1;34m\]][\[\e[1;32m\]\u\[\e[1;34m\]@\[\e[1;32m\]\w\[\e[1;34m\]] $PRODWARNING> \[\e[0m\]'

View file

@ -32,7 +32,7 @@ set -g status-left-length 14
set -g status-right-length 140
#set -g status-left '#[default]〘 '
set -g status-left '#[fg=green,bright]#(uname -r | cut -c 1-8)#[default]〘'
set -g status-right "〙🔋 #(upower -i /org/freedesktop/UPower/devices/battery_BAT1 | grep time | awk '{print $4, $5}' | sed 's/hours/h/; s/minutes/m/; s/,/\./') #[fg=red,bg=default]⇑#(uptime -p |sed 's/\ week/w/; s/\ days/d/; s/\ day/d/; s/\ hours/h/; s/\ minutes/m/; s/\ minute/m/; s/,//g; s/up//') #[fg=green,bg=default]⎋ #(cat /proc/loadavg | awk '{print $1,$2,$3}') #[fg=blue] %Y-%m-%d #[fg=white,bg=default] %H:%M #[fg=green] #H"
set -g status-right "〙#[fg=red,bg=default]⇑#(uptime -p |sed 's/\ week/w/; s/\ days/d/; s/\ day/d/; s/\ hours/h/; s/\ minutes/m/; s/\ minute/m/; s/,//g; s/up//') #[fg=green,bg=default]⎋ #(cat /proc/loadavg | awk '{print $1,$2,$3}') #[fg=blue] %Y-%m-%d #[fg=white,bg=default] %H:%M #[fg=green] #H"
# C-b is not acceptable -- Vim uses it
set-option -g prefix C-a

View file

@ -21,6 +21,7 @@ groups['linux'] = {
'backup-client',
'basic',
'cron',
'molly-guard',
'openssh',
'postfix',
'sshmon',

View file

@ -150,7 +150,6 @@ nodes['home.router'] = {
'kunsi': {
'ssh_pubkey': {
# work laptop
'ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIPM68t1Ssf0c9dEkYOEXllUQ0aybPsW3aQAJuWpUHPlt',
'ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAICYst1HK+gJYhNxzqJGnz4iB73pa89Xz2yH+8wufOcsA',
},
},
@ -172,7 +171,7 @@ nodes['home.router'] = {
},
'wireguard': {
# TODO autogenerate?
'my_ip': '172.19.137.2/32',
'my_ip': '172.19.136.2/32',
'subnets': {
'172.19.138.0/24',
'172.19.139.0/24',

View file

@ -0,0 +1,97 @@
# sophie's miniserver
# mostly unmanaged
nodes['htz-cloud.miniserver'] = {
'groups': {
'debian-buster',
'webserver',
},
'bundles': {
'iptables',
},
'metadata': {
'dummy': True,
'interfaces': {
'eth0': {
'ips': {
'157.90.20.62',
'2a01:4f8:c2c:840f::1/64',
},
'gateway4': '172.31.1.1',
'gateway6': 'fe80::1',
},
},
'apt': {
'packages': {
'mosh': {},
'weechat': {},
'weechat-core': {},
'weechat-curses': {},
'weechat-perl': {},
'weechat-plugins': {},
'weechat-python': {},
'weechat-ruby': {},
},
'repos': {
'weechat': {
'items': {
'deb https://weechat.org/debian {os_release} main',
},
},
},
},
'backups': {
'exclude_from_backups': True,
},
'icinga_options': {
'exclude_from_monitoring': True,
},
'iptables': {
'custom_rules': [
'iptables_both -A INPUT -p udp --dport 60000:61000 -j ACCEPT', # mosh
'iptables_both -A INPUT -p tcp --dport 9001 -j ACCEPT', # weechat
],
},
'letsencrypt': {
'concat_and_deploy': {
'sophie-weechat': {
'match_domain': 'i.sophies-kitchen.eu',
'target': '/home/sophie/.weechat/ssl/relay.pem',
'chown': 'sophie:sophie',
'chmod': '0440',
'commands': [
'echo \'core.weechat */relay sslcertkey\' >> /home/sophie/.weechat/weechat_fifo'
],
},
},
'domains': {
'i.sophies-kitchen.eu': set(),
Review comment (conversation resolved by kunsi): Technically redundant, since there's an nginx vhost which will use the same domain.
'webdump.sophies-kitchen.eu': set(),
},
},
'nginx': {
'vhosts': {
'webdump.sophies-kitchen.eu': {
'webroot_config': {
'owner': 'sophie',
'group': 'sophie',
'mode': '0755',
},
'extras': True,
},
},
},
'vm': {
'cpu': 2,
'ram': 4,
Review comment (outdated): The group `debian-buster` adds `bundle:users`, which in turn gets its user defaults from `users.json`. Maybe you want to delete the user `kunsi` instead? (A sketch of that change follows after this node's definition.)
},
'users': {
'sophie': {
'ssh_pubkey': [
"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDILcYrMQNRVXAm5L+7No1ZumqfCyRc1QZmTY3O7Q8hsE4+fCAvwsWm2aSMfLL3NnIl8Nm1Rixzic5jdYKYNIY3SlX1wvTB+MhGb2eyVSd7c/Y98aCLSlDkQ2sebjpdA1FoJOeGD3qxqDwj0+KckXU2ZaSSQY7CxVsjH65UxCHqVAg+6uLdNbj7j850s1B9NXVXef+sBQ5jUngXxnqQWwNh2Mn8auwumkeEG4SYf96wyFkLvmBitOng/GyLWl9YPnXXHHDnatcVipy7y34qw4CQ4P84anecbA+Bqr9IcxBW6qYmYgRKEnAcmEfjQd+BI1gCLB1BBEmb/qp+mVLd4tOh sophie@carbon"
],
},
},
},
}
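
The outdated review note above would translate into roughly the following addition to this node's metadata, mirroring the 'delete' pattern this PR itself uses on kunsi-t470 (where the user sophie is removed). Sketch only, not part of the merged diff:

    'users': {
        'kunsi': {
            # drop the default user that bundle:users pulls in via users.json
            'delete': True,
        },
        'sophie': {
            # ssh_pubkey as already defined above
        },
    },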

View file

@ -56,7 +56,7 @@ nodes['htz-cloud.pirmasens'] = {
'message_size_limit_mb': 50,
},
'postfixadmin': {
'version': '3.3.7',
'version': '3.3.8',
'setup_password': vault.decrypt('encrypt$gAAAAABfpwn8NKxTztI39GzhGw66NNsWa72Wq7Sa_LoIG_L0ewCVPzhmw93xhWo3jfT8hCn9sqJgbArmPHtLMcLkSHdBPbQe0bLZMSib-mA9sEQD0wgKMyuRCPHIIMKSAoMaJaYnHSTO-mz1q7_tKzd6LkHF_AGsboS1vpQvg-CDth6e0msTwe8='),
},
'rspamd': {

View file

@ -0,0 +1,56 @@
nodes['htz-cloud.pleroma'] = {
'bundles': {
'pleroma',
'postgresql',
'zfs',
},
'groups': {
'debian-buster',
'webserver',
},
'metadata': {
'interfaces': {
'eth0': {
'ips': {
'159.69.11.231',
'2a01:4f8:c2c:c410::1/64',
},
'gateway4': '172.31.1.1',
'gateway6': 'fe80::1',
},
},
'cron': {
'auto-authorize-sm-users': '* * * * * root echo "UPDATE users SET approval_pending=false WHERE email LIKE \'\\%@seibert-media.net\' AND approval_pending=true;" | psql pleroma >/dev/null',
},
'nginx': {
'vhosts': {
'pleroma': {
'extras': True,
},
'pleroma-www-redir': {
'domain': 'www.cybert-media.net',
'extras': True,
},
},
},
'pleroma': {
'version': '2.2.2',
'url': 'cybert-media.net',
'secret_key': vault.decrypt('encrypt$gAAAAABgMVXXclfxVY022fM0Fdf94Oh3sxVlK0lYyBO_CsQFEbZcMua3w1oJY8_9d1JcrCJSSeBRTDnt-ZkRCQ6xKoALo8Rl7s9DPxa7J0vHdkggeZ3IHaOyXBcBPdx8vILyKDLHRXacaynOUBOjy6RIl6Qf2wH1ASbphCcjD-Njricg4PG6Rcixm87fF60rLBjAAkRoz5ZQnXlut1rhjLj-z-7UpA68fkeyPVJXbroWBJdmvCUt92dwjuGARsku2XI22mVvjtJJ'),
},
'postfix': {
'myhostname': 'cybert-media.net',
},
'vm': {
'cpu': 1,
'ram': 2,
},
'zfs': {
'pools': {
'tank': {
'device': '/dev/sdb',
},
},
},
},
}

View file

@ -49,6 +49,7 @@ nodes['htz.ex42-1048908'] = {
# No need to create a bundle just to install packages,
# configs will be managed by users nevertheless.
'mosh': {},
'weechat': {},
'weechat-core': {},
'weechat-curses': {},
@ -89,7 +90,7 @@ nodes['htz.ex42-1048908'] = {
},
'element-web': {
'url': 'chat.franzi.business',
'version': 'v1.7.21',
'version': 'v1.7.22',
'config': {
'default_server_config': {
'm.homeserver': {
@ -112,8 +113,8 @@ nodes['htz.ex42-1048908'] = {
},
},
'gitea': {
'version': '1.13.2',
'sha256': '4d7d3fc63666cc9c94e32c1e70422c30c1ee8f905004eeb7cd812051721601cc',
'version': '1.13.4',
'sha256': '7948a5ad2ec63d4cb1bf3f90925f444606bd00af4e242d467ae975ade4e330d7',
'domain': 'git.kunsmann.eu',
# TODO find out if those secrets can be rotated without breaking stuff
'internal_token': vault.decrypt('encrypt$gAAAAABfPncYwCX-NdBr9LdxLyGqmjRJqhmwMnWsdZy6kVOWdKrScW78xaqbJ1tpL1J4qa2hcZ7TQj3l-2mkyJNJOenGzU3TsI-gYMj9vC4m8Bhur5zboxjD4dQXaJbD1WSyHJ9sPJYsWP3Gjg6I19xeq9xMlAI6xaS9vOfuoI8nZnnQPx1NjfQEj03Jxf8a0-3F20sfICst1xRa5K48bpq1PFkK_oRojg=='),
@ -147,7 +148,7 @@ nodes['htz.ex42-1048908'] = {
},
},
'matrix-media-repo': {
'version': 'v1.2.2',
'version': 'v1.2.4',
'homeservers': {
'franzi.business': {
'domain': 'http://[::1]:20080/',
@ -330,7 +331,7 @@ nodes['htz.ex42-1048908'] = {
'message_size_limit_mb': 50,
},
'postfixadmin': {
'version': '3.3.7',
'version': '3.3.8',
'setup_password': vault.decrypt('encrypt$gAAAAABfpwn8NKxTztI39GzhGw66NNsWa72Wq7Sa_LoIG_L0ewCVPzhmw93xhWo3jfT8hCn9sqJgbArmPHtLMcLkSHdBPbQe0bLZMSib-mA9sEQD0wgKMyuRCPHIIMKSAoMaJaYnHSTO-mz1q7_tKzd6LkHF_AGsboS1vpQvg-CDth6e0msTwe8='),
},
'radicale': {

View file

@ -5,6 +5,7 @@ nodes['kunsi-t470'] = {
'bundles': {
'basic',
'lldp',
'nfs-client',
'pacman',
'openssh',
'sudo',
@ -36,6 +37,18 @@ nodes['kunsi-t470'] = {
'locale': {
'default': 'en_DK.UTF-8',
},
'nfs-client': {
'mounts': {
'nas-storage': {
'mountpoint': '/mnt/nas',
'serverpath': '172.19.138.20:/storage/nas',
'mount_options': {
'retry=0',
'ro',
},
},
},
},
'pacman': {
'packages': {
'fish': {},
@ -48,6 +61,8 @@ nodes['kunsi-t470'] = {
'kunsi': {
'password': vault.decrypt('encrypt$gAAAAABgLmmuQGRUStrQawoPee-758emIYn2u8-8ebrgzNAFSp7ifeFDdXXvs-zL3QogwNYlCtBHboH2xfy1rSj6OF5bbNO-tg=='),
'shell': '/usr/bin/fish',
# FIXME move qemu VMs out of /home/kunsi
'home-mode': '0755',
},
'sophie': {
'delete': True,

View file

@ -114,7 +114,7 @@ nodes['ovh.icinga2'] = {
'service_filter': '"checks_with_sms" in service.groups'
},
'wireguard': {
'my_ip': '172.19.137.3/32',
'my_ip': '172.19.136.3/32',
'peers': {
'ovh.wireguard': {},
},

View file

@ -25,12 +25,12 @@ nodes['ovh.wireguard'] = {
},
'wireguard': {
'network': '172.19.136.0/22',
'my_ip': '172.19.137.1/32',
'my_ip': '172.19.136.1/32',
'psk': vault.random_bytes_as_base64_for('ovh.icinga2 wireguard psk'),
'peers': {
'kunsi-oneplus3': {
'ips': {
'172.19.137.200/32',
'172.19.136.100/32',
},
'psk': vault.decrypt('encrypt$gAAAAABgKYeeuPfokbk7lSbbJX-52kap5Cs3tdCHpezkKcExV-yLTHPjszIcAh1T9wW1BtGElRdZea7VTikV3qEu3bupiSqEW4l2lmD5cn2ERYRfuVCoYSkOlmEGokHUX7Nja4G_A2_x'),
'pubkey': vault.decrypt('encrypt$gAAAAABgKYdTqLG3DcB13QqQadUxyzIjvSxwgZQNjorQi-ADSLsNdDbhikSAGQnSmGelLB74V175awIIir768WEnpLJUKX6nt_i2BxOP3JazvKZSQECkiK8G-IRn8wWWgKarfmtqRwh6'),