bundles: code style improvements
All checks were successful
bundlewrap/pipeline/head This commit looks good
parent 2af911c29f
commit f52df58517
24 changed files with 80 additions and 95 deletions
@@ -43,4 +43,3 @@ svc_systemd = {
         },
     },
 }
-
@@ -19,12 +19,12 @@ def get_static_allocations(metadata):
         if rnode.metadata.get('location', '') != metadata.get('location', ''):
             continue
 
-        for identifier, interface in rnode.metadata.get('interfaces', {}).items():
-            if interface.get('dhcp', False):
+        for iface_name, iface_config in rnode.metadata.get('interfaces', {}).items():
+            if iface_config.get('dhcp', False):
                 try:
-                    allocations[rnode.name] = {
-                        'ipv4': sorted(interface['ips'])[0],
-                        'mac': interface['mac'],
+                    allocations[f'{rnode.name}_{iface_name}'] = {
+                        'ipv4': sorted(iface_config['ips'])[0],
+                        'mac': iface_config['mac'],
                     }
                 except KeyError:
                     pass
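
Note: besides the rename, the new allocation key includes the interface name. Keyed by node name alone, a host with two DHCP-enabled interfaces would overwrite its own entry; keyed by f'{rnode.name}_{iface_name}' each interface keeps its own static allocation. A minimal standalone sketch of that effect, using made-up interface data rather than real repo metadata:

# Hypothetical interface metadata for one node with two DHCP interfaces.
interfaces = {
    'eth0': {'ips': ['10.0.0.10'], 'mac': 'aa:bb:cc:dd:ee:01', 'dhcp': True},
    'eth1': {'ips': ['10.0.1.10'], 'mac': 'aa:bb:cc:dd:ee:02', 'dhcp': True},
}

old_style = {}
new_style = {}
for iface_name, iface_config in interfaces.items():
    old_style['somenode'] = iface_config['mac']                # same key every iteration
    new_style[f'somenode_{iface_name}'] = iface_config['mac']  # unique key per interface

print(len(old_style))  # 1 -> only the last interface survives
print(len(new_style))  # 2 -> one allocation per interface
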
@@ -41,7 +41,7 @@ def get_static_allocations(metadata):
 )
 def get_listen_interfaces(metadata):
     listen_interfaces = []
-    for identfier, subnet in node.metadata.get('dhcpd/subnets', {}).items():
+    for _, subnet in node.metadata.get('dhcpd/subnets', {}).items():
         listen_interfaces.append(subnet['interface'])
 
     return {
@@ -55,15 +55,15 @@ def get_listen_interfaces(metadata):
     'iptables/bundle_rules/dhcpd',
 )
 def iptables(metadata):
-    iptables = set()
-    for identfier, subnet in node.metadata.get('dhcpd/subnets', {}).items():
-        iptables.add('iptables -A INPUT -i {} -p udp --dport 67:68 -j ACCEPT'.format(subnet['interface']))
+    rules = set()
+    for _, subnet in node.metadata.get('dhcpd/subnets', {}).items():
+        rules.add('iptables -A INPUT -i {} -p udp --dport 67:68 -j ACCEPT'.format(subnet['interface']))
 
     return {
         'iptables': {
             'bundle_rules': {
                 # iptables bundle relies on this being a list.
-                'dhcpd': sorted(list(iptables)),
+                'dhcpd': sorted(list(rules)),
             },
         }
     }
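
Note: the rename from iptables to rules avoids shadowing the reactor function's own name inside its body; the same rename shows up again in the netdata, nginx, transmission and unbound hunks below. A small standalone illustration of the shadowing (hypothetical functions, not repo code):

def iptables():
    # The local assignment hides the function object for the rest of the body.
    iptables = ['-A INPUT -p udp --dport 67:68 -j ACCEPT']
    print(callable(iptables))  # False: 'iptables' now names the list, not the function
    return iptables

def iptables_fixed():
    rules = ['-A INPUT -p udp --dport 67:68 -j ACCEPT']
    print(callable(iptables_fixed))  # True: nothing is shadowed
    return rules

iptables()
iptables_fixed()
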
@@ -1,8 +1,9 @@
 #!/usr/bin/env python3
 
-from flask import Flask, Response
 from subprocess import check_output
 
+from flask import Flask
+
 app = Flask(__name__)
 
 @app.route('/status')
@@ -25,5 +26,5 @@ def statuspage():
 
     if icinga_is_fine and postgres_is_fine:
         return 'OK', 200
-    else:
-        return 'Something is wrong!', 500
+
+    return 'Something is wrong!', 500
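
Note: removing the else after a return is purely stylistic; once the first branch has returned, the remaining code only runs in the failure case anyway. A tiny equivalent sketch:

def status(ok):
    if ok:
        return 'OK', 200

    # Only reached when the condition above was false, so no else is needed.
    return 'Something is wrong!', 500

assert status(True) == ('OK', 200)
assert status(False) == ('Something is wrong!', 500)
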
@@ -264,13 +264,13 @@ icinga_run_deps = {
     'pkg_apt:icinga2',
     'pkg_apt:icinga2-ido-pgsql',
 }
-for name in actions.keys():
+for name in actions:
     icinga_run_deps.add(f'action:{name}')
-for name in directories.keys():
+for name in directories:
     icinga_run_deps.add(f'directory:{name}')
-for name in files.keys():
+for name in files:
     icinga_run_deps.add(f'file:{name}')
-for name in symlinks.keys():
+for name in symlinks:
     icinga_run_deps.add(f'symlink:{name}')
 
 svc_systemd = {
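
Note: iterating a dict already yields its keys, so for name in actions is equivalent to for name in actions.keys(). Quick standalone check with made-up item names:

actions = {'icinga_reload': {}, 'icinga_restart': {}}  # hypothetical action items

# Both spellings visit exactly the same keys.
assert list(actions) == list(actions.keys())

icinga_run_deps = set()
for name in actions:
    icinga_run_deps.add(f'action:{name}')

print(sorted(icinga_run_deps))  # ['action:icinga_reload', 'action:icinga_restart']
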
@@ -19,7 +19,6 @@ defaults = {
 
             # needed for check_rbl
             'libdata-validate-ip-perl': {},
-            'libdata-validate-ip-perl': {},
             'libmonitoring-plugin-perl': {},
             'libnet-dns-perl': {},
             'libreadonly-perl': {},
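
Note: the removed line was an exact duplicate of the key above it. Python dict literals accept repeated keys and silently keep only the last one, so the duplicate contributed nothing; the same cleanup appears in the postfix and powerdns hunks further down.

packages = {
    'libdata-validate-ip-perl': {},
    'libdata-validate-ip-perl': {},  # duplicate key, silently collapsed
}
print(len(packages))  # 1
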
@@ -38,7 +38,7 @@ for bundle, rules in node.metadata.get('iptables', {}).get('bundle_rules', {}).i
     }
 
 if 'custom_rules' in node.metadata.get('iptables', {}):
-    files[f'/etc/iptables-rules.d/40-custom'] = {
+    files['/etc/iptables-rules.d/40-custom'] = {
         'content': '\n'.join(node.metadata['iptables']['custom_rules']) + '\n',
         'triggers': {
             'action:iptables_enforce',
@@ -21,19 +21,19 @@ defaults = {
 )
 def iptables(metadata):
     interfaces = metadata.get('netdata/restrict-to-interfaces', set())
-    iptables = []
+    rules = []
 
-    if len(interfaces):
+    if interfaces:
         for iface in sorted(interfaces):
-            iptables.append(f'iptables_both -A INPUT -i {iface} -p tcp --dport 19999 -j ACCEPT')
+            rules.append(f'iptables_both -A INPUT -i {iface} -p tcp --dport 19999 -j ACCEPT')
 
     else:
-        iptables.append('iptables_both -A INPUT -p tcp --dport 19999 -j ACCEPT')
+        rules.append('iptables_both -A INPUT -p tcp --dport 19999 -j ACCEPT')
 
     return {
         'iptables': {
             'bundle_rules': {
-                'netdata': iptables,
+                'netdata': rules,
             },
         },
     }
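
Note: empty containers are falsy in Python, so if interfaces: behaves exactly like if len(interfaces): for any list or set while matching the idiom PEP 8 recommends.

for interfaces in (set(), {'eth0'}):
    assert bool(interfaces) == bool(len(interfaces))
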
@@ -147,21 +147,21 @@ def monitoring(metadata):
 )
 def iptables(metadata):
     interfaces = metadata.get('nginx/restrict-to-interfaces', set())
-    iptables = []
+    rules = []
 
-    if len(interfaces):
+    if interfaces:
         for iface in sorted(interfaces):
-            iptables.append(f'iptables_both -A INPUT -i {iface} -p tcp --dport 80 -j ACCEPT')
-            iptables.append(f'iptables_both -A INPUT -i {iface} -p tcp --dport 443 -j ACCEPT')
+            rules.append(f'iptables_both -A INPUT -i {iface} -p tcp --dport 80 -j ACCEPT')
+            rules.append(f'iptables_both -A INPUT -i {iface} -p tcp --dport 443 -j ACCEPT')
 
     else:
-        iptables.append('iptables_both -A INPUT -p tcp --dport 80 -j ACCEPT')
-        iptables.append('iptables_both -A INPUT -p tcp --dport 443 -j ACCEPT')
+        rules.append('iptables_both -A INPUT -p tcp --dport 80 -j ACCEPT')
+        rules.append('iptables_both -A INPUT -p tcp --dport 443 -j ACCEPT')
 
     return {
         'iptables': {
             'bundle_rules': {
-                'nginx': iptables,
+                'nginx': rules,
             },
         },
     }
@@ -29,7 +29,6 @@ if node.has_bundle('postfixadmin'):
         'SMTP CONNECT': {
             'check_command': 'check_smtp',
             'vars.notification.sms': True,
-            'vars.notification.sms': True,
         },
         'SMTP SUBMISSION CONNECT': {
            'check_command': 'check_smtp',
@@ -52,7 +51,7 @@ else:
 def fill_icinga_spam_blocklist_check_with_hostname(metadata):
     checks = {}
 
-    for variant, ips in repo.libs.tools.resolve_identifier(repo, node.name).items():
+    for _, ips in repo.libs.tools.resolve_identifier(repo, node.name).items():
         for ip in ips:
             if not ip.is_private:
                 checks[f'SPAM BLOCKLIST {ip}'] = {
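
Note: binding the unused key to _ documents that only the values matter; iterating .values() directly would be an equally valid spelling. Sketch with made-up resolver output:

resolved = {'ipv4': ['192.0.2.1'], 'ipv6': ['2001:db8::1']}  # hypothetical resolve_identifier() result

collected_a = [ip for _, ips in resolved.items() for ip in ips]
collected_b = [ip for ips in resolved.values() for ip in ips]
assert collected_a == collected_b
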
@@ -62,7 +62,7 @@ def default_postgresql_version_for_debian(metadata):
                 'version': version_to_be_installed,
             },
         }
-    else:
-        return {
-            'postgresql': {
-                'version': version_to_be_installed,
+
+    return {
+        'postgresql': {
+            'version': version_to_be_installed,
@@ -171,7 +171,4 @@ if node.metadata['powerdns'].get('features', {}).get('pgsql', False):
         'needed_by': {
             'svc_systemd:pdns',
         },
-        'needed_by': {
-            'svc_systemd:pdns',
-        },
     }
@@ -1,5 +1,3 @@
-from bundlewrap.exceptions import NoSuchGroup
-
 defaults = {
     'apt': {
         'packages': {
@@ -68,7 +66,7 @@ def get_ips_of_secondary_nameservers(metadata):
     ips = set()
     for rnode in repo.nodes_in_group('dns'):
         if rnode.metadata.get('powerdns/is_secondary', False):
-            for identifier, found_ips in repo.libs.tools.resolve_identifier(repo, rnode.name).items():
+            for _, found_ips in repo.libs.tools.resolve_identifier(repo, rnode.name).items():
                 ips.update({str(ip) for ip in found_ips})
 
     return {
@@ -87,7 +85,7 @@ def get_ips_of_primary_nameservers(metadata):
     ips = set()
     for rnode in repo.nodes_in_group('dns'):
         if not rnode.metadata.get('powerdns/is_secondary', False):
-            for identifier, found_ips in repo.libs.tools.resolve_identifier(repo, rnode.name).items():
+            for _, found_ips in repo.libs.tools.resolve_identifier(repo, rnode.name).items():
                 ips.update({str(ip) for ip in found_ips})
 
     return {
@@ -121,7 +119,7 @@ def generate_dns_entries_for_nodes(metadata):
 
         # We're doing this once again to get the nodes which only have
         # private ips.
-        if not ip4 and len(found_ips['ipv4']):
+        if not ip4 and found_ips['ipv4']:
            ip4 = sorted(found_ips['ipv4'])[0]
 
        if ip4:
@@ -9,11 +9,11 @@ if node.has_bundle('pppd'):
     }
 
     # Will be started and stopped by pppd.
-    should_be_running = None
-    should_be_enabled = False
+    SHOULD_BE_RUNNING = None
+    SHOULD_BE_ENABLED = False
 else:
-    should_be_running = True
-    should_be_enabled = True
+    SHOULD_BE_RUNNING = True
+    SHOULD_BE_ENABLED = True
 
 files['/etc/radvd.conf'] = {
     'content_type': 'mako',
@@ -27,8 +27,8 @@ files['/etc/radvd.conf'] = {
 
 svc_systemd = {
     'radvd': {
-        'running': should_be_running,
-        'enabled': should_be_enabled,
+        'running': SHOULD_BE_RUNNING,
+        'enabled': SHOULD_BE_ENABLED,
         'needs': {
             'file:/etc/radvd.conf',
         },
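
Note: SHOULD_BE_RUNNING and SHOULD_BE_ENABLED follow the PEP 8 convention of upper-case names for module-level constants that are set once and never reassigned; the values and the svc_systemd wiring stay the same, and the wide-dhcpv6 bundle below gets the identical treatment. Minimal sketch of the pattern, with a made-up flag standing in for node.has_bundle('pppd'):

PPPD_MANAGED = False  # hypothetical stand-in for node.has_bundle('pppd')

if PPPD_MANAGED:
    SHOULD_BE_RUNNING = None   # pppd starts and stops the service itself
    SHOULD_BE_ENABLED = False
else:
    SHOULD_BE_RUNNING = True
    SHOULD_BE_ENABLED = True

svc_systemd = {'radvd': {'running': SHOULD_BE_RUNNING, 'enabled': SHOULD_BE_ENABLED}}
print(svc_systemd)
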
@@ -64,7 +64,7 @@ def populate_permitted_ips_list_with_ips_from_repo(metadata):
     ips = set()
 
     for rnode in repo.nodes:
-        for identifier, found_ips in repo.libs.tools.resolve_identifier(repo, rnode.name).items():
+        for _, found_ips in repo.libs.tools.resolve_identifier(repo, rnode.name).items():
             for ip in found_ips:
                 if not ip.is_private:
                     ips.add(str(ip))
@@ -22,7 +22,7 @@ defaults = {
 def zfs_disks_to_metadata(metadata):
     disks = set()
 
-    for pool, config in metadata.get('zfs/pools', {}).items():
+    for _, config in metadata.get('zfs/pools', {}).items():
         if 'device' in config:
             disks.add(config['device'])
         else:
@@ -1,9 +1,8 @@
 from os.path import join
 
-def collect_commands():
-    check_commands = {}
+check_commands = {}
 
-    for bundle, bundle_config in node.metadata.get('icinga2_api', {}).items():
-        for service, service_config in bundle_config.get('services', {}).items():
-            # The default for check_command is also set in metadata.py
-            # and in icinga2 bundle
+for _, bundle_config in node.metadata.get('icinga2_api', {}).items():
+    for service, service_config in bundle_config.get('services', {}).items():
+        # The default for check_command is also set in metadata.py
+        # and in icinga2 bundle
@@ -14,8 +13,6 @@ def collect_commands():
-        ):
-            check_commands[service_config['vars.sshmon_command']] = service_config['command_on_monitored_host']
+    ):
+        check_commands[service_config['vars.sshmon_command']] = service_config['command_on_monitored_host']
 
-    return check_commands
-
 
 users = {
     'sshmon': {
@@ -31,8 +28,6 @@ pkg_apt = {
     'monitoring-plugins': {},
 }
 
-check_commands = collect_commands()
-
 with open(join(repo.path, 'data', 'sshmon', 'sshmon.pub'), 'r') as fp:
     pubkey = fp.read().strip()
 
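
Note: taken together, the three sshmon hunks inline the former collect_commands() helper. The loop now runs at module level and fills check_commands directly, which makes both the return statement and the later check_commands = collect_commands() call redundant. A reduced standalone sketch of the resulting shape, with made-up metadata and a simplified guard in place of the original multi-line condition:

# Hypothetical stand-in for node.metadata.get('icinga2_api', {}).
icinga2_api = {
    'zfs': {
        'services': {
            'ZFS ZPOOL ONLINE tank': {
                'vars.sshmon_command': 'ZPOOL_ONLINE_tank',
                'command_on_monitored_host': 'check_zpool_online tank',
            },
        },
    },
}

check_commands = {}
for _, bundle_config in icinga2_api.items():
    for service, service_config in bundle_config.get('services', {}).items():
        if 'vars.sshmon_command' in service_config:  # simplified guard, not the repo's exact condition
            check_commands[service_config['vars.sshmon_command']] = service_config['command_on_monitored_host']

print(check_commands)
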
@@ -1,5 +1,3 @@
-from bundlewrap.utils import Fault
-
 from re import sub
 
 defaults = {
@@ -4,7 +4,7 @@
 def add_vlan_infos_to_interface(metadata):
     interfaces = {}
 
-    for iface, config in metadata.get('interfaces', {}).items():
+    for iface in metadata.get('interfaces', {}):
         if not '.' in iface:
             continue
 
@@ -38,17 +38,17 @@ defaults = {
 )
 def iptables(metadata):
     interfaces = metadata.get('transmission/webinterface-on-interfaces', set())
-    iptables = []
+    rules = []
 
-    iptables.append('iptables_both -A INPUT -p udp --dport {} -j ACCEPT'.format(
+    rules.append('iptables_both -A INPUT -p udp --dport {} -j ACCEPT'.format(
         metadata.get('transmission/config/peer-port'),
     ))
-    iptables.append('iptables_both -A INPUT -p tcp --dport {} -j ACCEPT'.format(
+    rules.append('iptables_both -A INPUT -p tcp --dport {} -j ACCEPT'.format(
         metadata.get('transmission/config/peer-port'),
     ))
 
     for iface in sorted(interfaces):
-        iptables.append('iptables_both -A INPUT -i {} -p tcp --dport {} -j ACCEPT'.format(
+        rules.append('iptables_both -A INPUT -i {} -p tcp --dport {} -j ACCEPT'.format(
             iface,
             metadata.get('transmission/config/rpc-port'),
         ))
@@ -56,7 +56,7 @@ def iptables(metadata):
     return {
         'iptables': {
             'bundle_rules': {
-                'transmission': iptables,
+                'transmission': rules,
             },
         },
     }
@@ -42,17 +42,16 @@ def cpu_cores_to_config_values(metadata):
 )
 def iptables(metadata):
     interfaces = metadata.get('unbound/restrict-to-interfaces', set())
-    iptables = []
+    rules = []
 
     for iface in sorted(interfaces):
-        iptables.append(f'iptables_both -A INPUT -i {iface} -p tcp --dport 53 -j ACCEPT')
-        iptables.append(f'iptables_both -A INPUT -i {iface} -p udp --dport 53 -j ACCEPT')
+        rules.append(f'iptables_both -A INPUT -i {iface} -p tcp --dport 53 -j ACCEPT')
+        rules.append(f'iptables_both -A INPUT -i {iface} -p udp --dport 53 -j ACCEPT')
 
     return {
         'iptables': {
             'bundle_rules': {
-                'unbound': iptables,
+                'unbound': rules,
             },
         },
     }
-
@@ -26,7 +26,7 @@ defaults = {
 def get_default_interface(metadata):
     interfaces = sorted(metadata.get('interfaces', {}).keys())
 
-    if len(interfaces):
+    if interfaces:
         return {
             'vnstat': {
                 'interface': interfaces[0],
@@ -17,11 +17,11 @@ if node.has_bundle('pppd'):
     }
 
     # Will be started and stopped by pppd.
-    should_be_running = None
-    should_be_enabled = False
+    SHOULD_BE_RUNNING = None
+    SHOULD_BE_ENABLED = False
 else:
-    should_be_running = True
-    should_be_enabled = True
+    SHOULD_BE_RUNNING = True
+    SHOULD_BE_ENABLED = True
 
 files['/etc/wide-dhcpv6/dhcp6c.conf'] = {
     'content_type': 'mako',
@@ -48,8 +48,8 @@ files['/etc/systemd/system/wide-dhcpv6-client.service'] = {
 
 svc_systemd = {
     'wide-dhcpv6-client': {
-        'running': should_be_running,
-        'enabled': should_be_enabled,
+        'running': SHOULD_BE_RUNNING,
+        'enabled': SHOULD_BE_ENABLED,
         'needs': {
             'file:/etc/systemd/system/wide-dhcpv6-client.service',
             'file:/etc/wide-dhcpv6/dhcp6c.conf',
@@ -1,5 +1,5 @@
 from json import dumps
-from os.path import join
+#from os.path import join
 
 from bundlewrap.metadata import MetadataJSONEncoder
 
@@ -1,4 +1,4 @@
-import re
+#import re
 
 defaults = {
     'apt': {
@@ -147,7 +147,7 @@ def monitoring(metadata):
 
     services = {}
 
-    for poolname, pool_options in metadata.get('zfs/pools').items():
+    for poolname, _ in metadata.get('zfs/pools').items():
         services['ZFS ZPOOL ONLINE {}'.format(poolname)] = {
             'command_on_monitored_host': 'sudo /usr/local/share/icinga/plugins/check_zpool_online {}'.format(poolname),
             'vars.notification.mail': True,