Compare commits

..

12 commits

Author SHA1 Message Date
mwiegand
a60503df09 wip 2022-08-31 11:36:23 +02:00
mwiegand
a9c0e91ea8 wip 2022-08-16 23:22:47 +02:00
mwiegand
dbcf21d807 wip 2022-08-16 22:39:42 +02:00
mwiegand
4c5db6abcf wip 2022-08-16 22:27:46 +02:00
mwiegand
d524734aa9 wip 2022-08-16 22:23:02 +02:00
mwiegand
3dbcf919b1 wip 2022-08-16 19:14:39 +02:00
mwiegand
cd7981cf86 wip 2022-08-13 15:35:41 +02:00
mwiegand
b478677221 wip 2022-08-13 15:35:41 +02:00
mwiegand
2904f62b41 wip 2022-08-13 15:35:41 +02:00
mwiegand
dd28bfee7b wip 2022-08-13 15:35:41 +02:00
mwiegand
9d28cf2e8e wip 2022-08-13 15:35:41 +02:00
mwiegand
01f3606f3d wip 2022-08-13 15:35:41 +02:00
323 changed files with 1720 additions and 7796 deletions

12
.envrc
View file

@ -1,7 +1,13 @@
#!/usr/bin/env bash #!/usr/bin/env bash
python3 -m venv .venv
source ./.venv/bin/activate
PATH_add .venv/bin
PATH_add bin PATH_add bin
python3 -m pip install --upgrade pip
source_env ~/.local/share/direnv/pyenv rm -rf .cache/bw/git_deploy
source_env ~/.local/share/direnv/venv export BW_GIT_DEPLOY_CACHE=.cache/bw/git_deploy
source_env ~/.local/share/direnv/bundlewrap export EXPERIMENTAL_UPLOAD_VIA_CAT=1
mkdir -p "$BW_GIT_DEPLOY_CACHE"
unset PS1

View file

@ -15,7 +15,7 @@ Raspberry pi as soundcard
# install bw fork # install bw fork
pip3 install --editable git+file:///Users/mwiegand/Projekte/bundlewrap-fork@main#egg=bundlewrap pip3 install --editable git+file:///Users/mwiegand/Projekte/bundlewrap-fork#egg=bundlewrap
# monitor timers # monitor timers
@ -35,14 +35,3 @@ fi
``` ```
telegraf: execd for daemons telegraf: execd for daemons
TEST
# git signing
git config --global gpg.format ssh
git config --global commit.gpgsign true
git config user.name CroneKorkN
git config user.email i@ckn.li
git config user.signingkey "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAILMVroYmswD4tLk6iH+2tvQiyaMe42yfONDsPDIdFv6I"

View file

@ -1,32 +0,0 @@
#!/usr/bin/env python3
from sys import argv
from os.path import realpath, dirname
from shlex import quote
from bundlewrap.repo import Repository
repo = Repository(dirname(dirname(realpath(__file__))))
if len(argv) == 1:
for node in repo.nodes:
for name in node.metadata.get('left4dead2/servers', {}):
print(name)
exit(0)
server = argv[1]
command = argv[2]
remote_code = """
from rcon.source import Client
with Client('127.0.0.1', {port}, passwd='''{password}''') as client:
response = client.run('''{command}''')
print(response)
"""
for node in repo.nodes:
for name, conf in node.metadata.get('left4dead2/servers', {}).items():
if name == server:
response = node.run('python3 -c ' + quote(remote_code.format(port=conf['port'], password=conf['rcon_password'], command=command)))
print(response.stdout.decode())

View file

@ -10,6 +10,7 @@ nodes = [
for node in sorted(repo.nodes_in_group('debian')) for node in sorted(repo.nodes_in_group('debian'))
if not node.dummy if not node.dummy
] ]
reboot_nodes = []
print('updating nodes:', sorted(node.name for node in nodes)) print('updating nodes:', sorted(node.name for node in nodes))
@ -23,13 +24,14 @@ for node in nodes:
print(node.run('DEBIAN_FRONTEND=noninteractive apt update').stdout.decode()) print(node.run('DEBIAN_FRONTEND=noninteractive apt update').stdout.decode())
print(node.run('DEBIAN_FRONTEND=noninteractive apt list --upgradable').stdout.decode()) print(node.run('DEBIAN_FRONTEND=noninteractive apt list --upgradable').stdout.decode())
if int(node.run('DEBIAN_FRONTEND=noninteractive apt list --upgradable 2> /dev/null | grep upgradable | wc -l').stdout.decode()): if int(node.run('DEBIAN_FRONTEND=noninteractive apt list --upgradable 2> /dev/null | grep upgradable | wc -l').stdout.decode()):
print(node.run('DEBIAN_FRONTEND=noninteractive apt -qy full-upgrade').stdout.decode()) print(node.run('DEBIAN_FRONTEND=noninteractive apt -y dist-upgrade').stdout.decode())
reboot_nodes.append(node)
# REBOOT IN ORDER # REBOOT IN ORDER
wireguard_servers = [ wireguard_servers = [
node node
for node in nodes for node in reboot_nodes
if node.has_bundle('wireguard') if node.has_bundle('wireguard')
and ( and (
ip_interface(node.metadata.get('wireguard/my_ip')).network.prefixlen < ip_interface(node.metadata.get('wireguard/my_ip')).network.prefixlen <
@ -39,7 +41,7 @@ wireguard_servers = [
wireguard_s2s = [ wireguard_s2s = [
node node
for node in nodes for node in reboot_nodes
if node.has_bundle('wireguard') if node.has_bundle('wireguard')
and ( and (
ip_interface(node.metadata.get('wireguard/my_ip')).network.prefixlen == ip_interface(node.metadata.get('wireguard/my_ip')).network.prefixlen ==
@ -49,7 +51,7 @@ wireguard_s2s = [
everything_else = [ everything_else = [
node node
for node in nodes for node in reboot_nodes
if not node.has_bundle('wireguard') if not node.has_bundle('wireguard')
] ]
@ -60,11 +62,8 @@ for node in [
*wireguard_s2s, *wireguard_s2s,
*wireguard_servers, *wireguard_servers,
]: ]:
print('rebooting', node.name)
try: try:
if node.run('test -e /var/run/reboot-required', may_fail=True).return_code == 0: print(node.run('systemctl reboot').stdout.decode())
print('rebooting', node.name)
print(node.run('systemctl reboot').stdout.decode())
else:
print('not rebooting', node.name)
except Exception as e: except Exception as e:
print(e) print(e)

View file

@ -5,17 +5,9 @@ from os.path import realpath, dirname
from sys import argv from sys import argv
from ipaddress import ip_network, ip_interface from ipaddress import ip_network, ip_interface
if len(argv) != 3:
print(f'usage: {argv[0]} <node> <client>')
exit(1)
repo = Repository(dirname(dirname(realpath(__file__)))) repo = Repository(dirname(dirname(realpath(__file__))))
server_node = repo.get_node(argv[1]) server_node = repo.get_node(argv[1])
if argv[2] not in server_node.metadata.get('wireguard/clients'):
print(f'client {argv[2]} not found in: {server_node.metadata.get("wireguard/clients").keys()}')
exit(1)
data = server_node.metadata.get(f'wireguard/clients/{argv[2]}') data = server_node.metadata.get(f'wireguard/clients/{argv[2]}')
vpn_network = ip_interface(server_node.metadata.get('wireguard/my_ip')).network vpn_network = ip_interface(server_node.metadata.get('wireguard/my_ip')).network
@ -28,7 +20,9 @@ for peer in server_node.metadata.get('wireguard/s2s').values():
if not ip_network(network).subnet_of(vpn_network): if not ip_network(network).subnet_of(vpn_network):
allowed_ips.append(ip_network(network)) allowed_ips.append(ip_network(network))
conf = f''' conf = \
f'''>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
[Interface] [Interface]
PrivateKey = {repo.libs.wireguard.privkey(data['peer_id'])} PrivateKey = {repo.libs.wireguard.privkey(data['peer_id'])}
ListenPort = 51820 ListenPort = 51820
@ -41,12 +35,11 @@ PresharedKey = {repo.libs.wireguard.psk(data['peer_id'], server_node.metadata.ge
AllowedIPs = {', '.join(str(client_route) for client_route in sorted(allowed_ips))} AllowedIPs = {', '.join(str(client_route) for client_route in sorted(allowed_ips))}
Endpoint = {ip_interface(server_node.metadata.get('network/external/ipv4')).ip}:51820 Endpoint = {ip_interface(server_node.metadata.get('network/external/ipv4')).ip}:51820
PersistentKeepalive = 10 PersistentKeepalive = 10
'''
print('>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>') <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<'''
print(conf) print(conf)
print('<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<')
if input("print qrcode? [Yn]: ").upper() in ['', 'Y']: if input("print qrcode? [yN]: ").upper() == 'Y':
import pyqrcode import pyqrcode
print(pyqrcode.create(conf).terminal(quiet_zone=1)) print(pyqrcode.create(conf).terminal(quiet_zone=1))

View file

@ -1,6 +1,3 @@
# https://manpages.debian.org/latest/apt/sources.list.5.de.html
# https://repolib.readthedocs.io/en/latest/deb822-format.html
```python ```python
{ {
'apt': { 'apt': {
@ -8,32 +5,8 @@
'apt-transport-https': {}, 'apt-transport-https': {},
}, },
'sources': { 'sources': {
'debian': { # place key under data/apt/keys/packages.cloud.google.com.{asc|gpg}
'types': { # optional, defaults to `{'deb'}`` 'deb https://packages.cloud.google.com/apt cloud-sdk main',
'deb',
'deb-src',
},
'options': { # optional
'aarch': 'amd64',
},
'urls': {
'https://deb.debian.org/debian',
},
'suites': { # at least one
'{codename}',
'{codename}-updates',
'{codename}-backports',
},
'components': { # optional
'main',
'contrib',
'non-frese',
},
# key:
# - optional, defaults to source name (`debian` in this example)
# - place key under data/apt/keys/debian-12.{asc|gpg}
'key': 'debian-{version}',
},
}, },
}, },
} }

View file

@ -1,15 +0,0 @@
#!/bin/bash
apt update -qq --silent 2> /dev/null
UPGRADABLE=$(apt list --upgradable -qq 2> /dev/null | cut -d '/' -f 1)
if test "$UPGRADABLE" != ""
then
echo "$(wc -l <<< $UPGRADABLE) package(s) upgradable:"
echo
echo "$UPGRADABLE"
exit 1
else
exit 0
fi

View file

@ -1,68 +1,33 @@
# TODO pin repo: https://superuser.com/a/1595920 from os.path import join
from urllib.parse import urlparse
from glob import glob
from os.path import join, basename from os.path import join, basename
directories = { directories = {
'/etc/apt': {
'purge': True,
'triggers': {
'action:apt_update',
},
},
'/etc/apt/apt.conf.d': {
# existance is expected
'purge': True,
'triggers': {
'action:apt_update',
},
},
'/etc/apt/keyrings': {
# https://askubuntu.com/a/1307181
'purge': True,
'triggers': {
'action:apt_update',
},
},
# '/etc/apt/listchanges.conf.d': {
# 'purge': True,
# 'triggers': {
# 'action:apt_update',
# },
# },
'/etc/apt/preferences.d': {
'purge': True,
'triggers': {
'action:apt_update',
},
},
'/etc/apt/sources.list.d': { '/etc/apt/sources.list.d': {
'purge': True, 'purge': True,
'triggers': { 'triggers': {
'action:apt_update', 'action:apt_update',
}, },
}, },
'/etc/apt/trusted.gpg.d': {
'purge': True,
'triggers': {
'action:apt_update',
},
},
'/etc/apt/preferences.d': {
'purge': True,
'triggers': {
'action:apt_update',
},
},
} }
files = { files = {
'/etc/apt/apt.conf': {
'content': repo.libs.apt.render_apt_conf(node.metadata.get('apt/config')),
'triggers': {
'action:apt_update',
},
},
'/etc/apt/sources.list': { '/etc/apt/sources.list': {
'content': '# managed by bundlewrap\n', 'content': '# managed'
'triggers': {
'action:apt_update',
},
}, },
# '/etc/apt/listchanges.conf': {
# 'content': repo.libs.ini.dumps(node.metadata.get('apt/list_changes')),
# },
'/usr/lib/nagios/plugins/check_apt_upgradable': {
'mode': '0755',
},
# /etc/kernel/postinst.d/apt-auto-removal
} }
actions = { actions = {
@ -76,22 +41,41 @@ actions = {
}, },
} }
# create sources.lists and respective keyfiles # group sources by apt server hostname
for name, config in node.metadata.get('apt/sources').items(): hosts = {}
# place keyfile
keyfile_destination_path = repo.libs.apt.format_variables(node, config['options']['Signed-By']) for source_string in node.metadata.get('apt/sources'):
files[keyfile_destination_path] = { source = repo.libs.apt.AptSource(source_string)
'source': join(repo.path, 'data', 'apt', 'keys', basename(keyfile_destination_path)), hosts\
'content_type': 'binary', .setdefault(source.url.hostname, list())\
.append(source)
# create sources lists and keyfiles
for host, sources in hosts.items():
keyfile = basename(glob(join(repo.path, 'data', 'apt', 'keys', f'{host}.*'))[0])
destination_path = f'/etc/apt/trusted.gpg.d/{keyfile}'
for source in sources:
source.options['signed-by'] = [destination_path]
files[f'/etc/apt/sources.list.d/{host}.list'] = {
'content': '\n'.join(sorted(set(
str(source).format(
release=node.metadata.get('os_release'),
version=node.os_version[0], # WIP crystal
)
for source in sources
))),
'triggers': { 'triggers': {
'action:apt_update', 'action:apt_update',
}, },
} }
# place sources.list files[destination_path] = {
files[f'/etc/apt/sources.list.d/{name}.sources'] = { 'source': join(repo.path, 'data', 'apt', 'keys', keyfile),
'content': repo.libs.apt.render_source(node, name), 'content_type': 'binary',
'triggers': { 'triggers': {
'action:apt_update', 'action:apt_update',
}, },
@ -99,14 +83,14 @@ for name, config in node.metadata.get('apt/sources').items():
# create backport pinnings # create backport pinnings
for package, options in node.metadata.get('apt/packages', {}).items(): for package, options in node.metadata.get('apt/packages', {}).items():
pkg_apt[package] = options pkg_apt[package] = options
if pkg_apt[package].pop('backports', False): if pkg_apt[package].pop('backports', False):
files[f'/etc/apt/preferences.d/{package}'] = { files[f'/etc/apt/preferences.d/{package}'] = {
'content': '\n'.join([ 'content': '\n'.join([
f"Package: {package}", f"Package: {package}",
f"Pin: release a={node.metadata.get('os_codename')}-backports", f"Pin: release a={node.metadata.get('os_release')}-backports",
f"Pin-Priority: 900", f"Pin-Priority: 900",
]), ]),
'needed_by': [ 'needed_by': [
@ -116,25 +100,3 @@ for package, options in node.metadata.get('apt/packages', {}).items():
'action:apt_update', 'action:apt_update',
}, },
} }
# unattended upgrades
#
# unattended-upgrades.service: delays shutdown if necessary
# apt-daily.timer: performs apt update
# apt-daily-upgrade.timer: performs apt upgrade
svc_systemd['unattended-upgrades.service'] = {
'needs': [
'pkg_apt:unattended-upgrades',
],
}
svc_systemd['apt-daily.timer'] = {
'needs': [
'pkg_apt:unattended-upgrades',
],
}
svc_systemd['apt-daily-upgrade.timer'] = {
'needs': [
'pkg_apt:unattended-upgrades',
],
}

View file

@ -1,177 +1,6 @@
defaults = { defaults = {
'apt': { 'apt': {
'packages': { 'packages': {},
'apt-listchanges': { 'sources': set(),
'installed': False,
},
},
'config': {
'DPkg': {
'Pre-Install-Pkgs': {
'/usr/sbin/dpkg-preconfigure --apt || true',
},
'Post-Invoke': {
# keep package cache empty
'/bin/rm -f /var/cache/apt/archives/*.deb || true',
},
'Options': {
# https://unix.stackexchange.com/a/642541/357916
'--force-confold',
'--force-confdef',
},
},
'APT': {
'NeverAutoRemove': {
'^firmware-linux.*',
'^linux-firmware$',
'^linux-image-[a-z0-9]*$',
'^linux-image-[a-z0-9]*-[a-z0-9]*$',
},
'VersionedKernelPackages': {
# kernels
'linux-.*',
'kfreebsd-.*',
'gnumach-.*',
# (out-of-tree) modules
'.*-modules',
'.*-kernel',
},
'Never-MarkAuto-Sections': {
'metapackages',
'tasks',
},
'Move-Autobit-Sections': {
'oldlibs',
},
'Update': {
# https://unix.stackexchange.com/a/653377/357916
'Error-Mode': 'any',
},
},
},
'sources': {},
},
'monitoring': {
'services': {
'apt upgradable': {
'vars.command': '/usr/lib/nagios/plugins/check_apt_upgradable',
'vars.sudo': True,
'check_interval': '1h',
},
'current kernel': {
'vars.command': 'ls /boot/vmlinuz-* | sort -V | tail -n 1 | xargs -n1 basename | cut -d "-" -f 2- | grep -q "^$(uname -r)$"',
'check_interval': '1h',
},
'apt reboot-required': {
'vars.command': 'ls /var/run/reboot-required 2> /dev/null && exit 1 || exit 0',
'check_interval': '1h',
},
},
}, },
} }
@metadata_reactor.provides(
'apt/sources',
)
def key(metadata):
return {
'apt': {
'sources': {
source_name: {
'key': source_name,
}
for source_name, source_config in metadata.get('apt/sources').items()
if 'key' not in source_config
},
},
}
@metadata_reactor.provides(
'apt/sources',
)
def signed_by(metadata):
return {
'apt': {
'sources': {
source_name: {
'options': {
'Signed-By': '/etc/apt/keyrings/' + metadata.get(f'apt/sources/{source_name}/key') + '.' + repo.libs.apt.find_keyfile_extension(node, metadata.get(f'apt/sources/{source_name}/key')),
},
}
for source_name in metadata.get('apt/sources')
},
},
}
@metadata_reactor.provides(
'apt/config',
'apt/packages',
)
def unattended_upgrades(metadata):
return {
'apt': {
'config': {
'APT': {
'Periodic': {
'Update-Package-Lists': '1',
'Unattended-Upgrade': '1',
},
},
'Unattended-Upgrade': {
'Origins-Pattern': {
"origin=*",
},
},
},
'packages': {
'unattended-upgrades': {},
},
},
}
# @metadata_reactor.provides(
# 'apt/config',
# 'apt/list_changes',
# )
# def listchanges(metadata):
# return {
# 'apt': {
# 'config': {
# 'DPkg': {
# 'Pre-Install-Pkgs': {
# '/usr/bin/apt-listchanges --apt || test $? -lt 10',
# },
# 'Tools': {
# 'Options': {
# '/usr/bin/apt-listchanges': {
# 'Version': '2',
# 'InfoFD': '20',
# },
# },
# },
# },
# 'Dir': {
# 'Etc': {
# 'apt-listchanges-main': 'listchanges.conf',
# 'apt-listchanges-parts': 'listchanges.conf.d',
# },
# },
# },
# 'list_changes': {
# 'apt': {
# 'frontend': 'pager',
# 'which': 'news',
# 'email_address': 'root',
# 'email_format': 'text',
# 'confirm': 'false',
# 'headers': 'false',
# 'reverse': 'false',
# 'save_seen': '/var/lib/apt/listchanges.db',
# },
# },
# },
# }

View file

@ -1,47 +0,0 @@
#!/usr/bin/env python3
import json
from subprocess import check_output
from datetime import datetime, timedelta
now = datetime.now()
two_days_ago = now - timedelta(days=2)
with open('/etc/backup-freshness-check.json', 'r') as file:
config = json.load(file)
local_datasets = check_output(['zfs', 'list', '-H', '-o', 'name']).decode().splitlines()
errors = set()
for dataset in config['datasets']:
if f'tank/{dataset}' not in local_datasets:
errors.add(f'dataset "{dataset}" not present at all')
continue
snapshots = [
snapshot
for snapshot in check_output(['zfs', 'list', '-H', '-o', 'name', '-t', 'snapshot', f'tank/{dataset}', '-s', 'creation']).decode().splitlines()
if f"@{config['prefix']}" in snapshot
]
if not snapshots:
errors.add(f'dataset "{dataset}" has no backup snapshots')
continue
newest_backup_snapshot = snapshots[-1]
snapshot_datetime = datetime.utcfromtimestamp(
int(check_output(['zfs', 'list', '-p', '-H', '-o', 'creation', '-t', 'snapshot', newest_backup_snapshot]).decode())
)
if snapshot_datetime < two_days_ago:
days_ago = (now - snapshot_datetime).days
errors.add(f'dataset "{dataset}" has not been backed up for {days_ago} days')
continue
if errors:
for error in errors:
print(error)
exit(2)
else:
print(f"all {len(config['datasets'])} datasets have fresh backups.")

View file

@ -1,15 +0,0 @@
from json import dumps
from bundlewrap.metadata import MetadataJSONEncoder
files = {
'/etc/backup-freshness-check.json': {
'content': dumps({
'prefix': node.metadata.get('backup-freshness-check/prefix'),
'datasets': node.metadata.get('backup-freshness-check/datasets'),
}, indent=4, sort_keys=True, cls=MetadataJSONEncoder),
},
'/usr/lib/nagios/plugins/check_backup_freshness': {
'mode': '0755',
},
}

View file

@ -1,37 +0,0 @@
defaults = {
'backup-freshness-check': {
'server': node.name,
'prefix': 'auto-backup_',
'datasets': {},
},
'monitoring': {
'services': {
'backup freshness': {
'vars.command': '/usr/lib/nagios/plugins/check_backup_freshness',
'check_interval': '6h',
'vars.sudo': True,
},
},
},
}
@metadata_reactor.provides(
'backup-freshness-check/datasets'
)
def backup_freshness_check(metadata):
return {
'backup-freshness-check': {
'datasets': {
f"{other_node.metadata.get('id')}/{dataset}"
for other_node in repo.nodes
if not other_node.dummy
and other_node.has_bundle('backup')
and other_node.has_bundle('zfs')
and other_node.metadata.get('backup/server') == metadata.get('backup-freshness-check/server')
for dataset, options in other_node.metadata.get('zfs/datasets').items()
if options.get('backup', True)
and not options.get('mountpoint', None) in [None, 'none']
},
},
}

View file

@ -16,14 +16,7 @@ defaults = {
'/usr/bin/rsync', '/usr/bin/rsync',
'/sbin/zfs', '/sbin/zfs',
}, },
}, }
'zfs': {
'datasets': {
'tank': {
'recordsize': "1048576",
},
},
},
} }
@ -32,10 +25,9 @@ defaults = {
) )
def zfs(metadata): def zfs(metadata):
datasets = {} datasets = {}
for other_node in repo.nodes: for other_node in repo.nodes:
if ( if (
not other_node.dummy and
other_node.has_bundle('backup') and other_node.has_bundle('backup') and
other_node.metadata.get('backup/server') == node.name other_node.metadata.get('backup/server') == node.name
): ):
@ -50,7 +42,7 @@ def zfs(metadata):
'com.sun:auto-snapshot': 'false', 'com.sun:auto-snapshot': 'false',
'backup': False, 'backup': False,
} }
# for rsync backups # for rsync backups
datasets[f'{base_dataset}/fs'] = { datasets[f'{base_dataset}/fs'] = {
'mountpoint': f"/mnt/backups/{id}", 'mountpoint': f"/mnt/backups/{id}",
@ -59,10 +51,10 @@ def zfs(metadata):
'com.sun:auto-snapshot': 'true', 'com.sun:auto-snapshot': 'true',
'backup': False, 'backup': False,
} }
# for zfs send/recv # for zfs send/recv
if other_node.has_bundle('zfs'): if other_node.has_bundle('zfs'):
# base datasets for each tank # base datasets for each tank
for pool in other_node.metadata.get('zfs/pools'): for pool in other_node.metadata.get('zfs/pools'):
datasets[f'{base_dataset}/{pool}'] = { datasets[f'{base_dataset}/{pool}'] = {
@ -72,7 +64,7 @@ def zfs(metadata):
'com.sun:auto-snapshot': 'false', 'com.sun:auto-snapshot': 'false',
'backup': False, 'backup': False,
} }
# actual datasets # actual datasets
for path in other_node.metadata.get('backup/paths'): for path in other_node.metadata.get('backup/paths'):
for dataset, config in other_node.metadata.get('zfs/datasets').items(): for dataset, config in other_node.metadata.get('zfs/datasets').items():
@ -99,7 +91,7 @@ def zfs(metadata):
def dns(metadata): def dns(metadata):
return { return {
'dns': { 'dns': {
metadata.get('backup-server/hostname'): repo.libs.ip.get_a_records(metadata), metadata.get('backup-server/hostname'): repo.libs.dns.get_a_records(metadata),
} }
} }

View file

@ -1,31 +1,11 @@
#!/bin/bash #!/bin/bash
set -u
# FIXME: inelegant # FIXME: inelegant
% if wol_command: % if wol_command:
${wol_command} ${wol_command}
% endif % endif
exit=0
failed_paths=""
for path in $(jq -r '.paths | .[]' < /etc/backup/config.json) for path in $(jq -r '.paths | .[]' < /etc/backup/config.json)
do do
echo backing up $path
/opt/backup/backup_path "$path" /opt/backup/backup_path "$path"
# set exit to 1 if any backup fails
if [ $? -ne 0 ]
then
echo ERROR: backing up $path failed >&2
exit=5
failed_paths="$failed_paths $path"
fi
done done
if [ $exit -ne 0 ]
then
echo "ERROR: failed to backup paths: $failed_paths" >&2
fi
exit $exit

View file

@ -1,13 +1,11 @@
#!/bin/bash #!/bin/bash
set -exu
path=$1 path=$1
if zfs list -H -o mountpoint | grep -q "^$path$" if zfs list -H -o mountpoint | grep -q "$path"
then then
/opt/backup/backup_path_via_zfs "$path" /opt/backup/backup_path_via_zfs "$path"
elif test -e "$path" elif test -d "$path"
then then
/opt/backup/backup_path_via_rsync "$path" /opt/backup/backup_path_via_rsync "$path"
else else

View file

@ -7,14 +7,5 @@ uuid=$(jq -r .client_uuid < /etc/backup/config.json)
server=$(jq -r .server_hostname < /etc/backup/config.json) server=$(jq -r .server_hostname < /etc/backup/config.json)
ssh="ssh -o ConnectTimeout=5 backup-receiver@$server" ssh="ssh -o ConnectTimeout=5 backup-receiver@$server"
if test -d "$path" rsync -av --rsync-path="sudo rsync" "$path/" "backup-receiver@$server:/mnt/backups/$uuid$path/"
then $ssh sudo zfs snap "tank/$uuid/fs@auto-backup_$(date +"%Y-%m-%d_%H:%M:%S")"
postfix="/"
elif test -f "$path"
then
postfix=""
else
exit 1
fi
rsync -av --rsync-path="sudo rsync" "$path$postfix" "backup-receiver@$server:/mnt/backups/$uuid$path$postfix"

View file

@ -1,6 +1,6 @@
#!/bin/bash #!/bin/bash
set -eu set -exu
path=$1 path=$1
uuid=$(jq -r .client_uuid < /etc/backup/config.json) uuid=$(jq -r .client_uuid < /etc/backup/config.json)
@ -39,20 +39,20 @@ else
echo "INCREMENTAL BACKUP" echo "INCREMENTAL BACKUP"
last_bookmark=$(zfs list -t bookmark -H -o name | grep "^$source_dataset#$bookmark_prefix" | sort | tail -1 | cut -d '#' -f 2) last_bookmark=$(zfs list -t bookmark -H -o name | grep "^$source_dataset#$bookmark_prefix" | sort | tail -1 | cut -d '#' -f 2)
[[ -z "$last_bookmark" ]] && echo "ERROR - last_bookmark is empty" && exit 98 [[ -z "$last_bookmark" ]] && echo "ERROR - last_bookmark is empty" && exit 98
$(zfs send -v -L -i "#$last_bookmark" "$source_dataset@$new_bookmark" | $ssh sudo zfs recv "$target_dataset") $(zfs send -v -i "#$last_bookmark" "$source_dataset@$new_bookmark" | $ssh sudo zfs recv "$target_dataset")
fi fi
if [[ "$?" == "0" ]] if [[ "$?" == "0" ]]
then then
# delete old local bookmarks # delete old local bookmarks
for destroyable_bookmark in $(zfs list -t bookmark -H -o name "$source_dataset" | grep "^$source_dataset#$bookmark_prefix") for destroyable_bookmark in $(zfs list -t bookmark -H -o name "$dataset" | grep "^$dataset#$bookmark_prefix")
do do
zfs destroy "$destroyable_bookmark" zfs destroy "$destroyable_bookmark"
done done
# delete remote snapshots from bookmarks (except newest, even of not necessary; maybe for resuming tho) # delete snapshots from bookmarks (except newest, even of not necessary; maybe for resuming tho)
for destroyable_snapshot in $($ssh sudo zfs list -t snapshot -H -o name "$target_dataset" | grep "^$target_dataset@$bookmark_prefix" | grep -v "$new_bookmark") for destroyable_snapshot in $($ssh sudo zfs list -t snapshot -H -o name "$dataset" | grep "^$dataset@$bookmark_prefix" | grep -v "$new_bookmark")
do do
$ssh sudo zfs destroy "$destroyable_snapshot" $ssh sudo zfs destroy "$destroyable_snapshot"
done done

View file

@ -20,11 +20,7 @@ defaults = {
'systemd-timers': { 'systemd-timers': {
f'backup': { f'backup': {
'command': '/opt/backup/backup_all', 'command': '/opt/backup/backup_all',
'when': '1:00', 'when': 'daily',
'persistent': True,
'after': {
'network-online.target',
},
}, },
}, },
} }

View file

@ -10,7 +10,7 @@ options {
% if type == 'master': % if type == 'master':
notify yes; notify yes;
also-notify { ${' '.join(sorted(f'{ip};' for ip in slave_ips))} }; also-notify { ${' '.join([f'{ip};' for ip in slave_ips])} };
allow-transfer { ${' '.join(sorted(f'{ip};' for ip in slave_ips))} }; allow-transfer { ${' '.join([f'{ip};' for ip in slave_ips])} };
% endif % endif
}; };

View file

@ -1,5 +1,7 @@
from ipaddress import ip_address, ip_interface from ipaddress import ip_address, ip_interface
from datetime import datetime from datetime import datetime
import json
from bundlewrap.metadata import MetadataJSONEncoder
from hashlib import sha3_512 from hashlib import sha3_512
@ -19,7 +21,7 @@ directories[f'/var/lib/bind'] = {
'svc_systemd:bind9', 'svc_systemd:bind9',
], ],
'triggers': [ 'triggers': [
'svc_systemd:bind9:reload', 'svc_systemd:bind9:restart',
], ],
} }
@ -29,7 +31,7 @@ files['/etc/default/bind9'] = {
'svc_systemd:bind9', 'svc_systemd:bind9',
], ],
'triggers': [ 'triggers': [
'svc_systemd:bind9:reload', 'svc_systemd:bind9:restart',
], ],
} }
@ -43,7 +45,7 @@ files['/etc/bind/named.conf'] = {
'svc_systemd:bind9', 'svc_systemd:bind9',
], ],
'triggers': [ 'triggers': [
'svc_systemd:bind9:reload', 'svc_systemd:bind9:restart',
], ],
} }
@ -63,7 +65,7 @@ files['/etc/bind/named.conf.options'] = {
'svc_systemd:bind9', 'svc_systemd:bind9',
], ],
'triggers': [ 'triggers': [
'svc_systemd:bind9:reload', 'svc_systemd:bind9:restart',
], ],
} }
@ -93,7 +95,7 @@ files['/etc/bind/named.conf.local'] = {
'svc_systemd:bind9', 'svc_systemd:bind9',
], ],
'triggers': [ 'triggers': [
'svc_systemd:bind9:reload', 'svc_systemd:bind9:restart',
], ],
} }
@ -106,7 +108,7 @@ for view_name, view_conf in master_node.metadata.get('bind/views').items():
'svc_systemd:bind9', 'svc_systemd:bind9',
], ],
'triggers': [ 'triggers': [
'svc_systemd:bind9:reload', 'svc_systemd:bind9:restart',
], ],
} }
@ -127,10 +129,10 @@ for view_name, view_conf in master_node.metadata.get('bind/views').items():
'svc_systemd:bind9', 'svc_systemd:bind9',
], ],
'triggers': [ 'triggers': [
'svc_systemd:bind9:reload', 'svc_systemd:bind9:restart',
], ],
} }
svc_systemd['bind9'] = {} svc_systemd['bind9'] = {}
@ -139,6 +141,6 @@ actions['named-checkconf'] = {
'unless': 'named-checkconf -z', 'unless': 'named-checkconf -z',
'needs': [ 'needs': [
'svc_systemd:bind9', 'svc_systemd:bind9',
'svc_systemd:bind9:reload', 'svc_systemd:bind9:restart',
] ]
} }

View file

@ -3,7 +3,6 @@ from json import dumps
h = repo.libs.hashable.hashable h = repo.libs.hashable.hashable
repo.libs.bind.repo = repo repo.libs.bind.repo = repo
defaults = { defaults = {
'apt': { 'apt': {
'packages': { 'packages': {
@ -42,12 +41,6 @@ defaults = {
}, },
'zones': set(), 'zones': set(),
}, },
'nftables': {
'input': {
'tcp dport 53 accept',
'udp dport 53 accept',
},
},
'telegraf': { 'telegraf': {
'config': { 'config': {
'inputs': { 'inputs': {
@ -93,7 +86,7 @@ def master_slave(metadata):
def dns(metadata): def dns(metadata):
return { return {
'dns': { 'dns': {
metadata.get('bind/hostname'): repo.libs.ip.get_a_records(metadata), metadata.get('bind/hostname'): repo.libs.dns.get_a_records(metadata),
} }
} }
@ -104,7 +97,7 @@ def dns(metadata):
def collect_records(metadata): def collect_records(metadata):
if metadata.get('bind/type') == 'slave': if metadata.get('bind/type') == 'slave':
return {} return {}
views = {} views = {}
for view_name, view_conf in metadata.get('bind/views').items(): for view_name, view_conf in metadata.get('bind/views').items():
@ -124,7 +117,7 @@ def collect_records(metadata):
name = fqdn[0:-len(zone) - 1] name = fqdn[0:-len(zone) - 1]
for type, values in records.items(): for type, values in records.items():
for value in values: for value in values:
if repo.libs.bind.record_matches_view(value, type, name, zone, view_name, metadata): if repo.libs.bind.record_matches_view(value, type, name, zone, view_name, metadata):
views\ views\
@ -135,7 +128,7 @@ def collect_records(metadata):
.add( .add(
h({'name': name, 'type': type, 'value': value}) h({'name': name, 'type': type, 'value': value})
) )
return { return {
'bind': { 'bind': {
'views': views, 'views': views,
@ -167,7 +160,7 @@ def ns_records(metadata):
# FIXME: bw currently cant handle lists of dicts :( # FIXME: bw currently cant handle lists of dicts :(
h({'name': '@', 'type': 'NS', 'value': f"{nameserver}."}) h({'name': '@', 'type': 'NS', 'value': f"{nameserver}."})
for nameserver in nameservers for nameserver in nameservers
} }
} }
for zone_name, zone_conf in view_conf['zones'].items() for zone_name, zone_conf in view_conf['zones'].items()
} }
@ -184,7 +177,7 @@ def ns_records(metadata):
def slaves(metadata): def slaves(metadata):
if metadata.get('bind/type') == 'slave': if metadata.get('bind/type') == 'slave':
return {} return {}
return { return {
'bind': { 'bind': {
'slaves': [ 'slaves': [

View file

@ -1,10 +1,6 @@
from shlex import quote from shlex import quote
defaults = {
'build-ci': {},
}
@metadata_reactor.provides( @metadata_reactor.provides(
'users/build-ci/authorized_users', 'users/build-ci/authorized_users',
'sudoers/build-ci', 'sudoers/build-ci',
@ -22,7 +18,7 @@ def ssh_keys(metadata):
}, },
'sudoers': { 'sudoers': {
'build-ci': { 'build-ci': {
f"/usr/bin/chown -R build-ci\\:{quote(ci['group'])} {quote(ci['path'])}" f"/usr/bin/chown -R build-ci\:{quote(ci['group'])} {quote(ci['path'])}"
for ci in metadata.get('build-ci').values() for ci in metadata.get('build-ci').values()
} }
}, },

View file

@ -71,7 +71,6 @@ def nginx(metadata):
'context': { 'context': {
'target': 'http://127.0.0.1:4000', 'target': 'http://127.0.0.1:4000',
}, },
'check_path': '/status',
}, },
}, },
}, },

View file

@ -1,20 +1,10 @@
debian_version = min([node.os_version, (11,)])[0] # FIXME
defaults = { defaults = {
'apt': { 'apt': {
'packages': { 'packages': {
'crystal': {}, 'crystal': {},
}, },
'sources': { 'sources': {
'crystal': { 'deb http://download.opensuse.org/repositories/devel:/languages:/crystal/Debian_{version}/ /',
# https://software.opensuse.org/download.html?project=devel%3Alanguages%3Acrystal&package=crystal
'urls': {
'http://download.opensuse.org/repositories/devel:/languages:/crystal/Debian_Testing/',
},
'suites': {
'/',
},
},
}, },
}, },
} }

View file

@ -1,12 +1,9 @@
DOVECOT DOVECOT
======= =======
rescan index rescan index: https://doc.dovecot.org/configuration_manual/fts/#rescan
------------
https://doc.dovecot.org/configuration_manual/fts/#rescan
``` ```
doveadm fts rescan -u 'i@ckn.li' sudo -u vmail doveadm fts rescan -u 'test@mail2.sublimity.de'
doveadm index -u 'i@ckn.li' -q '*' sudo -u vmail doveadm index -u 'test@mail2.sublimity.de' -q '*'
``` ```

View file

@ -66,7 +66,8 @@ xmlunzip() {
trap "rm -rf $path $tempdir" 0 1 2 3 14 15 trap "rm -rf $path $tempdir" 0 1 2 3 14 15
cd $tempdir || exit 1 cd $tempdir || exit 1
unzip -q "$path" 2>/dev/null || exit 0 unzip -q "$path" 2>/dev/null || exit 0
find . -name "$name" -print0 | xargs -0 cat | /usr/lib/dovecot/xml2text find . -name "$name" -print0 | xargs -0 cat |
$libexec_dir/xml2text
} }
wait_timeout() { wait_timeout() {

View file

@ -2,14 +2,7 @@ connect = host=${host} dbname=${name} user=${user} password=${password}
driver = pgsql driver = pgsql
default_pass_scheme = ARGON2ID default_pass_scheme = ARGON2ID
user_query = SELECT '/var/vmail/%u' AS home, 'vmail' AS uid, 'vmail' AS gid password_query = SELECT CONCAT(users.name, '@', domains.name) AS user, password\
iterate_query = SELECT CONCAT(users.name, '@', domains.name) AS user \
FROM users \
LEFT JOIN domains ON users.domain_id = domains.id \
WHERE redirect IS NULL
password_query = SELECT CONCAT(users.name, '@', domains.name) AS user, password \
FROM users \ FROM users \
LEFT JOIN domains ON users.domain_id = domains.id \ LEFT JOIN domains ON users.domain_id = domains.id \
WHERE redirect IS NULL \ WHERE redirect IS NULL \

View file

@ -6,26 +6,26 @@ ssl_cert = </var/lib/dehydrated/certs/${node.metadata.get('mailserver/hostname')
ssl_key = </var/lib/dehydrated/certs/${node.metadata.get('mailserver/hostname')}/privkey.pem ssl_key = </var/lib/dehydrated/certs/${node.metadata.get('mailserver/hostname')}/privkey.pem
ssl_dh = </etc/dovecot/dhparam.pem ssl_dh = </etc/dovecot/dhparam.pem
ssl_client_ca_dir = /etc/ssl/certs ssl_client_ca_dir = /etc/ssl/certs
mail_location = maildir:${node.metadata.get('mailserver/maildir')}/%u:INDEX=${node.metadata.get('mailserver/maildir')}/index/%u mail_location = maildir:~
mail_plugins = fts fts_xapian mail_plugins = fts fts_xapian
namespace inbox { namespace inbox {
inbox = yes inbox = yes
separator = . separator = .
mailbox Drafts { mailbox Drafts {
auto = subscribe auto = subscribe
special_use = \Drafts special_use = \Drafts
} }
mailbox Junk { mailbox Junk {
auto = create auto = create
special_use = \Junk special_use = \Junk
} }
mailbox Trash { mailbox Trash {
auto = subscribe auto = subscribe
special_use = \Trash special_use = \Trash
} }
mailbox Sent { mailbox Sent {
auto = subscribe auto = subscribe
special_use = \Sent special_use = \Sent
} }
} }
@ -34,10 +34,9 @@ passdb {
driver = sql driver = sql
args = /etc/dovecot/dovecot-sql.conf args = /etc/dovecot/dovecot-sql.conf
} }
# use sql for userdb too, to enable iterate_query
userdb { userdb {
driver = sql driver = static
args = /etc/dovecot/dovecot-sql.conf args = uid=vmail gid=vmail home=/var/vmail/%u
} }
service auth { service auth {
@ -81,10 +80,10 @@ protocol imap {
mail_plugins = $mail_plugins imap_sieve mail_plugins = $mail_plugins imap_sieve
mail_max_userip_connections = 50 mail_max_userip_connections = 50
imap_idle_notify_interval = 29 mins imap_idle_notify_interval = 29 mins
} }
protocol lmtp { protocol lmtp {
mail_plugins = $mail_plugins sieve mail_plugins = $mail_plugins sieve
} }
protocol sieve { protocol sieve {
plugin { plugin {
sieve = /var/vmail/sieve/%u.sieve sieve = /var/vmail/sieve/%u.sieve
@ -118,7 +117,7 @@ plugin {
sieve_dir = /var/vmail/sieve/%u/ sieve_dir = /var/vmail/sieve/%u/
sieve = /var/vmail/sieve/%u.sieve sieve = /var/vmail/sieve/%u.sieve
sieve_pipe_bin_dir = /var/vmail/sieve/bin sieve_pipe_bin_dir = /var/vmail/sieve/bin
sieve_extensions = +vnd.dovecot.pipe sieve_extensions = +vnd.dovecot.pipe
sieve_after = /var/vmail/sieve/global/spam-to-folder.sieve sieve_after = /var/vmail/sieve/global/spam-to-folder.sieve

View file

@ -20,10 +20,6 @@ directories = {
'owner': 'vmail', 'owner': 'vmail',
'group': 'vmail', 'group': 'vmail',
}, },
'/var/vmail/index': {
'owner': 'vmail',
'group': 'vmail',
},
'/var/vmail/sieve': { '/var/vmail/sieve': {
'owner': 'vmail', 'owner': 'vmail',
'group': 'vmail', 'group': 'vmail',

View file

@ -13,26 +13,15 @@ defaults = {
'catdoc': {}, # catdoc, catppt, xls2csv 'catdoc': {}, # catdoc, catppt, xls2csv
}, },
}, },
'dovecot': {
'database': {
'dbname': 'mailserver',
'dbuser': 'mailserver',
},
},
'letsencrypt': { 'letsencrypt': {
'reload_after': { 'reload_after': {
'dovecot', 'dovecot',
}, },
}, },
'nftables': { 'dovecot': {
'input': { 'database': {
'tcp dport {143, 993, 4190} accept', 'dbname': 'mailserver',
}, 'dbuser': 'mailserver',
},
'systemd-timers': {
'dovecot-optimize-index': {
'command': '/usr/bin/doveadm fts optimize -A',
'when': 'daily',
}, },
}, },
} }

View file

@ -0,0 +1,6 @@
# directories = {
# '/var/lib/downloads': {
# 'owner': 'downloads',
# 'group': 'www-data',
# }
# }

View file

@ -1,23 +0,0 @@
Pg Pass workaround: set manually:
```
root@freescout /ro psql freescout
psql (15.6 (Debian 15.6-0+deb12u1))
Type "help" for help.
freescout=# \password freescout
Enter new password for user "freescout":
Enter it again:
freescout=#
\q
```
# problems
# check if /opt/freescout/.env is resettet
# ckeck `psql -h localhost -d freescout -U freescout -W`with pw from .env
# chown -R www-data:www-data /opt/freescout
# sudo su - www-data -c 'php /opt/freescout/artisan freescout:clear-cache' -s /bin/bash
# javascript funny? `sudo su - www-data -c 'php /opt/freescout/artisan storage:link' -s /bin/bash`
# benutzer bilder weg? aus dem backup holen: `/opt/freescout/.zfs/snapshot/zfs-auto-snap_hourly-2024-11-22-1700/storage/app/public/users` `./customers`

View file

@ -1,66 +0,0 @@
# https://github.com/freescout-helpdesk/freescout/wiki/Installation-Guide
run_as = repo.libs.tools.run_as
php_version = node.metadata.get('php/version')
directories = {
'/opt/freescout': {
'owner': 'www-data',
'group': 'www-data',
# chown -R www-data:www-data /opt/freescout
},
}
actions = {
# 'clone_freescout': {
# 'command': run_as('www-data', 'git clone https://github.com/freescout-helpdesk/freescout.git /opt/freescout'),
# 'unless': 'test -e /opt/freescout/.git',
# 'needs': [
# 'pkg_apt:git',
# 'directory:/opt/freescout',
# ],
# },
# 'pull_freescout': {
# 'command': run_as('www-data', 'git -C /opt/freescout fetch origin dist && git -C /opt/freescout reset --hard origin/dist && git -C /opt/freescout clean -f'),
# 'unless': run_as('www-data', 'git -C /opt/freescout fetch origin && git -C /opt/freescout status -uno | grep -q "Your branch is up to date"'),
# 'needs': [
# 'action:clone_freescout',
# ],
# 'triggers': [
# 'action:freescout_artisan_update',
# f'svc_systemd:php{php_version}-fpm.service:restart',
# ],
# },
# 'freescout_artisan_update': {
# 'command': run_as('www-data', 'php /opt/freescout/artisan freescout:after-app-update'),
# 'triggered': True,
# 'needs': [
# f'svc_systemd:php{php_version}-fpm.service:restart',
# 'action:pull_freescout',
# ],
# },
}
# svc_systemd = {
# f'freescout-cron.service': {},
# }
# files = {
# '/opt/freescout/.env': {
# # https://github.com/freescout-helpdesk/freescout/blob/dist/.env.example
# # Every time you are making changes in .env file, in order changes to take an effect you need to run:
# # ´sudo su - www-data -c 'php /opt/freescout/artisan freescout:clear-cache' -s /bin/bash´
# 'owner': 'www-data',
# 'content': '\n'.join(
# f'{k}={v}' for k, v in
# sorted(node.metadata.get('freescout/env').items())
# ) + '\n',
# 'needs': [
# 'directory:/opt/freescout',
# 'action:clone_freescout',
# ],
# },
# }
#sudo su - www-data -s /bin/bash -c 'php /opt/freescout/artisan freescout:create-user --role admin --firstName M --lastName W --email freescout@freibrief.net --password gyh.jzv2bnf6hvc.HKG --no-interaction'
#sudo su - www-data -s /bin/bash -c 'php /opt/freescout/artisan freescout:create-user --role admin --firstName M --lastName W --email freescout@freibrief.net --password gyh.jzv2bnf6hvc.HKG --no-interaction'

View file

@ -1,121 +0,0 @@
from base64 import b64decode
# hash: SCRAM-SHA-256$4096:tQNfqQi7seqNDwJdHqCHbg==$r3ibECluHJaY6VRwpvPqrtCjgrEK7lAkgtUO8/tllTU=:+eeo4M0L2SowfyHFxT2FRqGzezve4ZOEocSIo11DATA=
database_password = repo.vault.password_for(f'{node.name} postgresql freescout').value
defaults = {
'apt': {
'packages': {
'git': {},
'php': {},
'php-pgsql': {},
'php-fpm': {},
'php-mbstring': {},
'php-xml': {},
'php-imap': {},
'php-zip': {},
'php-gd': {},
'php-curl': {},
'php-intl': {},
},
},
'freescout': {
'env': {
'APP_TIMEZONE': 'Europe/Berlin',
'DB_CONNECTION': 'pgsql',
'DB_HOST': '127.0.0.1',
'DB_PORT': '5432',
'DB_DATABASE': 'freescout',
'DB_USERNAME': 'freescout',
'DB_PASSWORD': database_password,
'APP_KEY': 'base64:' + repo.vault.random_bytes_as_base64_for(f'{node.name} freescout APP_KEY', length=32).value
},
},
'php': {
'php.ini': {
'cgi': {
'fix_pathinfo': '0',
},
},
},
'postgresql': {
'roles': {
'freescout': {
'password_hash': repo.libs.postgres.generate_scram_sha_256(
database_password,
b64decode(repo.vault.random_bytes_as_base64_for(f'{node.name} postgres freescout', length=16).value.encode()),
),
},
},
'databases': {
'freescout': {
'owner': 'freescout',
},
},
},
# 'systemd': {
# 'units': {
# f'freescout-cron.service': {
# 'Unit': {
# 'Description': 'Freescout Cron',
# 'After': 'network.target',
# },
# 'Service': {
# 'User': 'www-data',
# 'Nice': 10,
# 'ExecStart': f"/usr/bin/php /opt/freescout/artisan schedule:run"
# },
# 'Install': {
# 'WantedBy': {
# 'multi-user.target'
# }
# },
# }
# },
# },
'systemd-timers': {
'freescout-cron': {
'command': '/usr/bin/php /opt/freescout/artisan schedule:run',
'when': '*-*-* *:*:00',
'RuntimeMaxSec': '180',
'user': 'www-data',
},
},
'zfs': {
'datasets': {
'tank/freescout': {
'mountpoint': '/opt/freescout',
},
},
},
}
@metadata_reactor.provides(
'freescout/env/APP_URL',
)
def freescout(metadata):
return {
'freescout': {
'env': {
'APP_URL': 'https://' + metadata.get('freescout/domain') + '/',
},
},
}
@metadata_reactor.provides(
'nginx/vhosts',
)
def nginx(metadata):
return {
'nginx': {
'vhosts': {
metadata.get('freescout/domain'): {
'content': 'freescout/vhost.conf',
},
},
},
}

View file

@ -8,15 +8,7 @@ defaults = {
'python3-crcmod': {}, 'python3-crcmod': {},
}, },
'sources': { 'sources': {
'google-cloud': { 'deb https://packages.cloud.google.com/apt cloud-sdk main',
'url': 'https://packages.cloud.google.com/apt/',
'suites': {
'cloud-sdk',
},
'components': {
'main',
},
},
}, },
}, },
} }

View file

@ -1,4 +1,3 @@
[DEFAULT]
APP_NAME = ckn-gitea APP_NAME = ckn-gitea
RUN_USER = git RUN_USER = git
RUN_MODE = prod RUN_MODE = prod
@ -14,24 +13,40 @@ MEMBERS_PAGING_NUM = 100
[server] [server]
PROTOCOL = http PROTOCOL = http
SSH_DOMAIN = ${domain}
DOMAIN = ${domain}
HTTP_ADDR = 0.0.0.0 HTTP_ADDR = 0.0.0.0
HTTP_PORT = 3500 HTTP_PORT = 3500
ROOT_URL = https://${domain}/
DISABLE_SSH = true DISABLE_SSH = true
SSH_PORT = 22 SSH_PORT = 22
LFS_START_SERVER = true LFS_START_SERVER = true
LFS_CONTENT_PATH = /var/lib/gitea/data/lfs LFS_CONTENT_PATH = /var/lib/gitea/data/lfs
LFS_JWT_SECRET = ${lfs_secret_key}
OFFLINE_MODE = true OFFLINE_MODE = true
START_SSH_SERVER = false START_SSH_SERVER = false
DISABLE_ROUTER_LOG = true DISABLE_ROUTER_LOG = true
LANDING_PAGE = explore LANDING_PAGE = explore
[database]
DB_TYPE = postgres
HOST = ${database.get('host')}:${database.get('port')}
NAME = ${database.get('database')}
USER = ${database.get('username')}
PASSWD = ${database.get('password')}
SSL_MODE = disable
LOG_SQL = false
[admin] [admin]
DEFAULT_EMAIL_NOTIFICATIONS = onmention DEFAULT_EMAIL_NOTIFICATIONS = onmention
DISABLE_REGULAR_ORG_CREATION = true DISABLE_REGULAR_ORG_CREATION = true
[security] [security]
INTERNAL_TOKEN = ${internal_token}
INSTALL_LOCK = true INSTALL_LOCK = true
SECRET_KEY = ${security_secret_key}
LOGIN_REMEMBER_DAYS = 30 LOGIN_REMEMBER_DAYS = 30
DISABLE_GIT_HOOKS = ${str(not enable_git_hooks).lower()}
[openid] [openid]
ENABLE_OPENID_SIGNIN = false ENABLE_OPENID_SIGNIN = false
@ -40,13 +55,19 @@ ENABLE_OPENID_SIGNUP = false
[service] [service]
REGISTER_EMAIL_CONFIRM = true REGISTER_EMAIL_CONFIRM = true
ENABLE_NOTIFY_MAIL = true ENABLE_NOTIFY_MAIL = true
DISABLE_REGISTRATION = true DISABLE_REGISTRATION = false
ALLOW_ONLY_EXTERNAL_REGISTRATION = false ALLOW_ONLY_EXTERNAL_REGISTRATION = false
ENABLE_CAPTCHA = false ENABLE_CAPTCHA = false
REQUIRE_SIGNIN_VIEW = false REQUIRE_SIGNIN_VIEW = false
DEFAULT_KEEP_EMAIL_PRIVATE = true DEFAULT_KEEP_EMAIL_PRIVATE = true
DEFAULT_ALLOW_CREATE_ORGANIZATION = false DEFAULT_ALLOW_CREATE_ORGANIZATION = false
DEFAULT_ENABLE_TIMETRACKING = true DEFAULT_ENABLE_TIMETRACKING = true
NO_REPLY_ADDRESS = noreply.${domain}
[mailer]
ENABLED = true
MAILER_TYPE = sendmail
FROM = "${app_name}" <noreply@${domain}>
[session] [session]
PROVIDER = file PROVIDER = file
@ -59,6 +80,9 @@ ENABLE_FEDERATED_AVATAR = false
MODE = console MODE = console
LEVEL = warn LEVEL = warn
[oauth2]
JWT_SECRET = ${oauth_secret_key}
[other] [other]
SHOW_FOOTER_BRANDING = true SHOW_FOOTER_BRANDING = true
SHOW_FOOTER_TEMPLATE_LOAD_TIME = false SHOW_FOOTER_TEMPLATE_LOAD_TIME = false
@ -66,10 +90,3 @@ SHOW_FOOTER_TEMPLATE_LOAD_TIME = false
[webhook] [webhook]
ALLOWED_HOST_LIST = * ALLOWED_HOST_LIST = *
DELIVER_TIMEOUT = 600 DELIVER_TIMEOUT = 600
[indexer]
REPO_INDEXER_ENABLED = true
MAX_FILE_SIZE = 10240000
[queue.issue_indexer]
LENGTH = 20

View file

@ -1,15 +1,8 @@
from os.path import join version = version=node.metadata.get('gitea/version')
from bundlewrap.utils.dicts import merge_dict
version = node.metadata.get('gitea/version')
assert not version.startswith('v')
arch = node.metadata.get('system/architecture')
downloads['/usr/local/bin/gitea'] = { downloads['/usr/local/bin/gitea'] = {
# https://forgejo.org/releases/ 'url': f'https://dl.gitea.io/gitea/{version}/gitea-{version}-linux-amd64',
'url': f'https://codeberg.org/forgejo/forgejo/releases/download/v{version}/forgejo-{version}-linux-{arch}', 'sha256': node.metadata.get('gitea/sha256'),
'sha256_url': '{url}.sha256',
'triggers': { 'triggers': {
'svc_systemd:gitea:restart', 'svc_systemd:gitea:restart',
}, },
@ -41,14 +34,8 @@ actions = {
} }
files['/etc/gitea/app.ini'] = { files['/etc/gitea/app.ini'] = {
'content': repo.libs.ini.dumps( 'content_type': 'mako',
merge_dict(
repo.libs.ini.parse(open(join(repo.path, 'bundles', 'gitea', 'files', 'app.ini')).read()),
node.metadata.get('gitea/conf'),
),
),
'owner': 'git', 'owner': 'git',
'mode': '0600',
'context': node.metadata['gitea'], 'context': node.metadata['gitea'],
'triggers': { 'triggers': {
'svc_systemd:gitea:restart', 'svc_systemd:gitea:restart',

View file

@ -1,4 +1,4 @@
database_password = repo.vault.password_for(f'{node.name} postgresql gitea').value database_password = repo.vault.password_for(f'{node.name} postgresql gitea')
defaults = { defaults = {
'apt': { 'apt': {
@ -11,20 +11,18 @@ defaults = {
}, },
}, },
'gitea': { 'gitea': {
'conf': { 'database': {
'DEFAULT': { 'host': 'localhost',
'WORK_PATH': '/var/lib/gitea', 'port': '5432',
}, 'username': 'gitea',
'database': { 'password': database_password,
'DB_TYPE': 'postgres', 'database': 'gitea',
'HOST': 'localhost:5432',
'NAME': 'gitea',
'USER': 'gitea',
'PASSWD': database_password,
'SSL_MODE': 'disable',
'LOG_SQL': 'false',
},
}, },
'app_name': 'Gitea',
'lfs_secret_key': repo.vault.password_for(f'{node.name} gitea lfs_secret_key', length=43),
'security_secret_key': repo.vault.password_for(f'{node.name} gitea security_secret_key'),
'oauth_secret_key': repo.vault.password_for(f'{node.name} gitea oauth_secret_key', length=43),
'internal_token': repo.vault.password_for(f'{node.name} gitea internal_token'),
}, },
'postgresql': { 'postgresql': {
'roles': { 'roles': {
@ -43,7 +41,8 @@ defaults = {
'gitea.service': { 'gitea.service': {
'Unit': { 'Unit': {
'Description': 'gitea', 'Description': 'gitea',
'After': {'syslog.target', 'network.target'}, 'After': 'syslog.target',
'After': 'network.target',
'Requires': 'postgresql.service', 'Requires': 'postgresql.service',
}, },
'Service': { 'Service': {
@ -67,40 +66,21 @@ defaults = {
'home': '/home/git', 'home': '/home/git',
}, },
}, },
'zfs': {
'datasets': {
'tank/gitea': {
'mountpoint': '/var/lib/gitea',
},
},
},
} }
@metadata_reactor.provides( @metadata_reactor.provides(
'gitea/conf', 'zfs/datasets',
) )
def conf(metadata): def zfs(metadata):
domain = metadata.get('gitea/domain') if not node.has_bundle('zfs'):
return {}
return { return {
'gitea': { 'zfs': {
'conf': { 'datasets': {
'server': { f"{metadata.get('zfs/storage_classes/ssd')}/gitea": {
'SSH_DOMAIN': domain, 'mountpoint': '/var/lib/gitea',
'DOMAIN': domain,
'ROOT_URL': f'https://{domain}/',
'LFS_JWT_SECRET': repo.vault.password_for(f'{node.name} gitea lfs_secret_key', length=43),
},
'security': {
'INTERNAL_TOKEN': repo.vault.password_for(f'{node.name} gitea internal_token'),
'SECRET_KEY': repo.vault.password_for(f'{node.name} gitea security_secret_key'),
},
'service': {
'NO_REPLY_ADDRESS': f'noreply.{domain}',
},
'oauth2': {
'JWT_SECRET': repo.vault.password_for(f'{node.name} gitea oauth_secret_key', length=43),
}, },
}, },
}, },
@ -118,7 +98,7 @@ def nginx(metadata):
'content': 'nginx/proxy_pass.conf', 'content': 'nginx/proxy_pass.conf',
'context': { 'context': {
'target': 'http://127.0.0.1:3500', 'target': 'http://127.0.0.1:3500',
}, }
}, },
}, },
}, },

View file

@ -18,17 +18,16 @@ admin_password = node.metadata.get('grafana/config/security/admin_password')
port = node.metadata.get('grafana/config/server/http_port') port = node.metadata.get('grafana/config/server/http_port')
actions['reset_grafana_admin_password'] = { actions['reset_grafana_admin_password'] = {
'command': f"grafana-cli admin reset-admin-password {quote(admin_password)}", 'command': f"grafana-cli admin reset-admin-password {quote(admin_password)}",
'unless': f"sleep 5 && curl http://admin:{quote(admin_password)}@localhost:{port}/api/org --fail", 'unless': f"curl http://admin:{quote(admin_password)}@localhost:{port}/api/org",
'needs': [ 'needs': [
'svc_systemd:grafana-server', 'svc_systemd:grafana-server',
], ],
} }
directories = { directories = {
'/etc/grafana': {}, '/etc/grafana': {
},
'/etc/grafana/provisioning': { '/etc/grafana/provisioning': {
'owner': 'grafana',
'group': 'grafana',
}, },
'/etc/grafana/provisioning/datasources': { '/etc/grafana/provisioning/datasources': {
'purge': True, 'purge': True,
@ -36,13 +35,8 @@ directories = {
'/etc/grafana/provisioning/dashboards': { '/etc/grafana/provisioning/dashboards': {
'purge': True, 'purge': True,
}, },
'/var/lib/grafana': { '/var/lib/grafana': {},
'owner': 'grafana',
'group': 'grafana',
},
'/var/lib/grafana/dashboards': { '/var/lib/grafana/dashboards': {
'owner': 'grafana',
'group': 'grafana',
'purge': True, 'purge': True,
'triggers': [ 'triggers': [
'svc_systemd:grafana-server:restart', 'svc_systemd:grafana-server:restart',
@ -53,8 +47,6 @@ directories = {
files = { files = {
'/etc/grafana/grafana.ini': { '/etc/grafana/grafana.ini': {
'content': repo.libs.ini.dumps(node.metadata.get('grafana/config')), 'content': repo.libs.ini.dumps(node.metadata.get('grafana/config')),
'owner': 'grafana',
'group': 'grafana',
'triggers': [ 'triggers': [
'svc_systemd:grafana-server:restart', 'svc_systemd:grafana-server:restart',
], ],
@ -64,8 +56,6 @@ files = {
'apiVersion': 1, 'apiVersion': 1,
'datasources': list(node.metadata.get('grafana/datasources').values()), 'datasources': list(node.metadata.get('grafana/datasources').values()),
}), }),
'owner': 'grafana',
'group': 'grafana',
'triggers': [ 'triggers': [
'svc_systemd:grafana-server:restart', 'svc_systemd:grafana-server:restart',
], ],
@ -82,8 +72,6 @@ files = {
}, },
}], }],
}), }),
'owner': 'grafana',
'group': 'grafana',
'triggers': [ 'triggers': [
'svc_systemd:grafana-server:restart', 'svc_systemd:grafana-server:restart',
], ],
@ -172,8 +160,6 @@ for dashboard_id, monitored_node in enumerate(monitored_nodes, start=1):
files[f'/var/lib/grafana/dashboards/{monitored_node.name}.json'] = { files[f'/var/lib/grafana/dashboards/{monitored_node.name}.json'] = {
'content': json.dumps(dashboard, indent=4), 'content': json.dumps(dashboard, indent=4),
'owner': 'grafana',
'group': 'grafana',
'triggers': [ 'triggers': [
'svc_systemd:grafana-server:restart', 'svc_systemd:grafana-server:restart',
] ]

View file

@ -8,33 +8,16 @@ defaults = {
'grafana': {}, 'grafana': {},
}, },
'sources': { 'sources': {
'grafana': { 'deb https://packages.grafana.com/oss/deb stable main',
'urls': {
'https://packages.grafana.com/oss/deb',
},
'suites': {
'stable',
},
'components': {
'main',
},
},
}, },
}, },
'grafana': { 'grafana': {
'config': { 'config': {
'server': { 'server': {
'http_port': 8300, 'http_port': 8300,
'http_addr': '127.0.0.1',
'enable_gzip': True,
}, },
'database': { 'database': {
'type': 'postgres', 'url': f'postgres://grafana:{postgres_password}@localhost:5432/grafana',
'host': '127.0.0.1:5432',
'name': 'grafana',
'user': 'grafana',
'password': postgres_password,
}, },
'remote_cache': { 'remote_cache': {
'type': 'redis', 'type': 'redis',
@ -69,9 +52,6 @@ defaults = {
}, },
}, },
}, },
'nginx': {
'has_websockets': True,
},
} }
@ -86,7 +66,7 @@ def domain(metadata):
'domain': metadata.get('grafana/hostname'), 'domain': metadata.get('grafana/hostname'),
}, },
}, },
}, },
} }
@metadata_reactor.provides( @metadata_reactor.provides(
@ -94,7 +74,7 @@ def domain(metadata):
) )
def influxdb2(metadata): def influxdb2(metadata):
influxdb_metadata = repo.get_node(metadata.get('grafana/influxdb_node')).metadata.get('influxdb') influxdb_metadata = repo.get_node(metadata.get('grafana/influxdb_node')).metadata.get('influxdb')
return { return {
'grafana': { 'grafana': {
'datasources': { 'datasources': {
@ -113,7 +93,7 @@ def influxdb2(metadata):
'isDefault': True, 'isDefault': True,
}, },
}, },
}, },
} }
@ -126,7 +106,7 @@ def datasource_key_to_name(metadata):
'datasources': { 'datasources': {
name: {'name': name} for name in metadata.get('grafana/datasources').keys() name: {'name': name} for name in metadata.get('grafana/datasources').keys()
}, },
}, },
} }
@ -136,7 +116,7 @@ def datasource_key_to_name(metadata):
def dns(metadata): def dns(metadata):
return { return {
'dns': { 'dns': {
metadata.get('grafana/hostname'): repo.libs.ip.get_a_records(metadata), metadata.get('grafana/hostname'): repo.libs.dns.get_a_records(metadata),
} }
} }
@ -147,7 +127,6 @@ def dns(metadata):
def nginx(metadata): def nginx(metadata):
return { return {
'nginx': { 'nginx': {
'has_websockets': True,
'vhosts': { 'vhosts': {
metadata.get('grafana/hostname'): { metadata.get('grafana/hostname'): {
'content': 'grafana/vhost.conf', 'content': 'grafana/vhost.conf',

View file

@ -1,23 +0,0 @@
https://github.com/home-assistant/supervised-installer?tab=readme-ov-file
https://github.com/home-assistant/os-agent/tree/main?tab=readme-ov-file#using-home-assistant-supervised-on-debian
https://docs.docker.com/engine/install/debian/
https://www.home-assistant.io/installation/linux#install-home-assistant-supervised
https://github.com/home-assistant/supervised-installer
https://github.com/home-assistant/architecture/blob/master/adr/0014-home-assistant-supervised.md
DATA_SHARE=/usr/share/hassio dpkg --force-confdef --force-confold -i homeassistant-supervised.deb
neu debian
ha installieren
gucken ob geht
dann bw drüberbügeln
https://www.home-assistant.io/integrations/http/#ssl_certificate
`wget "$(curl -L https://api.github.com/repos/home-assistant/supervised-installer/releases/latest | jq -r '.assets[0].browser_download_url')" -O homeassistant-supervised.deb && dpkg -i homeassistant-supervised.deb`

View file

@ -1,30 +0,0 @@
from shlex import quote
version = node.metadata.get('homeassistant/os_agent_version')
directories = {
'/usr/share/hassio': {},
}
actions = {
'install_os_agent': {
'command': ' && '.join([
f'wget -O /tmp/os-agent.deb https://github.com/home-assistant/os-agent/releases/download/{quote(version)}/os-agent_{quote(version)}_linux_aarch64.deb',
'DEBIAN_FRONTEND=noninteractive dpkg -i /tmp/os-agent.deb',
]),
'unless': f'test "$(apt -qq list os-agent | cut -d" " -f2)" = "{quote(version)}"',
'needs': {
'pkg_apt:',
'zfs_dataset:tank/homeassistant',
},
},
'install_homeassistant_supervised': {
'command': 'wget -O /tmp/homeassistant-supervised.deb https://github.com/home-assistant/supervised-installer/releases/latest/download/homeassistant-supervised.deb && apt install /tmp/homeassistant-supervised.deb',
'unless': 'apt -qq list homeassistant-supervised | grep -q "installed"',
'needs': {
'action:install_os_agent',
},
},
}

View file

@ -1,65 +0,0 @@
defaults = {
'apt': {
'packages': {
# homeassistant-supervised
'apparmor': {},
'bluez': {},
'cifs-utils': {},
'curl': {},
'dbus': {},
'jq': {},
'libglib2.0-bin': {},
'lsb-release': {},
'network-manager': {},
'nfs-common': {},
'systemd-journal-remote': {},
'systemd-resolved': {},
'udisks2': {},
'wget': {},
# docker
'docker-ce': {},
'docker-ce-cli': {},
'containerd.io': {},
'docker-buildx-plugin': {},
'docker-compose-plugin': {},
},
'sources': {
# docker: https://docs.docker.com/engine/install/debian/#install-using-the-repository
'docker': {
'urls': {
'https://download.docker.com/linux/debian',
},
'suites': {
'{codename}',
},
'components': {
'stable',
},
},
},
},
'zfs': {
'datasets': {
'tank/homeassistant': {
'mountpoint': '/usr/share/hassio',
'needed_by': {
'directory:/usr/share/hassio',
},
},
},
},
}
@metadata_reactor.provides(
'nginx/vhosts',
)
def nginx(metadata):
return {
'nginx': {
'vhosts': {
metadata.get('homeassistant/domain'): {
'content': 'homeassistant/vhost.conf',
},
},
},
}

View file

@ -0,0 +1,20 @@
users = {
'homeassistant': {
'home': '/var/lib/homeassistant',
},
}
directories = {
'/var/lib/homeassistant': {
'owner': 'homeassistant',
},
'/var/lib/homeassistant/config': {
'owner': 'homeassistant',
},
'/var/lib/homeassistant/venv': {
'owner': 'homeassistant',
},
}
# https://wiki.instar.com/de/Software/Linux/Home_Assistant/

View file

@ -0,0 +1,9 @@
defaults = {
'apt': {
'packages': {
'python3-dev': {},
'python3-pip': {},
'python3-venv': {},
},
},
}

View file

@ -23,6 +23,6 @@ def hostname_file(metadata):
def dns(metadata): def dns(metadata):
return { return {
'dns': { 'dns': {
metadata.get('hostname'): repo.libs.ip.get_a_records(metadata), metadata.get('hostname'): repo.libs.dns.get_a_records(metadata, external=False),
}, },
} }

View file

@ -24,7 +24,7 @@ header_margin=1
detailed_cpu_time=0 detailed_cpu_time=0
cpu_count_from_one=1 cpu_count_from_one=1
show_cpu_usage=0 show_cpu_usage=0
show_cpu_frequency=1 show_cpu_frequency=0
show_cpu_temperature=0 show_cpu_temperature=0
degree_fahrenheit=0 degree_fahrenheit=0
update_process_names=0 update_process_names=0

View file

@ -1,36 +0,0 @@
#!/bin/sh
UNKNOWN=3
if [ -z "$SSHMON_COMMAND" ]
then
echo 'check_by_sshmon: Env SSHMON_COMMAND missing' >&2
exit $UNKNOWN
elif [ -z "$SSHMON_HOST" ]
then
echo 'check_by_sshmon: Env SSHMON_HOST missing' >&2
exit $UNKNOWN
fi
if [ -z "$SSHMON_SUDO" ]
then
PREFIX=""
else
PREFIX="sudo "
fi
ssh sshmon@"$SSHMON_HOST" "$PREFIX$SSHMON_COMMAND"
exitcode=$?
if [ "$exitcode" = 124 ]
then
echo 'check_by_sshmon: Timeout while running check remotely' >&2
exit $UNKNOWN
elif [ "$exitcode" = 255 ]
then
echo 'check_by_sshmon: SSH error' >&2
exit $UNKNOWN
else
exit $exitcode
fi

View file

@ -6,18 +6,6 @@
* optional parameters. * optional parameters.
*/ */
object CheckCommand "sshmon" {
import "ipv4-or-ipv6"
command = [ "/usr/lib/nagios/plugins/check_by_sshmon" ]
env.SSHMON_COMMAND = "$command$"
env.SSHMON_HOST = "$address$"
env.SSHMON_SUDO = "$sudo$"
}
object NotificationCommand "mail-host-notification" { object NotificationCommand "mail-host-notification" {
command = [ ConfigDir + "/scripts/mail-host-notification.sh" ] command = [ ConfigDir + "/scripts/mail-host-notification.sh" ]

View file

@ -13,9 +13,9 @@ apply Notification "mail-icingaadmin" to Host {
user_groups = host.vars.notification.mail.groups user_groups = host.vars.notification.mail.groups
users = host.vars.notification.mail.users users = host.vars.notification.mail.users
//interval = 2h
//vars.notification_logtosyslog = true
assign where host.vars.notification.mail assign where host.vars.notification.mail
} }
@ -25,9 +25,9 @@ apply Notification "mail-icingaadmin" to Service {
user_groups = host.vars.notification.mail.groups user_groups = host.vars.notification.mail.groups
users = host.vars.notification.mail.users users = host.vars.notification.mail.users
//interval = 2h
//vars.notification_logtosyslog = true
assign where host.vars.notification.mail assign where host.vars.notification.mail
} }

View file

@ -1,7 +0,0 @@
/**
* The JournaldLogger type writes log information to the systemd journal.
*/
object JournaldLogger "journald" {
severity = "warning"
}

View file

@ -1,4 +0,0 @@
/**
* This file is requires for inital apt install.
* The JournaldLogger type writes log information to the systemd journal.
*/

View file

@ -0,0 +1,3 @@
object SyslogLogger "syslog" {
severity = "warning"
}

View file

@ -14,8 +14,7 @@
if key.endswith('_interval'): if key.endswith('_interval'):
return value return value
else: else:
escaped_value = value.replace('$', '$$').replace('"', '\\"') return f'"{value}"'
return f'"{escaped_value}"'
elif isinstance(value, (list, set)): elif isinstance(value, (list, set)):
return '[' + ', '.join(render_value(e) for e in sorted(value)) + ']' return '[' + ', '.join(render_value(e) for e in sorted(value)) + ']'
else: else:
@ -29,7 +28,7 @@ object Host "${host_name}" {
% endfor % endfor
} }
% for service_name, service_config in sorted(services.items()): % for service_name, service_config in sorted(services.items(), key=lambda e: [e[1]['vars.bundle'], e[0]]):
object Service "${service_name}" { object Service "${service_name}" {
import "generic-service" import "generic-service"
% for key, value in sorted(service_config.items()): % for key, value in sorted(service_config.items()):

View file

@ -5,6 +5,6 @@ include <itl>
include <plugins> include <plugins>
include <plugins-contrib> include <plugins-contrib>
include "features-enabled/*.conf" include "features.d/*.conf"
include_recursive "conf.d" include_recursive "conf.d"
include "hosts.d/*.conf" include "hosts.d/*.conf"

View file

@ -10,24 +10,6 @@ directories = {
'svc_systemd:icinga2.service:restart', 'svc_systemd:icinga2.service:restart',
], ],
}, },
'/etc/icinga2/pki': { # required for apt install
'purge': True,
'owner': 'nagios',
'group': 'nagios',
'mode': '0750',
'triggers': [
'svc_systemd:icinga2.service:restart',
],
},
'/etc/icinga2/zones.d': { # required for apt install
'purge': True,
'owner': 'nagios',
'group': 'nagios',
'mode': '0750',
'triggers': [
'svc_systemd:icinga2.service:restart',
],
},
'/etc/icinga2/conf.d': { '/etc/icinga2/conf.d': {
'purge': True, 'purge': True,
'owner': 'nagios', 'owner': 'nagios',
@ -46,16 +28,7 @@ directories = {
'svc_systemd:icinga2.service:restart', 'svc_systemd:icinga2.service:restart',
], ],
}, },
'/etc/icinga2/features-available': { '/etc/icinga2/features.d': {
'purge': True,
'owner': 'nagios',
'group': 'nagios',
'mode': '0750',
'triggers': [
'svc_systemd:icinga2.service:restart',
],
},
'/etc/icinga2/features-enabled': {
'purge': True, 'purge': True,
'owner': 'nagios', 'owner': 'nagios',
'group': 'nagios', 'group': 'nagios',
@ -196,6 +169,50 @@ files = {
'svc_systemd:icinga2.service:restart', 'svc_systemd:icinga2.service:restart',
], ],
}, },
'/etc/icinga2/features.d/ido-pgsql.conf': {
'source': 'features/ido-pgsql.conf',
'content_type': 'mako',
'owner': 'nagios',
'group': 'nagios',
'context': {
'db_password': node.metadata.get('postgresql/roles/icinga2/password')
},
'triggers': [
'svc_systemd:icinga2.service:restart',
],
},
'/etc/icinga2/features.d/syslog.conf': {
'source': 'features/syslog.conf',
'owner': 'nagios',
'group': 'nagios',
'triggers': [
'svc_systemd:icinga2.service:restart',
],
},
'/etc/icinga2/features.d/notification.conf': {
'source': 'features/notification.conf',
'owner': 'nagios',
'group': 'nagios',
'triggers': [
'svc_systemd:icinga2.service:restart',
],
},
'/etc/icinga2/features.d/checker.conf': {
'source': 'features/checker.conf',
'owner': 'nagios',
'group': 'nagios',
'triggers': [
'svc_systemd:icinga2.service:restart',
],
},
'/etc/icinga2/features.d/api.conf': {
'source': 'features/api.conf',
'owner': 'nagios',
'group': 'nagios',
'triggers': [
'svc_systemd:icinga2.service:restart',
],
},
'/var/lib/icinga2/certs/ca.crt': { '/var/lib/icinga2/certs/ca.crt': {
'content_type': 'download', 'content_type': 'download',
'source': f'https://letsencrypt.org/certs/isrg-root-x1-cross-signed.pem', 'source': f'https://letsencrypt.org/certs/isrg-root-x1-cross-signed.pem',
@ -205,49 +222,11 @@ files = {
'svc_systemd:icinga2.service:restart', 'svc_systemd:icinga2.service:restart',
], ],
}, },
'/usr/lib/nagios/plugins/check_by_sshmon': {
'mode': '0755',
},
} }
# FEATURES
for feature, context in {
'mainlog': {},
# 'journald': {}, FIXME
'notification': {},
'checker': {},
'api': {},
'ido-pgsql': {
'db_password': node.metadata.get('postgresql/roles/icinga2/password'),
},
}.items():
files[f'/etc/icinga2/features-available/{feature}.conf'] = {
'content_type': 'mako' if context else 'text',
'context': context,
'source': f'features/{feature}.conf',
'owner': 'nagios',
'group': 'nagios',
'triggers': [
'svc_systemd:icinga2.service:restart',
],
}
symlinks[f'/etc/icinga2/features-enabled/{feature}.conf'] = {
'target': f'/etc/icinga2/features-available/{feature}.conf',
'owner': 'nagios',
'group': 'nagios',
'triggers': [
'svc_systemd:icinga2.service:restart',
],
}
# HOSTS
for other_node in repo.nodes: for other_node in repo.nodes:
if other_node.dummy: if other_node.dummy:
continue continue
elif not other_node.in_group('monitored'):
continue
files[f'/etc/icinga2/hosts.d/{other_node.name}.conf'] = { files[f'/etc/icinga2/hosts.d/{other_node.name}.conf'] = {
'content_type': 'mako', 'content_type': 'mako',
@ -258,7 +237,7 @@ for other_node in repo.nodes:
'host_settings': { 'host_settings': {
'address': str(ip_interface(other_node.metadata.get('network/internal/ipv4', None) or other_node.metadata.get('wireguard/my_ip')).ip), 'address': str(ip_interface(other_node.metadata.get('network/internal/ipv4', None) or other_node.metadata.get('wireguard/my_ip')).ip),
}, },
'services': other_node.metadata.get('monitoring/services'), 'services': other_node.metadata.get('monitoring', {}),
}, },
'triggers': [ 'triggers': [
'svc_systemd:icinga2.service:restart', 'svc_systemd:icinga2.service:restart',
@ -269,7 +248,7 @@ svc_systemd = {
'icinga2.service': { 'icinga2.service': {
'needs': [ 'needs': [
'pkg_apt:icinga2-ido-pgsql', 'pkg_apt:icinga2-ido-pgsql',
'svc_systemd:postgresql.service', 'svc_systemd:postgresql',
], ],
}, },
} }

View file

@ -9,21 +9,7 @@ defaults = {
'monitoring-plugins': {}, 'monitoring-plugins': {},
}, },
'sources': { 'sources': {
'icinga': { 'deb https://packages.icinga.com/debian icinga-{release} main',
'types': {
'deb',
'deb-src',
},
'urls': {
'https://packages.icinga.com/debian',
},
'suites': {
'icinga-{codename}',
},
'components': {
'main',
},
},
}, },
}, },
'icinga2': { 'icinga2': {
@ -34,11 +20,6 @@ defaults = {
} }
}, },
}, },
'nftables': {
'input': {
'tcp dport 5665 accept',
},
},
'postgresql': { 'postgresql': {
'databases': { 'databases': {
'icinga2': { 'icinga2': {
@ -51,18 +32,13 @@ defaults = {
}, },
}, },
}, },
'users': {
'nagios': {
'home': '/var/lib/nagios',
'shell': '/usr/sbin/nologin',
},
},
'zfs': { 'zfs': {
'datasets': { 'datasets': {
'tank/icinga2': { 'tank/icinga2': {
'mountpoint': '/var/lib/icinga2', 'mountpoint': '/var/lib/icinga2',
'needed_by': { 'needed_by': {
'pkg_apt:icinga2', 'pkg_apt:icinga2',
'pkg_apt:icingaweb2',
'pkg_apt:icinga2-ido-pgsql', 'pkg_apt:icinga2-ido-pgsql',
}, },
}, },
@ -72,7 +48,7 @@ defaults = {
@metadata_reactor.provides( @metadata_reactor.provides(
'letsencrypt/domains', 'nginx/vhosts',
) )
def letsencrypt(metadata): def letsencrypt(metadata):
return { return {

14
bundles/icingadb/items.py Normal file
View file

@ -0,0 +1,14 @@
import yaml, json
from bundlewrap.metadata import MetadataJSONEncoder
files = {
'/etc/icingadb/config.yml': {
'content': yaml.dump(
json.loads(
json.dumps(node.metadata.get('icingadb'), sort_keys=True, cls=MetadataJSONEncoder)
),
),
'mode': '0640',
'owner': 'icingadb',
},
}

View file

@ -0,0 +1,53 @@
defaults = {
'apt': {
'packages': {
'icingadb': {},
'icingadb-redis': {},
'icingadb-web': {},
},
'sources': {
'deb https://packages.icinga.com/debian icinga-{release} main',
'deb https://packages.icinga.com/debian icinga-{release}-snapshots main',
},
},
'postgresql': {
'databases': {
'icingadb': {
'owner': 'icingadb',
},
},
'roles': {
'icingadb': {
'password': repo.vault.password_for(f'psql icingadb on {node.name}'),
},
},
},
'redis': {
'icingadb': {
'port': '6381',
},
},
}
@metadata_reactor.provides(
'icingadb',
)
def config(metadata):
return {
'icingadb': {
'database': {
'type': 'postgresql',
'host': 'localhost',
'port': 3306,
'database': 'icingadb',
'user': 'icingadb',
'password': metadata.get('postgresql/roles/icingadb/password'),
},
'redis': {
'address': 'localhost:6380',
},
'logging': {
'level': 'info',
},
},
}

View file

@ -2,4 +2,3 @@
- open /icingaweb2/setup in browser - open /icingaweb2/setup in browser
- fill in values from metadata - fill in values from metadata
- apply - apply
- make sure tls cert exists and is owned by nagios

View file

@ -4,27 +4,18 @@ directories = {
'owner': 'www-data', 'owner': 'www-data',
'group': 'icingaweb2', 'group': 'icingaweb2',
'mode': '2770', 'mode': '2770',
'needs': [
'pkg_apt:icingaweb2',
],
}, },
'/etc/icingaweb2/enabledModules': { '/etc/icingaweb2/enabledModules': {
# 'purge': True, # 'purge': True,
'owner': 'www-data', 'owner': 'www-data',
'group': 'icingaweb2', 'group': 'icingaweb2',
'mode': '2770', 'mode': '2770',
'needs': [
'pkg_apt:icingaweb2',
],
}, },
'/etc/icingaweb2/modules': { '/etc/icingaweb2/modules': {
# 'purge': True, # 'purge': True,
'owner': 'www-data', 'owner': 'www-data',
'group': 'icingaweb2', 'group': 'icingaweb2',
'mode': '2770', 'mode': '2770',
'needs': [
'pkg_apt:icingaweb2',
],
}, },
} }
@ -34,9 +25,6 @@ files = {
'owner': 'www-data', 'owner': 'www-data',
'group': 'icingaweb2', 'group': 'icingaweb2',
'mode': '0660', 'mode': '0660',
'needs': [
'pkg_apt:icingaweb2',
],
}, },
} }
@ -45,9 +33,6 @@ symlinks = {
'target': '/usr/share/icingaweb2/modules/monitoring', 'target': '/usr/share/icingaweb2/modules/monitoring',
'owner': 'www-data', 'owner': 'www-data',
'group': 'icingaweb2', 'group': 'icingaweb2',
'needs': [
'pkg_apt:icingaweb2',
],
}, },
} }
@ -63,9 +48,6 @@ for name in [
'owner': 'www-data', 'owner': 'www-data',
'group': 'icingaweb2', 'group': 'icingaweb2',
'mode': '0660', 'mode': '0660',
'needs': [
'pkg_apt:icingaweb2',
],
} }
for name in [ for name in [
@ -78,7 +60,4 @@ for name in [
'owner': 'www-data', 'owner': 'www-data',
'group': 'icingaweb2', 'group': 'icingaweb2',
'mode': '0660', 'mode': '0660',
'needs': [
'pkg_apt:icingaweb2',
],
} }

View file

@ -3,6 +3,7 @@ from hashlib import sha3_256
defaults = { defaults = {
'apt': { 'apt': {
'packages': { 'packages': {
'icingaweb2': {},
'php-ldap': {}, 'php-ldap': {},
'php-json': {}, 'php-json': {},
'php-intl': {}, 'php-intl': {},
@ -10,25 +11,11 @@ defaults = {
'php-gd': {}, 'php-gd': {},
'php-imagick': {}, 'php-imagick': {},
'php-pgsql': {}, 'php-pgsql': {},
'icingaweb2': {}, 'icingaweb2-module-monitoring': {},
#'icingaweb2-module-monitoring': {}, # ?
}, },
'sources': { 'sources': {
'icinga': { 'deb https://packages.icinga.com/debian icinga-{release} main',
'types': { 'deb https://packages.icinga.com/debian icinga-{release}-snapshots main',
'deb',
'deb-src',
},
'urls': {
'https://packages.icinga.com/debian',
},
'suites': {
'icinga-{codename}',
},
'components': {
'main',
},
},
}, },
}, },
'icingaweb2': { 'icingaweb2': {
@ -131,7 +118,7 @@ defaults = {
@metadata_reactor.provides( @metadata_reactor.provides(
'icingaweb2/hostname', 'icingaweb2/hostname',
'icingaweb2/resources.ini/icinga_ido/password', 'icingaweb2/resources.ini/icinga_ido/icinga2/password',
'icingaweb2/monitoring/commandtransports.ini/icinga2/password', 'icingaweb2/monitoring/commandtransports.ini/icinga2/password',
) )
def stuff(metadata): def stuff(metadata):
@ -177,7 +164,6 @@ def nginx(metadata):
metadata.get('icingaweb2/hostname'): { metadata.get('icingaweb2/hostname'): {
'content': 'icingaweb2/vhost.conf', 'content': 'icingaweb2/vhost.conf',
'context': { 'context': {
'php_version': metadata.get('php/version'),
}, },
}, },
}, },

View file

@ -1,3 +0,0 @@
# svc_systemd = {
# 'ifupdown.service': {},
# }

View file

@ -4,9 +4,8 @@ from shlex import quote
directories['/var/lib/influxdb'] = { directories['/var/lib/influxdb'] = {
'owner': 'influxdb', 'owner': 'influxdb',
'group': 'influxdb', 'group': 'influxdb',
'mode': '0750',
'needs': [ 'needs': [
'zfs_dataset:tank/influxdb', f"zfs_dataset:{node.metadata.get('zfs/storage_classes/ssd')}/influxdb",
], ],
} }

View file

@ -7,22 +7,7 @@ defaults = {
'influxdb2-cli': {}, 'influxdb2-cli': {},
}, },
'sources': { 'sources': {
'influxdata': { 'deb https://repos.influxdata.com/debian {release} stable',
'urls': {
'https://repos.influxdata.com/debian',
},
'suites': {
'stable',
},
'components': {
'main',
},
},
},
},
'nftables': {
'input': {
'tcp dport 8200 accept',
}, },
}, },
'influxdb': { 'influxdb': {
@ -62,7 +47,7 @@ def zfs(metadata):
return { return {
'zfs': { 'zfs': {
'datasets': { 'datasets': {
'tank/influxdb': { f"{metadata.get('zfs/storage_classes/ssd')}/influxdb": {
'mountpoint': '/var/lib/influxdb', 'mountpoint': '/var/lib/influxdb',
'recordsize': '8192', 'recordsize': '8192',
'atime': 'off', 'atime': 'off',
@ -78,7 +63,7 @@ def zfs(metadata):
def dns(metadata): def dns(metadata):
return { return {
'dns': { 'dns': {
metadata.get('influxdb/hostname'): repo.libs.ip.get_a_records(metadata), metadata.get('influxdb/hostname'): repo.libs.dns.get_a_records(metadata),
} }
} }

View file

@ -19,7 +19,7 @@ def apt(metadata):
return { return {
'apt': { 'apt': {
'packages': { 'packages': {
f'openjdk-{metadata.get("java/version")}-jre-headless': {}, f'openjdk-{metadata.get("java/version")}-jre': {},
} }
} }
} }

View file

@ -1,21 +0,0 @@
from json import dumps
from bundlewrap.metadata import MetadataJSONEncoder
files = {
'/etc/kea/kea-dhcp4.conf': {
'content': dumps(node.metadata.get('kea'), indent=4, sort_keys=True, cls=MetadataJSONEncoder),
'triggers': [
'svc_systemd:kea-dhcp4-server:restart',
],
},
}
svc_systemd = {
'kea-dhcp4-server': {
'needs': [
'pkg_apt:kea-dhcp4-server',
'file:/etc/kea/kea-dhcp4.conf',
'svc_systemd:systemd-networkd.service:restart',
],
},
}

View file

@ -1,96 +0,0 @@
from ipaddress import ip_interface, ip_network
hashable = repo.libs.hashable.hashable
defaults = {
'apt': {
'packages': {
'kea-dhcp4-server': {},
},
},
'kea': {
'Dhcp4': {
'interfaces-config': {
'interfaces': set(),
},
'lease-database': {
'type': 'memfile',
'lfc-interval': 3600
},
'subnet4': set(),
'loggers': set([
hashable({
'name': 'kea-dhcp4',
'output_options': [
{
'output': 'syslog',
}
],
'severity': 'INFO',
}),
]),
},
},
}
@metadata_reactor.provides(
'kea/Dhcp4/interfaces-config/interfaces',
'kea/Dhcp4/subnet4',
)
def subnets(metadata):
subnet4 = set()
interfaces = set()
reservations = set(
hashable({
'hw-address': network_conf['mac'],
'ip-address': str(ip_interface(network_conf['ipv4']).ip),
})
for other_node in repo.nodes
for network_conf in other_node.metadata.get('network', {}).values()
if 'mac' in network_conf
)
for network_name, network_conf in metadata.get('network').items():
dhcp_server_config = network_conf.get('dhcp_server_config', None)
if dhcp_server_config:
_network = ip_network(dhcp_server_config['subnet'])
subnet4.add(hashable({
'subnet': dhcp_server_config['subnet'],
'pools': [
{
'pool': f'{dhcp_server_config['pool_from']} - {dhcp_server_config['pool_to']}',
},
],
'option-data': [
{
'name': 'routers',
'data': dhcp_server_config['router'],
},
{
'name': 'domain-name-servers',
'data': '10.0.0.1',
},
],
'reservations': set(
reservation
for reservation in reservations
if ip_interface(reservation['ip-address']).ip in _network
),
}))
interfaces.add(network_conf.get('interface', network_name))
return {
'kea': {
'Dhcp4': {
'interfaces-config': {
'interfaces': interfaces,
},
'subnet4': subnet4,
},
},
}

View file

@ -1,40 +0,0 @@
hostname "CroneKorkN : ${name}"
sv_contact "admin@sublimity.de"
sv_steamgroup "${','.join(steamgroups)}"
rcon_password "${rcon_password}"
motd_enabled 0
sv_cheats 1
sv_consistency 0
sv_lan 0
sv_allow_lobby_connect_only 0
sv_gametypes "coop,realism,survival,versus,teamversus,scavenge,teamscavenge"
sv_minrate 30000
sv_maxrate 60000
sv_mincmdrate 66
sv_maxcmdrate 101
sv_logsdir "logs-${name}" //Folder in the game directory where server logs will be stored.
log on //Creates a logfile (on | off)
sv_logecho 0 //default 0; Echo log information to the console.
sv_logfile 1 //default 1; Log server information in the log file.
sv_log_onefile 0 //default 0; Log server information to only one file.
sv_logbans 1 //default 0;Log server bans in the server logs.
sv_logflush 0 //default 0; Flush the log files to disk on each write (slow).

View file

@ -1,122 +1,106 @@
assert node.has_bundle('steam') and node.has_bundle('steam-workshop-download')
directories = { directories = {
'/opt/steam/left4dead2-servers': { '/opt/left4dead2': {
'owner': 'steam', 'owner': 'steam',
'group': 'steam',
'mode': '0755',
'purge': True,
}, },
# Current zfs doesnt support zfs upperdir. The support was added in October 2022. Move upperdir - unused anyway - '/opt/left4dead2/ems/admin system': {
# to another dir. Also move workdir alongside it, as it has to be on same fs. 'owner': 'steam',
'/opt/steam-zfs-overlay-workarounds': { },
'/opt/left4dead2/left4dead2/cfg': {
'owner': 'steam',
},
'/opt/left4dead2/left4dead2/addons': {
'owner': 'steam', 'owner': 'steam',
'group': 'steam',
'mode': '0755',
'purge': True, 'purge': True,
}, },
} }
# /opt/steam/steam/.steam/sdk32/steamclient.so: cannot open shared object file: No such file or directory files = {
symlinks = { '/opt/left4dead2/ems/admin system/admins.txt': {
'/opt/steam/steam/.steam/sdk32': {
'target': '/opt/steam/steam/linux32',
'owner': 'steam', 'owner': 'steam',
'group': 'steam', 'content': '\n'.join(node.metadata.get('left4dead2/admins')),
} }
} }
# svc_systemd = {
# SERVERS 'left4dead2-workshop': {
# 'running': False,
for name, config in node.metadata.get('left4dead2/servers').items():
#overlay
directories[f'/opt/steam/left4dead2-servers/{name}'] = {
'owner': 'steam',
'group': 'steam',
}
directories[f'/opt/steam-zfs-overlay-workarounds/{name}/upper'] = {
'owner': 'steam',
'group': 'steam',
}
directories[f'/opt/steam-zfs-overlay-workarounds/{name}/workdir'] = {
'owner': 'steam',
'group': 'steam',
}
# conf
files[f'/opt/steam/left4dead2-servers/{name}/left4dead2/cfg/server.cfg'] = {
'content_type': 'mako',
'source': 'server.cfg',
'context': {
'name': name,
'steamgroups': node.metadata.get('left4dead2/steamgroups'),
'rcon_password': config['rcon_password'],
},
'owner': 'steam',
'group': 'steam',
'triggers': [
f'svc_systemd:left4dead2-{name}.service:restart',
],
}
# service
svc_systemd[f'left4dead2-{name}.service'] = {
'needs': [ 'needs': [
f'file:/opt/steam/left4dead2-servers/{name}/left4dead2/cfg/server.cfg', 'svc_systemd:steam-update',
f'file:/usr/local/lib/systemd/system/left4dead2-{name}.service',
], ],
} },
}
# for id in node.metadata.get('left4dead2/workshop'):
# ADDONS directories[f'/opt/left4dead2/left4dead2/addons/{id}'] = {
#
# base
files[f'/opt/steam/left4dead2-servers/{name}/left4dead2/addons/readme.txt'] = {
'content_type': 'any',
'owner': 'steam', 'owner': 'steam',
'group': 'steam',
}
directories[f'/opt/steam/left4dead2-servers/{name}/left4dead2/addons'] = {
'owner': 'steam',
'group': 'steam',
'purge': True,
'triggers': [ 'triggers': [
f'svc_systemd:left4dead2-{name}.service:restart', 'svc_systemd:left4dead2-workshop:restart',
], ],
} }
for id in [
*config.get('workshop', []),
*node.metadata.get('left4dead2/workshop'),
]:
files[f'/opt/steam/left4dead2-servers/{name}/left4dead2/addons/{id}.vpk'] = {
'content_type': 'any',
'owner': 'steam',
'group': 'steam',
'triggers': [
f'svc_systemd:left4dead2-{name}.service:restart',
],
}
# admin system server_units = set()
for name, config in node.metadata.get('left4dead2/servers').items():
config.pop('port')
config = {
'hostname': name,
'sv_steamgroup': ','.join(
str(gid) for gid in node.metadata.get('left4dead2/steamgroups')
),
'z_difficulty': 'Impossible',
'sv_gametypes': 'realism',
'sv_region': 3, # europe
'log': 'on',
'sv_logecho': 1,
'sv_logfile': 1,
'sv_log_onefile': 0,
'sv_logbans': 1,
'sv_logflush': 0,
'sv_logsdir': 'logs', # /opt/left4dead2/left4dead2/logs
**config,
}
files[f'/opt/left4dead2/left4dead2/cfg/server-{name}.cfg'] = {
'content': '\n'.join(
f'{key} "{value}"' for key, value in sorted(config.items())
) + '\n',
'owner': 'steam',
'triggers': [
f'svc_systemd:left4dead2-server-{name}:restart',
],
}
svc_systemd[f'left4dead2-server-{name}'] = {
'needs': [
f'file:/usr/local/lib/systemd/system/left4dead2-server-{name}.service',
],
}
server_units.add(f'left4dead2-server-{name}')
directories[f'/opt/steam/left4dead2-servers/{name}/left4dead2/ems/admin system'] = { for id in node.metadata.get('left4dead2/workshop'):
directories[f'/opt/left4dead2/addons/{id}'] = {
'owner': 'steam', 'owner': 'steam',
'group': 'steam',
'mode': '0755',
'triggers': [ 'triggers': [
f'svc_systemd:left4dead2-{name}.service:restart', 'svc_systemd:left4dead2-workshop:restart',
],
}
files[f'/opt/steam/left4dead2-servers/{name}/left4dead2/ems/admin system/admins.txt'] = {
'owner': 'steam',
'group': 'steam',
'mode': '0755',
'content': '\n'.join(sorted(node.metadata.get('left4dead2/admins'))),
'triggers': [
f'svc_systemd:left4dead2-{name}.service:restart',
], ],
} }
# TIDYUP
find_obsolete_units = (
'find /usr/local/lib/systemd/system -type f -name "left4dead2-server-*.service" ' +
' '.join(f"! -name '{name}.service'" for name in server_units)
)
actions['remove_obsolete_left4dead2_units'] = {
'command': (
f'for unitfile in $({find_obsolete_units}); '
f'do '
f'systemctl stop $(basename "$unitfile"); '
f'systemctl disable $(basename "$unitfile"); '
f'rm "$unitfile"; '
f'systemctl daemon-reload; '
f'done'
),
'unless': (
find_obsolete_units + " | wc -l | grep -q '^0$'"
),
}

View file

@ -5,7 +5,7 @@ from shlex import quote
defaults = { defaults = {
'steam': { 'steam': {
'games': { 'games': {
'left4dead2': 222860, 'left4dead2': '222860',
}, },
}, },
'left4dead2': { 'left4dead2': {
@ -17,85 +17,64 @@ defaults = {
@metadata_reactor.provides( @metadata_reactor.provides(
'left4dead2/servers', 'systemd/units',
) )
def rconn_password(metadata): def workshop(metadata):
# only works from localhost! command = (
'set -x; '
'for ID in ' + ' '.join(metadata.get('left4dead2/workshop')) + '; '
'do '
'if ! ls /opt/left4dead2/left4dead2/addons/$ID/*.vpk; '
'then '
'cd /opt/left4dead2/left4dead2/addons/$ID; '
'/opt/steam-workshop-downloader https://steamcommunity.com/sharedfiles/filedetails\?id\=$ID; '
'unzip $ID.zip; '
'fi; '
'done'
)
return { return {
'left4dead2': { 'systemd': {
'servers': { 'units': {
server: { 'left4dead2-workshop.service': {
'rcon_password': repo.vault.password_for(f'{node.name} left4dead2 {server} rcon', length=24), 'Unit': {
'Description': 'install workshop items',
'After': 'network.target',
'Requires': 'steam-update.service',
'PartOf': 'steam-update.service'
},
'Service': {
'Type': 'oneshot',
'User': 'steam',
'ExecStart': f'/bin/bash -c {quote(command)}',
},
'Install': {
'WantedBy': {'multi-user.target'},
},
} }
for server in metadata.get('left4dead2/servers') }
}, }
},
} }
@metadata_reactor.provides( @metadata_reactor.provides(
'steam-workshop-download',
'systemd/units', 'systemd/units',
) )
def server_units(metadata): def server_units(metadata):
units = {} units = {}
workshop = {}
for name, config in metadata.get('left4dead2/servers').items(): for name, config in metadata.get('left4dead2/servers').items():
# mount overlay units[f'left4dead2-server-{name}.service'] = {
mountpoint = f'/opt/steam/left4dead2-servers/{name}'
mount_unit_name = mountpoint[1:].replace('-', '\\x2d').replace('/', '-') + '.mount'
units[mount_unit_name] = {
'Unit': {
'Description': f"Mount left4dead2 server {name} overlay",
'Conflicts': {'umount.target'},
'Before': {'umount.target'},
},
'Mount': {
'What': 'overlay',
'Where': mountpoint,
'Type': 'overlay',
'Options': ','.join([
'auto',
'lowerdir=/opt/steam/left4dead2',
f'upperdir=/opt/steam-zfs-overlay-workarounds/{name}/upper',
f'workdir=/opt/steam-zfs-overlay-workarounds/{name}/workdir',
]),
},
'Install': {
'RequiredBy': {
f'left4dead2-{name}.service',
},
},
}
# individual workshop
workshop_ids = config.get('workshop', set()) | metadata.get('left4dead2/workshop', set())
if workshop_ids:
workshop[f'left4dead2-{name}'] = {
'ids': workshop_ids,
'path': f'/opt/steam/left4dead2-servers/{name}/left4dead2/addons',
'user': 'steam',
'requires': {
mount_unit_name,
},
'required_by': {
f'left4dead2-{name}.service',
},
}
# left4dead2 server unit
units[f'left4dead2-{name}.service'] = {
'Unit': { 'Unit': {
'Description': f'left4dead2 server {name}', 'Description': f'left4dead2 server {name}',
'After': {'steam-update.service'}, 'After': 'network.target',
'Requires': {'steam-update.service'}, 'Requires': 'steam-update.service',
}, },
'Service': { 'Service': {
'User': 'steam', 'User': 'steam',
'Group': 'steam', 'Group': 'steam',
'WorkingDirectory': f'/opt/steam/left4dead2-servers/{name}', 'WorkingDirectory': '/opt/left4dead2',
'ExecStart': f'/opt/steam/left4dead2-servers/{name}/srcds_run -port {config["port"]} +exec server.cfg', 'ExecStart': f'/opt/left4dead2/srcds_run -port {config["port"]} -insecure +map {config["map"]} +exec server-{name}.cfg',
'Restart': 'on-failure', 'Restart': 'on-failure',
}, },
'Install': { 'Install': {
@ -104,24 +83,7 @@ def server_units(metadata):
} }
return { return {
'steam-workshop-download': workshop,
'systemd': { 'systemd': {
'units': units, 'units': units,
}, },
} }
@metadata_reactor.provides(
'nftables/input',
)
def firewall(metadata):
ports = set(str(server['port']) for server in metadata.get('left4dead2/servers').values())
return {
'nftables': {
'input': {
f"tcp dport {{ {', '.join(sorted(ports))} }} accept",
f"udp dport {{ {', '.join(sorted(ports))} }} accept",
},
},
}

View file

@ -1,6 +1,6 @@
https://github.com/dehydrated-io/dehydrated/wiki/example-dns-01-nsupdate-script https://github.com/dehydrated-io/dehydrated/wiki/example-dns-01-nsupdate-script
```sh ```
printf "server 127.0.0.1 printf "server 127.0.0.1
zone acme.resolver.name. zone acme.resolver.name.
update add _acme-challenge.ckn.li.acme.resolver.name. 600 IN TXT "hello" update add _acme-challenge.ckn.li.acme.resolver.name. 600 IN TXT "hello"

View file

@ -4,7 +4,7 @@ set -o pipefail
deploy_challenge() { deploy_challenge() {
echo " echo "
server ${server} server 10.0.11.3
zone ${zone}. zone ${zone}.
update add $1.${zone}. 60 IN TXT \"$3\" update add $1.${zone}. 60 IN TXT \"$3\"
send send
@ -13,7 +13,7 @@ deploy_challenge() {
clean_challenge() { clean_challenge() {
echo " echo "
server ${server} server 10.0.11.3
zone ${zone}. zone ${zone}.
update delete $1.${zone}. TXT update delete $1.${zone}. TXT
send send

View file

@ -56,7 +56,6 @@ for domain in node.metadata.get('letsencrypt/domains').keys():
'unless': f'/etc/dehydrated/letsencrypt-ensure-some-certificate {domain} true', 'unless': f'/etc/dehydrated/letsencrypt-ensure-some-certificate {domain} true',
'needs': { 'needs': {
'file:/etc/dehydrated/letsencrypt-ensure-some-certificate', 'file:/etc/dehydrated/letsencrypt-ensure-some-certificate',
'pkg_apt:dehydrated',
}, },
'needed_by': { 'needed_by': {
'svc_systemd:nginx', 'svc_systemd:nginx',

View file

@ -1,41 +0,0 @@
from shlex import quote
def generate_sysctl_key_value_pairs_from_json(json_data, parents=[]):
if isinstance(json_data, dict):
for key, value in json_data.items():
yield from generate_sysctl_key_value_pairs_from_json(value, [*parents, key])
elif isinstance(json_data, list):
raise ValueError(f"List not supported: '{json_data}'")
else:
# If it's a leaf node, yield the path
yield (parents, json_data)
key_value_pairs = generate_sysctl_key_value_pairs_from_json(node.metadata.get('sysctl'))
files= {
'/etc/sysctl.conf': {
'content': '\n'.join(
sorted(
f"{'.'.join(path)}={value}"
for path, value in key_value_pairs
),
),
'triggers': [
'svc_systemd:systemd-sysctl.service:restart',
],
},
}
svc_systemd = {
'systemd-sysctl.service': {},
}
for path, value in key_value_pairs:
actions[f'reload_sysctl.conf_{path}'] = {
'command': f"sysctl --values {'.'.join(path)} | grep -q {quote('^'+value+'$')}",
'needs': [
f'action:systemd-sysctl.service',
f'action:systemd-sysctl.service:restart',
],
}

View file

@ -1,3 +0,0 @@
defaults = {
'sysctl': {},
}

View file

@ -20,19 +20,18 @@ files = {
} }
actions = { actions = {
'systemd-locale': {
'command': f'localectl set-locale LANG="{default_locale}"',
'unless': f'localectl | grep -Fi "system locale" | grep -Fi "{default_locale}"',
'triggers': {
'action:locale-gen',
},
},
'locale-gen': { 'locale-gen': {
'command': 'locale-gen', 'command': 'locale-gen',
'triggered': True, 'triggered': True,
'needs': { 'needs': {
'pkg_apt:locales', 'pkg_apt:locales',
'action:systemd-locale', },
},
'systemd-locale': {
'command': f'localectl set-locale LANG="{default_locale}"',
'unless': f'localectl | grep -Fi "system locale" | grep -Fi "{default_locale}"',
'preceded_by': {
'action:locale-gen',
}, },
}, },
} }

View file

@ -1,6 +0,0 @@
#!/usr/bin/env bash
cd "$OLDPWD"
export BW_ITEM_WORKERS=$(expr "$(sysctl -n hw.logicalcpu)" '*' 12 '/' 10)
export BW_NODE_WORKERS=$(expr 320 '/' "$BW_ITEM_WORKERS")

View file

@ -1,6 +0,0 @@
#!/usr/bin/env bash
cd "$OLDPWD"
PATH_add "/opt/homebrew/opt/gnu-sed/libexec/gnubin"
PATH_add "/opt/homebrew/opt/grep/libexec/gnubin"

View file

@ -1,46 +0,0 @@
#!/bin/bash -l
sudo tee /etc/pam.d/sudo << EOT
# sudo: auth account password session
auth sufficient pam_tid.so
auth sufficient pam_smartcard.so
auth required pam_opendirectory.so
account required pam_permit.so
password required pam_deny.so
session required pam_permit.so
EOT
sudo xcodebuild -license accept
xcode-select --install
git -C ~/.zsh/oh-my-zsh pull
brew upgrade
brew upgrade --cask --greedy
pyenv install --skip-existing
sudo softwareupdate -ia --verbose
if test "$(defaults read com.apple.dock autohide-time-modifier)" == 0.16
then
defaults write com.apple.dock autohide-time-modifier -float 0.16
RESTART_DOCK=TRUE
fi
if test "$(defaults read com.apple.dock autohide-delay)" -ne 0
then
defaults write com.apple.dock autohide-delay -float 0
RESTART_DOCK=TRUE
fi
if test "$RESTART_DOCK" = TRUE
then
killall Dock
fi
sudo systemsetup -setremotelogin on # enable ssh
pip install --upgrade pip
# https://sysadmin-journal.com/apache-directory-studio-on-the-apple-m1/

View file

@ -1,9 +0,0 @@
#!/usr/bin/env bash
cd "$OLDPWD"
if test -f .venv/bin/python && test "$(realpath .venv/bin/python)" != "$(realpath "$(pyenv which python)")"
then
echo "rebuilding venv für new python version"
rm -rf .venv .pip_upgrade_timestamp
fi

View file

@ -1,3 +0,0 @@
#!/usr/bin/env bash
cd "$OLDPWD"

View file

@ -1,27 +0,0 @@
#!/usr/bin/env bash
cd "$OLDPWD"
python3 -m venv .venv
source .venv/bin/activate
PATH_add .venv/bin
NOW=$(date +%s)
if test -e .pip_upgrade_timestamp
then
LAST=$(cat .pip_upgrade_timestamp)
else
LAST=0
fi
DELTA=$(expr "$NOW" - "$LAST")
echo "last pip upgrade $DELTA seconds ago"
if test "$DELTA" -gt 86400
then
python3 -m pip --require-virtualenv install pip wheel --upgrade
python3 -m pip --require-virtualenv install -r requirements.txt --upgrade
if test -e optional-requirements.txt
then
python3 -m pip --require-virtualenv install -r optional-requirements.txt --upgrade
fi
date +%s > .pip_upgrade_timestamp
fi

View file

@ -1,33 +0,0 @@
export PATH=~/.bin:$PATH
export PATH=~/.cargo/bin:$PATH
export ZSH=~/.zsh/oh-my-zsh
export ZSH_HOSTNAME='sm'
ZSH_THEME="bw"
HIST_STAMPS="yyyy/mm/dd"
plugins=(
zsh-autosuggestions
git
)
source $ZSH/oh-my-zsh.sh
ulimit -S -n 24000
antivir() {
printf 'scanning for viruses' && sleep 1 && printf '.' && sleep 1 && printf '.' && sleep 1 && printf '.' &&
sleep 1 && echo '\nyour computer is safe!'
}
eval "$(rbenv init -)"
eval "$(pyenv init -)"
eval "$(direnv hook zsh)"
eval "$(op completion zsh)"; compdef _op op
# //S/M
sshn() {
ssh "$(tr '.' ' ' <<< "$1" | tac -s ' ' | xargs | tr ' ' '.').smhss.de"
}
pingn() {
ping "$(tr '.' ' ' <<< "$1" | tac -s ' ' | xargs | tr ' ' '.').smhss.de"
}

View file

@ -1,47 +0,0 @@
# brew install
actions['brew_install'] = {
'command': '/opt/homebrew/bin/brew install ' + ' '.join(node.metadata.get('brew')),
'unless': f"""PKGS=$(/opt/homebrew/bin/brew leaves); for p in {' '.join(node.metadata.get('brew'))}; do grep -q "$p" <<< $PKGS || exit 9; done"""
}
# bw init
directories['/Users/mwiegand/.config/bundlewrap/lock'] = {}
# home
files['/Users/mwiegand/.zshrc'] = {
'source': 'zshrc',
'mode': '0644',
}
# updater
files['/Users/mwiegand/.bin/macbook-update'] = {
'mode': '755',
}
with open(f'{repo.path}/bundles/zsh/files/bw.zsh-theme') as f:
files['/Users/mwiegand/.zsh/oh-my-zsh/themes/bw.zsh-theme'] = {
'content': f.read(),
'mode': '0644',
}
# direnv
directories['/Users/mwiegand/.local/share/direnv'] = {}
files['/Users/mwiegand/.local/share/direnv/gnu'] = {}
files['/Users/mwiegand/.local/share/direnv/pyenv'] = {}
files['/Users/mwiegand/.local/share/direnv/venv'] = {}
files['/Users/mwiegand/.local/share/direnv/bundlewrap'] = {}
##################
for element in [*files.values(), *directories.values()]:
element.update({
'owner': 'mwiegand',
'group': 'staff',
**element,
})

View file

@ -1,3 +0,0 @@
defaults = {
'brew': {},
}

View file

@ -1,22 +0,0 @@
# This is the mailman extension configuration file to enable HyperKitty as an
# archiver. Remember to add the following lines in the mailman.cfg file:
#
# [archiver.hyperkitty]
# class: mailman_hyperkitty.Archiver
# enable: yes
# configuration: /etc/mailman3/mailman-hyperkitty.cfg
#
[general]
# This is your HyperKitty installation, preferably on the localhost. This
# address will be used by Mailman to forward incoming emails to HyperKitty
# for archiving. It does not need to be publicly available, in fact it's
# better if it is not.
# However, if your Mailman installation is accessed via HTTPS, the URL needs
# to match your SSL certificate (e.g. https://lists.example.com/hyperkitty).
base_url: http://${hostname}/mailman3/hyperkitty/
# The shared api_key, must be identical except for quoting to the value of
# MAILMAN_ARCHIVER_KEY in HyperKitty's settings.
api_key: ${archiver_key}

View file

@ -1,190 +0,0 @@
ACCOUNT_EMAIL_VERIFICATION='none'
# This file is imported by the Mailman Suite. It is used to override
# the default settings from /usr/share/mailman3-web/settings.py.
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '${secret_key}'
ADMINS = (
('Mailman Suite Admin', 'root@localhost'),
)
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.8/ref/settings/#allowed-hosts
# Set to '*' by default in the Debian package to allow all hostnames. Mailman3
# is meant to run behind a webserver reverse proxy anyway.
ALLOWED_HOSTS = [
'${hostname}',
]
# Mailman API credentials
MAILMAN_REST_API_URL = 'http://localhost:8001'
MAILMAN_REST_API_USER = 'restadmin'
MAILMAN_REST_API_PASS = '${api_password}'
MAILMAN_ARCHIVER_KEY = '${archiver_key}'
MAILMAN_ARCHIVER_FROM = ('127.0.0.1', '::1')
# Application definition
INSTALLED_APPS = (
'hyperkitty',
'postorius',
'django_mailman3',
# Uncomment the next line to enable the admin:
'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'django_gravatar',
'compressor',
'haystack',
'django_extensions',
'django_q',
'allauth',
'allauth.account',
'allauth.socialaccount',
'django_mailman3.lib.auth.fedora',
#'allauth.socialaccount.providers.openid',
#'allauth.socialaccount.providers.github',
#'allauth.socialaccount.providers.gitlab',
#'allauth.socialaccount.providers.google',
#'allauth.socialaccount.providers.facebook',
#'allauth.socialaccount.providers.twitter',
#'allauth.socialaccount.providers.stackexchange',
)
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
# Use 'sqlite3', 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
#'ENGINE': 'django.db.backends.sqlite3',
'ENGINE': 'django.db.backends.postgresql_psycopg2',
#'ENGINE': 'django.db.backends.mysql',
# DB name or path to database file if using sqlite3.
#'NAME': '/var/lib/mailman3/web/mailman3web.db',
'NAME': 'mailman',
# The following settings are not used with sqlite3:
'USER': 'mailman',
'PASSWORD': '${db_password}',
# HOST: empty for localhost through domain sockets or '127.0.0.1' for
# localhost through TCP.
'HOST': '127.0.0.1',
# PORT: set to empty string for default.
'PORT': '5432',
# OPTIONS: Extra parameters to use when connecting to the database.
'OPTIONS': {
# Set sql_mode to 'STRICT_TRANS_TABLES' for MySQL. See
# https://docs.djangoproject.com/en/1.11/ref/
# databases/#setting-sql-mode
#'init_command': "SET sql_mode='STRICT_TRANS_TABLES'",
},
}
}
# If you're behind a proxy, use the X-Forwarded-Host header
# See https://docs.djangoproject.com/en/1.8/ref/settings/#use-x-forwarded-host
USE_X_FORWARDED_HOST = True
# And if your proxy does your SSL encoding for you, set SECURE_PROXY_SSL_HEADER
# https://docs.djangoproject.com/en/1.8/ref/settings/#secure-proxy-ssl-header
# SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_SCHEME', 'https')
# Other security settings
# SECURE_SSL_REDIRECT = True
# If you set SECURE_SSL_REDIRECT to True, make sure the SECURE_REDIRECT_EXEMPT
# contains at least this line:
# SECURE_REDIRECT_EXEMPT = [
# "archives/api/mailman/.*", # Request from Mailman.
# ]
# SESSION_COOKIE_SECURE = True
# SECURE_CONTENT_TYPE_NOSNIFF = True
# SECURE_BROWSER_XSS_FILTER = True
# CSRF_COOKIE_SECURE = True
# CSRF_COOKIE_HTTPONLY = True
# X_FRAME_OPTIONS = 'DENY'
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Set default domain for email addresses.
EMAILNAME = 'localhost.local'
# If you enable internal authentication, this is the address that the emails
# will appear to be coming from. Make sure you set a valid domain name,
# otherwise the emails may get rejected.
# https://docs.djangoproject.com/en/1.8/ref/settings/#default-from-email
# DEFAULT_FROM_EMAIL = "mailing-lists@you-domain.org"
DEFAULT_FROM_EMAIL = 'postorius@{}'.format(EMAILNAME)
# If you enable email reporting for error messages, this is where those emails
# will appear to be coming from. Make sure you set a valid domain name,
# otherwise the emails may get rejected.
# https://docs.djangoproject.com/en/1.8/ref/settings/#std:setting-SERVER_EMAIL
# SERVER_EMAIL = 'root@your-domain.org'
SERVER_EMAIL = 'root@{}'.format(EMAILNAME)
# Django Allauth
ACCOUNT_DEFAULT_HTTP_PROTOCOL = "https"
#
# Social auth
#
SOCIALACCOUNT_PROVIDERS = {
#'openid': {
# 'SERVERS': [
# dict(id='yahoo',
# name='Yahoo',
# openid_url='http://me.yahoo.com'),
# ],
#},
#'google': {
# 'SCOPE': ['profile', 'email'],
# 'AUTH_PARAMS': {'access_type': 'online'},
#},
#'facebook': {
# 'METHOD': 'oauth2',
# 'SCOPE': ['email'],
# 'FIELDS': [
# 'email',
# 'name',
# 'first_name',
# 'last_name',
# 'locale',
# 'timezone',
# ],
# 'VERSION': 'v2.4',
#},
}
# On a production setup, setting COMPRESS_OFFLINE to True will bring a
# significant performance improvement, as CSS files will not need to be
# recompiled on each requests. It means running an additional "compress"
# management command after each code upgrade.
# http://django-compressor.readthedocs.io/en/latest/usage/#offline-compression
COMPRESS_OFFLINE = True
POSTORIUS_TEMPLATE_BASE_URL = 'http://${hostname}/mailman3/'

View file

@ -1,277 +0,0 @@
# Copyright (C) 2008-2017 by the Free Software Foundation, Inc.
#
# This file is part of GNU Mailman.
#
# GNU Mailman is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option)
# any later version.
#
# GNU Mailman is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# GNU Mailman. If not, see <http://www.gnu.org/licenses/>.
# This file contains the Debian configuration for mailman. It uses ini-style
# formats under the lazr.config regime to define all system configuration
# options. See <https://launchpad.net/lazr.config> for details.
[mailman]
# This address is the "site owner" address. Certain messages which must be
# delivered to a human, but which can't be delivered to a list owner (e.g. a
# bounce from a list owner), will be sent to this address. It should point to
# a human.
site_owner: ${site_owner_email}
# This is the local-part of an email address used in the From field whenever a
# message comes from some entity to which there is no natural reply recipient.
# Mailman will append '@' and the host name of the list involved. This
# address must not bounce and it must not point to a Mailman process.
noreply_address: noreply
# The default language for this server.
default_language: de
# Membership tests for posting purposes are usually performed by looking at a
# set of headers, passing the test if any of their values match a member of
# the list. Headers are checked in the order given in this variable. The
# value From_ means to use the envelope sender. Field names are case
# insensitive. This is a space-separated list of headers.
sender_headers: from from_ reply-to sender
# Mail command processor will ignore mail command lines after designated max.
email_commands_max_lines: 10
# Default length of time a pending request is live before it is evicted from
# the pending database.
pending_request_life: 3d
# How long should files be saved before they are evicted from the cache?
cache_life: 7d
# A callable to run with no arguments early in the initialization process.
# This runs before database initialization.
pre_hook:
# A callable to run with no arguments late in the initialization process.
# This runs after adapters are initialized.
post_hook:
# Which paths.* file system layout to use.
# You should not change this variable.
layout: debian
# Can MIME filtered messages be preserved by list owners?
filtered_messages_are_preservable: no
# How should text/html parts be converted to text/plain when the mailing list
# is set to convert HTML to plaintext? This names a command to be called,
# where the substitution variable $filename is filled in by Mailman, and
# contains the path to the temporary file that the command should read from.
# The command should print the converted text to stdout.
html_to_plain_text_command: /usr/bin/lynx -dump $filename
# Specify what characters are allowed in list names. Characters outside of
# the class [-_.+=!$*{}~0-9a-z] matched case insensitively are never allowed,
# but this specifies a subset as the only allowable characters. This must be
# a valid character class regexp or the effect on list creation is
# unpredictable.
listname_chars: [-_.0-9a-z]
[shell]
# `mailman shell` (also `withlist`) gives you an interactive prompt that you
# can use to interact with an initialized and configured Mailman system. Use
# --help for more information. This section allows you to configure certain
# aspects of this interactive shell.
# Customize the interpreter prompt.
prompt: >>>
# Banner to show on startup.
banner: Welcome to the GNU Mailman shell
# Use IPython as the shell, which must be found on the system. Valid values
# are `no`, `yes`, and `debug` where the latter is equivalent to `yes` except
# that any import errors will be displayed to stderr.
use_ipython: no
# Set this to allow for command line history if readline is available. This
# can be as simple as $var_dir/history.py to put the file in the var directory.
history_file:
[paths.debian]
# Important directories for Mailman operation. These are defined here so that
# different layouts can be supported. For example, a developer layout would
# be different from a FHS layout. Most paths are based off the var_dir, and
# often just setting that will do the right thing for all the other paths.
# You might also have to set spool_dir though.
#
# Substitutions are allowed, but must be of the form $var where 'var' names a
# configuration variable in the paths.* section. Substitutions are expanded
# recursively until no more $-variables are present. Beware of infinite
# expansion loops!
#
# This is the root of the directory structure that Mailman will use to store
# its run-time data.
var_dir: /var/lib/mailman3
# This is where the Mailman queue files directories will be created.
queue_dir: $var_dir/queue
# This is the directory containing the Mailman 'runner' and 'master' commands
# if set to the string '$argv', it will be taken as the directory containing
# the 'mailman' command.
bin_dir: /usr/lib/mailman3/bin
# All list-specific data.
list_data_dir: $var_dir/lists
# Directory where log files go.
log_dir: /var/log/mailman3
# Directory for system-wide locks.
lock_dir: $var_dir/locks
# Directory for system-wide data.
data_dir: $var_dir/data
# Cache files.
cache_dir: $var_dir/cache
# Directory for configuration files and such.
etc_dir: /etc/mailman3
# Directory containing Mailman plugins.
ext_dir: $var_dir/ext
# Directory where the default IMessageStore puts its messages.
messages_dir: $var_dir/messages
# Directory for archive backends to store their messages in. Archivers should
# create a subdirectory in here to store their files.
archive_dir: $var_dir/archives
# Root directory for site-specific template override files.
template_dir: $var_dir/templates
# There are also a number of paths to specific file locations that can be
# defined. For these, the directory containing the file must already exist,
# or be one of the directories created by Mailman as per above.
#
# This is where PID file for the master runner is stored.
pid_file: /run/mailman3/master.pid
# Lock file.
lock_file: $lock_dir/master.lck
[database]
# The class implementing the IDatabase.
class: mailman.database.sqlite.SQLiteDatabase
#class: mailman.database.mysql.MySQLDatabase
#class: mailman.database.postgresql.PostgreSQLDatabase
# Use this to set the Storm database engine URL. You generally have one
# primary database connection for all of Mailman. List data and most rosters
# will store their data in this database, although external rosters may access
# other databases in their own way. This string supports standard
# 'configuration' substitutions.
url: sqlite:///$DATA_DIR/mailman.db
#url: mysql+pymysql://mailman3:mmpass@localhost/mailman3?charset=utf8&use_unicode=1
#url: postgresql://mailman3:mmpass@localhost/mailman3
debug: no
[logging.debian]
# This defines various log settings. The options available are:
#
# - level -- Overrides the default level; this may be any of the
# standard Python logging levels, case insensitive.
# - format -- Overrides the default format string
# - datefmt -- Overrides the default date format string
# - path -- Overrides the default logger path. This may be a relative
# path name, in which case it is relative to Mailman's LOG_DIR,
# or it may be an absolute path name. You cannot change the
# handler class that will be used.
# - propagate -- Boolean specifying whether to propagate log message from this
# logger to the root "mailman" logger. You cannot override
# settings for the root logger.
#
# In this section, you can define defaults for all loggers, which will be
# prefixed by 'mailman.'. Use subsections to override settings for specific
# loggers. The names of the available loggers are:
#
# - archiver -- All archiver output
# - bounce -- All bounce processing logs go here
# - config -- Configuration issues
# - database -- Database logging (SQLAlchemy and Alembic)
# - debug -- Only used for development
# - error -- All exceptions go to this log
# - fromusenet -- Information related to the Usenet to Mailman gateway
# - http -- Internal wsgi-based web interface
# - locks -- Lock state changes
# - mischief -- Various types of hostile activity
# - runner -- Runner process start/stops
# - smtp -- Successful SMTP activity
# - smtp-failure -- Unsuccessful SMTP activity
# - subscribe -- Information about leaves/joins
# - vette -- Message vetting information
format: %(asctime)s (%(process)d) %(message)s
datefmt: %b %d %H:%M:%S %Y
propagate: no
level: info
path: mailman.log
[webservice]
# The hostname at which admin web service resources are exposed.
hostname: localhost
# The port at which the admin web service resources are exposed.
port: 8001
# Whether or not requests to the web service are secured through SSL.
use_https: no
# Whether or not to show tracebacks in an HTTP response for a request that
# raised an exception.
show_tracebacks: yes
# The API version number for the current (highest) API.
api_version: 3.1
# The administrative username.
admin_user: restadmin
# The administrative password.
admin_pass: ${api_password}
[mta]
# The class defining the interface to the incoming mail transport agent.
#incoming: mailman.mta.exim4.LMTP
incoming: mailman.mta.postfix.LMTP
# The callable implementing delivery to the outgoing mail transport agent.
# This must accept three arguments, the mailing list, the message, and the
# message metadata dictionary.
outgoing: mailman.mta.deliver.deliver
# How to connect to the outgoing MTA. If smtp_user and smtp_pass is given,
# then Mailman will attempt to log into the MTA when making a new connection.
# smtp_host: smtp.ionos.de
# smtp_port: 587
# smtp_user: ${smtp_user}
# smtp_pass: ${smtp_password}
# smtp_secure_mode: starttls
smtp_host: 127.0.0.1
smtp_port: 25
smtp_user:
smtp_pass:
# Where the LMTP server listens for connections. Use 127.0.0.1 instead of
# localhost for Postfix integration, because Postfix only consults DNS
# (e.g. not /etc/hosts).
lmtp_host: 127.0.0.1
lmtp_port: 8024
# Where can we find the mail server specific configuration file? The path can
# be either a file system path or a Python import path. If the value starts
# with python: then it is a Python import path, otherwise it is a file system
# path. File system paths must be absolute since no guarantees are made about
# the current working directory. Python paths should not include the trailing
# .cfg, which the file must end with.
#configuration: python:mailman.config.exim4
configuration: python:mailman.config.postfix

View file

@ -1,52 +0,0 @@
# See /usr/share/postfix/main.cf.dist for a commented, more complete version
# Debian specific: Specifying a file name will cause the first
# line of that file to be used as the name. The Debian default
# is /etc/mailname.
#myorigin = /etc/mailname
smtpd_banner = $myhostname ESMTP $mail_name (Debian/GNU)
biff = no
# appending .domain is the MUA's job.
append_dot_mydomain = no
# Uncomment the next line to generate "delayed mail" warnings
#delay_warning_time = 4h
readme_directory = no
# See http://www.postfix.org/COMPATIBILITY_README.html -- default to 3.6 on
# fresh installs.
compatibility_level = 3.6
# TLS parameters
smtpd_tls_cert_file=/etc/ssl/certs/ssl-cert-snakeoil.pem
smtpd_tls_key_file=/etc/ssl/private/ssl-cert-snakeoil.key
smtpd_tls_security_level=may
smtp_tls_CApath=/etc/ssl/certs
smtp_tls_security_level=may
smtp_tls_session_cache_database = <%text>btree:${data_directory}/smtp_scache</%text>
smtpd_relay_restrictions = permit_mynetworks permit_sasl_authenticated defer_unauth_destination
myhostname = ${hostname}
alias_maps = hash:/etc/aliases
alias_database = hash:/etc/aliases
mydestination = $myhostname, localhost, localhost.localdomain, ${hostname}
relayhost =
mynetworks = 127.0.0.0/8 [::ffff:127.0.0.0]/104 [::1]/128
mailbox_size_limit = 0
recipient_delimiter = +
inet_interfaces = all
inet_protocols = all
unknown_local_recipient_reject_code = 550
owner_request_special = no
transport_maps =
hash:/var/lib/mailman3/data/postfix_lmtp
local_recipient_maps =
hash:/var/lib/mailman3/data/postfix_lmtp
relay_domains =
hash:/var/lib/mailman3/data/postfix_domains

View file

@ -1,50 +0,0 @@
[uwsgi]
# Port on which uwsgi will be listening.
uwsgi-socket = /run/mailman3-web/uwsgi.sock
#Enable threading for python
enable-threads = true
# Move to the directory where the Django files are.
chdir = /usr/share/mailman3-web
# Use the wsgi file provided with the django project.
wsgi-file = wsgi.py
# Setup default number of processes and threads per process.
master = true
process = 2
threads = 2
# Drop privielges and don't run as root.
uid = www-data
gid = www-data
plugins = python3
# Setup the django_q related worker processes.
attach-daemon = python3 manage.py qcluster
# Setup hyperkitty's cron jobs.
#unique-cron = -1 -1 -1 -1 -1 ./manage.py runjobs minutely
#unique-cron = -15 -1 -1 -1 -1 ./manage.py runjobs quarter_hourly
#unique-cron = 0 -1 -1 -1 -1 ./manage.py runjobs hourly
#unique-cron = 0 0 -1 -1 -1 ./manage.py runjobs daily
#unique-cron = 0 0 1 -1 -1 ./manage.py runjobs monthly
#unique-cron = 0 0 -1 -1 0 ./manage.py runjobs weekly
#unique-cron = 0 0 1 1 -1 ./manage.py runjobs yearly
# Setup the request log.
#req-logger = file:/var/log/mailman3/web/mailman-web.log
# Log cron separately.
#logger = cron file:/var/log/mailman3/web/mailman-web-cron.log
#log-route = cron uwsgi-cron
# Log qcluster commands separately.
#logger = qcluster file:/var/log/mailman3/web/mailman-web-qcluster.log
#log-route = qcluster uwsgi-daemons
# Last log and it logs the rest of the stuff.
#logger = file:/var/log/mailman3/web/mailman-web-error.log
logto = /var/log/mailman3/web/mailman-web.log

View file

@ -1,104 +0,0 @@
# Mailman's state directory lives on its own ZFS dataset and must exist
# (owned by Debian's 'list' user) before the Mailman services start.
directories = {
    '/var/lib/mailman3': {
        'owner': 'list',
        'group': 'list',
        'needs': {
            'zfs_dataset:tank/mailman',
            'pkg_apt:mailman3-full',
        },
        'needed_by': {
            'svc_systemd:mailman3.service',
            'svc_systemd:mailman3-web.service',
        },
    },
}

files = {
    # Postfix main configuration, templated with this node's hostname.
    '/etc/postfix/main.cf': {
        'source': 'postfix.cf',
        'content_type': 'mako',
        'mode': '0644',
        'context': {
            'hostname': node.metadata.get('mailman/hostname'),
        },
        'needs': {
            'pkg_apt:postfix',
        },
        'triggers': {
            'svc_systemd:postfix.service:restart',
        },
    },
    # Mailman core config; group-readable by 'list' only, since it carries
    # the REST API password (rendered from the 'mailman' metadata subtree).
    '/etc/mailman3/mailman.cfg': {
        'content_type': 'mako',
        'owner': 'root',
        'group': 'list',
        'mode': '0640',
        'context': node.metadata.get('mailman'),
        'needs': {
            'pkg_apt:mailman3-full',
        },
        'triggers': {
            'svc_systemd:mailman3.service:restart',
            'svc_systemd:mailman3-web.service:restart',
        },
    },
    # Django settings for Postorius/HyperKitty; readable by the web server.
    '/etc/mailman3/mailman-web.py': {
        'content_type': 'mako',
        'owner': 'root',
        'group': 'www-data',
        'mode': '0640',
        'context': node.metadata.get('mailman'),
        'needs': {
            'pkg_apt:mailman3-full',
        },
        'triggers': {
            'svc_systemd:mailman3.service:restart',
            'svc_systemd:mailman3-web.service:restart',
        },
    },
    # Connects Mailman core to the HyperKitty archiver (shared archiver key).
    '/etc/mailman3/mailman-hyperkitty.cfg': {
        'content_type': 'mako',
        'owner': 'root',
        'group': 'list',
        'mode': '0640',
        'context': node.metadata.get('mailman'),
        'needs': {
            'pkg_apt:mailman3-full',
        },
        'triggers': {
            'svc_systemd:mailman3.service:restart',
            'svc_systemd:mailman3-web.service:restart',
        },
    },
    # Static uwsgi config for the web UI (no templating needed).
    '/etc/mailman3/uwsgi.ini': {
        'content_type': 'text',
        'owner': 'root',
        'group': 'root',
        'mode': '0644',
        'needs': {
            'pkg_apt:mailman3-full',
        },
        'triggers': {
            'svc_systemd:mailman3.service:restart',
            'svc_systemd:mailman3-web.service:restart',
        },
    },
}

# Services come up only after their packages are installed; restarts are
# triggered by the config files above.
svc_systemd = {
    'postfix.service': {
        'needs': {
            'pkg_apt:postfix',
        },
    },
    'mailman3.service': {
        'needs': {
            'pkg_apt:mailman3-full',
        },
    },
    'mailman3-web.service': {
        'needs': {
            'pkg_apt:mailman3-full',
        },
    },
}

View file

@ -1,116 +0,0 @@
import base64
def derive_mailadmin_secret(metadata, salt):
    """Return a deterministic, URL-safe secret string for this node.

    The vault produces 32 reproducible random bytes keyed on
    '<node id>_<salt>'; they are re-encoded as unpadded URL-safe base64
    so the value can be embedded in config files and URLs.
    """
    vault_key = f"{metadata.get('id')}_{salt}"
    random_bytes = base64.b64decode(
        repo.vault.random_bytes_as_base64_for(vault_key, length=32).value
    )
    return base64.urlsafe_b64encode(random_bytes).rstrip(b'=').decode('ascii')
defaults = {
    'apt': {
        'packages': {
            # Full Mailman 3 suite (core + Postorius + HyperKitty). Its
            # Debian postinst initializes the database, so DB, role and
            # dataset must exist first.
            'mailman3-full': {
                'needs': {
                    'postgres_db:mailman',
                    'postgres_role:mailman',
                    'zfs_dataset:tank/mailman',
                }
            },
            'postfix': {},
            # PostgreSQL driver; required before mailman3-full so its
            # postinst can talk to the database.
            'python3-psycopg2': {
                'needed_by': {
                    'pkg_apt:mailman3-full',
                },
            },
            # apache2 is presumably dragged in by mailman3-full's
            # dependencies; this node serves via nginx (see the nginx
            # reactor), so remove it again after installation.
            'apache2': {
                'installed': False,
                'needs': {
                    'pkg_apt:mailman3-full',
                },
            },
        },
    },
    'zfs': {
        'datasets': {
            'tank/mailman': {
                'mountpoint': '/var/lib/mailman3',
            },
        },
    },
}
@metadata_reactor.provides(
    'postgresql',
    'mailman',
)
def postgresql(metadata):
    """Provision the 'mailman' database and role, sharing the password
    with the mailman metadata subtree so config templates can use it."""
    password = repo.vault.password_for(
        f"{metadata.get('id')} database mailman"
    )
    return {
        'postgresql': {
            'databases': {
                'mailman': {'owner': 'mailman'},
            },
            'roles': {
                'mailman': {'password': password},
            },
        },
        'mailman': {'db_password': password},
    }
@metadata_reactor.provides(
    'nginx/vhosts',
)
def nginx(metadata):
    """Register an nginx vhost serving the Mailman web UI."""
    hostname = metadata.get('mailman/hostname')
    vhost = {'content': 'mailman/vhost.conf'}
    return {'nginx': {'vhosts': {hostname: vhost}}}
@metadata_reactor.provides(
    'mailman/secret_key',
)
def secret_key(metadata):
    """Derive Django's SECRET_KEY for the Mailman web UI.

    This previously duplicated derive_mailadmin_secret() inline (including
    a redundant function-local ``import base64``); delegating keeps the
    derivation logic in one place. The vault key is unchanged —
    '<node id>_mailman_secret_key' — so existing deployments keep the
    same secret.
    """
    return {
        'mailman': {
            'secret_key': derive_mailadmin_secret(metadata, 'mailman_secret_key'),
        },
    }
@metadata_reactor.provides(
    'mailman',
)
def secrets(metadata):
    # Derive the remaining per-node secrets used by the config templates.
    # NOTE(review): 'web_secret' uses salt 'secret_key', which is a
    # *different* vault key than the 'mailman_secret_key' used by the
    # secret_key() reactor above — confirm the two are meant to differ.
    return {
        'mailman': {
            'web_secret': derive_mailadmin_secret(metadata, 'secret_key'),
            'api_password': derive_mailadmin_secret(metadata, 'api_password'),
            'archiver_key': derive_mailadmin_secret(metadata, 'archiver_key'),
        },
    }

View file

@ -1,6 +1,6 @@
<?php <?php
// https://raw.githubusercontent.com/Radiergummi/autodiscover/master/autodiscover/autodiscover.php
/******************************** /********************************
* Autodiscover responder * Autodiscover responder
@ -8,45 +8,45 @@
* This PHP script is intended to respond to any request to http(s)://mydomain.com/autodiscover/autodiscover.xml. * This PHP script is intended to respond to any request to http(s)://mydomain.com/autodiscover/autodiscover.xml.
* If configured properly, it will send a spec-complient autodiscover XML response, pointing mail clients to the * If configured properly, it will send a spec-complient autodiscover XML response, pointing mail clients to the
* appropriate mail services. * appropriate mail services.
* If you use MAPI or ActiveSync, stick with the Autodiscover service your mail server provides for you. But if * If you use MAPI or ActiveSync, stick with the Autodiscover service your mail server provides for you. But if
* you use POP/IMAP servers, this will provide autoconfiguration to Outlook, Apple Mail and mobile devices. * you use POP/IMAP servers, this will provide autoconfiguration to Outlook, Apple Mail and mobile devices.
* *
* To work properly, you'll need to set the service (sub)domains below in the settings section to the correct * To work properly, you'll need to set the service (sub)domains below in the settings section to the correct
* domain names, adjust ports and SSL. * domain names, adjust ports and SSL.
*/ */
//get raw POST data so we can extract the email address
$request = file_get_contents("php://input"); $request = file_get_contents("php://input");
// optional debug log
# file_put_contents( 'request.log', $request, FILE_APPEND ); # file_put_contents( 'request.log', $request, FILE_APPEND );
// retrieve email address from client request
preg_match( "/\<EMailAddress\>(.*?)\<\/EMailAddress\>/", $request, $email ); preg_match( "/\<EMailAddress\>(.*?)\<\/EMailAddress\>/", $request, $email );
// check for invalid mail, to prevent XSS
if (filter_var($email[1], FILTER_VALIDATE_EMAIL) === false) { if (filter_var($email[1], FILTER_VALIDATE_EMAIL) === false) {
throw new Exception('Invalid E-Mail provided'); throw new Exception('Invalid E-Mail provided');
} }
// get domain from email address
$domain = substr( strrchr( $email[1], "@" ), 1 ); $domain = substr( strrchr( $email[1], "@" ), 1 );
/************************************** /**************************************
* Port and server settings below * * Port and server settings below *
**************************************/ **************************************/
// IMAP settings
$imapServer = 'imap.' . $domain; // imap.example.com $imapServer = 'imap.' . $domain; // imap.example.com
$imapPort = 993; $imapPort = 993;
$imapSSL = true; $imapSSL = true;
// SMTP settings
$smtpServer = 'smtp.' . $domain; // smtp.example.com $smtpServer = 'smtp.' . $domain; // smtp.example.com
$smtpPort = 587; $smtpPort = 587;
$smtpSSL = true; $smtpSSL = true;
//set Content-Type
header( 'Content-Type: application/xml' ); header( 'Content-Type: application/xml' );
?> ?>
<?php echo '<?xml version="1.0" encoding="utf-8" ?>'; ?> <?php echo '<?xml version="1.0" encoding="utf-8" ?>'; ?>

View file

@ -24,7 +24,6 @@ def nginx(metadata):
'context': { 'context': {
'root': f"/var/www/{metadata.get('mailserver/autoconfig_hostname')}", 'root': f"/var/www/{metadata.get('mailserver/autoconfig_hostname')}",
}, },
'check_path': '/mail/config-v1.1.xml',
}, },
}, },
}, },
@ -60,7 +59,7 @@ def letsencrypt(metadata):
) )
def autoconfig(metadata): def autoconfig(metadata):
dns = {} dns = {}
for domain in metadata.get('mailserver/domains'): for domain in metadata.get('mailserver/domains'):
dns.update({ dns.update({
f'autoconfig.{domain}': { f'autoconfig.{domain}': {
@ -88,7 +87,7 @@ def autoconfig(metadata):
'SRV': {f"0 1 993 {metadata.get('mailserver/hostname')}."}, 'SRV': {f"0 1 993 {metadata.get('mailserver/hostname')}."},
}, },
}) })
return { return {
'dns': dns, 'dns': dns,
} }

View file

@ -1,12 +1 @@
mailserver echo -n 'WarumGehtDasNicht?' | argon2 FAPf+gTwqTRr+3H0cDktqw
==========
argon2 hashes
-------------
`echo -n 'WarumGehtDasNicht?' | argon2 FAPf+gTwqTRr+3H0cDktqw`
logs
----
`journalctl -u postfix@-.service -u dovecot.service -u rspamd.service -o cat -f`

View file

@ -33,12 +33,6 @@ defaults = {
'mountpoint': '/var/vmail', 'mountpoint': '/var/vmail',
'compression': 'on', 'compression': 'on',
}, },
'tank/vmail/index': {
'mountpoint': '/var/vmail/index',
'compression': 'on',
'com.sun:auto-snapshot': 'false',
'backup': False,
},
}, },
}, },
} }
@ -49,30 +43,12 @@ defaults = {
) )
def dns(metadata): def dns(metadata):
dns = {} dns = {}
for domain in metadata.get('mailserver/domains'): for domain in metadata.get('mailserver/domains'):
dns[domain] = { dns[domain] = {
'MX': [f"5 {metadata.get('mailserver/hostname')}."], 'MX': [f"5 {metadata.get('mailserver/hostname')}."],
'TXT': ['v=spf1 a mx -all'], 'TXT': ['v=spf1 a mx -all'],
} }
report_email = metadata.get('mailserver/dmarc_report_email')
dns[f'_dmarc.{domain}'] = {
'TXT': ['; '.join(f'{k}={v}' for k, v in {
# dmarc version
'v': 'DMARC1',
# reject on failure
'p': 'reject',
# standard reports
'rua': f'mailto:{report_email}',
# forensic reports
'fo': 1,
'ruf': f'mailto:{report_email}',
# require alignment between the DKIM domain and the parent Header From domain
'adkim': 's',
# require alignment between the SPF domain (the sender) and the Header From domain
'aspf': 's',
}.items())]
}
return { return {
'dns': dns, 'dns': dns,
@ -90,4 +66,4 @@ def letsencrypt(metadata):
}, },
}, },
}, },
} }

View file

@ -1 +0,0 @@
https://mariadb.com/kb/en/systemd/#configuring-mariadb-to-write-the-error-log-to-syslog

View file

@ -1,87 +0,0 @@
from shlex import quote
def mariadb(sql, **kwargs):
    """Build a one-shot ``mariadb`` CLI invocation for *sql*.

    Keyword arguments become long options (e.g. ``database='foo'`` ->
    ``--database foo``). Fix: option values are now shell-quoted like the
    SQL already was; ``shlex.quote`` returns plain identifiers unchanged,
    so existing callers get byte-identical commands.

    ``-B``: batch mode, ``-s``: silent (tab-separated), ``-r``: raw output.
    """
    options = ''.join(
        f" --{key} {quote(str(value))}" for key, value in kwargs.items()
    )
    return f"mariadb{options} -Bsr --execute {quote(sql)}"
# Data directory on its own ZFS dataset; ownership can only be fixed once
# the dataset and the packages (which create the mysql user) exist.
directories = {
    '/var/lib/mysql': {
        'owner': 'mysql',
        'group': 'mysql',
        'needs': [
            'zfs_dataset:tank/mariadb',
            'pkg_apt:mariadb-server',
            'pkg_apt:mariadb-client',
        ],
    },
}

files = {
    # Rendered from the 'mariadb/conf' metadata (see this bundle's defaults).
    '/etc/mysql/conf.d/override.conf': {
        'content': repo.libs.ini.dumps(node.metadata.get('mariadb/conf')),
        'content_type': 'text',
    },
}

svc_systemd = {
    'mariadb.service': {
        'needs': [
            'pkg_apt:mariadb-server',
            'pkg_apt:mariadb-client',
        ],
    },
}

# Idempotent equivalents of mysql_secure_installation:
actions = {
    # drop anonymous users ...
    'mariadb_sec_remove_anonymous_users': {
        'command': mariadb("DELETE FROM mysql.global_priv WHERE User=''"),
        'unless': mariadb("SELECT count(0) FROM mysql.global_priv WHERE User = ''") + " | grep -q '^0$'",
        'needs': [
            'svc_systemd:mariadb.service',
        ],
        'triggers': [
            'svc_systemd:mariadb.service:restart',
        ],
    },
    # ... and forbid remote root logins.
    'mariadb_sec_remove_remote_root': {
        'command': mariadb("DELETE FROM mysql.global_priv WHERE User='root' AND Host NOT IN ('localhost', '127.0.0.1', '::1')"),
        'unless': mariadb("SELECT count(0) FROM mysql.global_priv WHERE User='root' AND Host NOT IN ('localhost', '127.0.0.1', '::1')") + " | grep -q '^0$'",
        'needs': [
            'svc_systemd:mariadb.service',
        ],
        'triggers': [
            'svc_systemd:mariadb.service:restart',
        ],
    },
}
# Per-database provisioning: for every configured database create the
# database, a same-named user, set its password, and grant full access.
# NOTE(review): database names and passwords are interpolated into SQL
# without escaping — safe only while metadata is trusted; confirm.
for db, conf in node.metadata.get('mariadb/databases', {}).items():
    actions[f'mariadb_create_database_{db}'] = {
        'command': mariadb(f"CREATE DATABASE {db}"),
        'unless': mariadb(f"SHOW DATABASES LIKE '{db}'") + f" | grep -q '^{db}$'",
        'needs': [
            'svc_systemd:mariadb.service',
        ],
    }

    actions[f'mariadb_user_{db}_create'] = {
        'command': mariadb(f"CREATE USER {db}"),
        'unless': mariadb(f"SELECT User FROM mysql.user WHERE User = '{db}'") + f" | grep -q '^{db}$'",
        'needs': [
            f'action:mariadb_create_database_{db}',
        ],
    }

    pw = conf['password']
    # 'unless' probes a login with the desired password; the password is
    # only (re)set when that login fails.
    actions[f'mariadb_user_{db}_password'] = {
        'command': mariadb(f"SET PASSWORD FOR {db} = PASSWORD('{conf['password']}')"),
        'unless': f'echo {quote(pw)} | mariadb -u {db} -e quit -p',
        'needs': [
            f'action:mariadb_user_{db}_create',
        ],
    }

    actions[f'mariadb_grant_privileges_to_{db}'] = {
        'command': mariadb(f"GRANT ALL PRIVILEGES ON {db}.* TO '{db}'", database=db),
        'unless': mariadb(f"SHOW GRANTS FOR {db}") + f" | grep -q '^GRANT ALL PRIVILEGES ON `{db}`.* TO `{db}`@`%`'",
        'needs': [
            f'action:mariadb_user_{db}_create',
        ],
    }

View file

@ -1,45 +0,0 @@
defaults = {
    'apt': {
        'packages': {
            # Server and client may only be installed once the ZFS dataset
            # backing /var/lib/mysql exists.
            'mariadb-server': {
                'needs': {
                    'zfs_dataset:tank/mariadb',
                },
            },
            'mariadb-client': {
                'needs': {
                    'zfs_dataset:tank/mariadb',
                },
            },
        },
    },
    'mariadb': {
        'databases': {},
        'conf': {
            # ZFS-specific InnoDB tuning, adopted from:
            # https://www.reddit.com/r/zfs/comments/u1xklc/mariadbmysql_database_settings_for_zfs
            'mysqld': {
                # presumably: doublewrite is redundant on a copy-on-write
                # filesystem — confirm against the linked discussion
                'skip-innodb_doublewrite': None,
                'innodb_flush_method': 'fsync',
                'innodb_doublewrite': '0',
                'innodb_use_atomic_writes': '0',
                'innodb_use_native_aio': '0',
                'innodb_read_io_threads': '10',
                'innodb_write_io_threads': '10',
                # NOTE(review): assumes a RAM-rich host — confirm 26G is
                # appropriate for every node using this bundle.
                'innodb_buffer_pool_size': '26G',
                'innodb_flush_log_at_trx_commit': '1',
                'innodb_log_file_size': '1G',
                'innodb_flush_neighbors': '0',
                'innodb_fast_shutdown': '2',
            },
        },
    },
    'zfs': {
        'datasets': {
            'tank/mariadb': {
                'mountpoint': '/var/lib/mysql',
                # 16K recordsize matches InnoDB's default page size
                'recordsize': '16384',
                'atime': 'off',
            },
        },
    },
}

Some files were not shown because too many files have changed in this diff Show more